system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000c06c5_00000000-6_find_all_sums_hub_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z57__device_stub__Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_PiiPfS_S_S0_S0_
.type _Z57__device_stub__Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_PiiPfS_S_S0_S0_, @function
_Z57__device_stub__Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_PiiPfS_S_S0_S0_:
.LFB2051:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 56(%rsp)
movl %esi, 52(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movq %r9, 16(%rsp)
movq 208(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 52(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 216
pushq 72(%rsp)
.cfi_def_cfa_offset 224
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z57__device_stub__Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_PiiPfS_S_S0_S0_, .-_Z57__device_stub__Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_PiiPfS_S_S0_S0_
.globl _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.type _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_, @function
_Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_:
.LFB2052:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z57__device_stub__Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_PiiPfS_S_S0_S0_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_, .-_Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "find_all_sums_hub_kernel.hip"
.globl _Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_ # -- Begin function _Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.p2align 4, 0x90
.type _Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_,@function
_Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_: # @_Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movl %esi, 4(%rsp)
movq %rdx, 80(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 64(%rsp)
movq %r9, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_, .Lfunc_end0-_Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_,@object # @_Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.section .rodata,"a",@progbits
.globl _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.p2align 3, 0x0
_Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_:
.quad _Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.size _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_"
.size .L__unnamed_1, 44
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z39__device_stub__find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z24find_all_sums_hub_kernelPiiPfS_S_S0_S0_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/*YOUR CODE HERE*/
//basically check if source or dst is matched, then set to 1
//if not matched then I can set to keepEdges[tid] = 0
int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index
while( tid < numEdges )
{
if( matches[src[tid]] == -1 && matches[dst[tid]] == -1 )//either source or destination has an unmatched node, meaning we want to keep this edge
{
keepEdges[tid] = 1;//if we want to keep the edge, set it to 1
}
else
{
keepEdges[tid] = 0;//otherwise set it to 0
}
tid += ( blockDim.x * gridDim.x );
}
return;
} | code for sm_80
Function : _Z19markFilterEdges_gpuPiS_S_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fc800078e00ff */
/*0080*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x001fcc00078e0207 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R2, R7, c[0x0][0x170] ; /* 0x00005c0002047625 */
/* 0x004fcc00078e0207 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ SHF.R.S32.HI R11, RZ, 0x1f, R0 ; /* 0x0000001fff0b7819 */
/* 0x000fe40000011400 */
/*00d0*/ ISETP.NE.AND P0, PT, R4, -0x1, PT ; /* 0xffffffff0400780c */
/* 0x004fda0003f05270 */
/*00e0*/ @!P0 LEA R8, P1, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000088a11 */
/* 0x000fc800078210ff */
/*00f0*/ @!P0 LEA.HI.X R9, R0, c[0x0][0x16c], R11, 0x2, P1 ; /* 0x00005b0000098a11 */
/* 0x000fca00008f140b */
/*0100*/ @!P0 LDG.E R6, [R8.64] ; /* 0x0000000408068981 */
/* 0x000ea4000c1e1900 */
/*0110*/ @!P0 IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006068625 */
/* 0x004fcc00078e0207 */
/*0120*/ @!P0 LDG.E R6, [R6.64] ; /* 0x0000000406068981 */
/* 0x000ea2000c1e1900 */
/*0130*/ LEA R10, P1, R0, c[0x0][0x178], 0x2 ; /* 0x00005e00000a7a11 */
/* 0x000fe200078210ff */
/*0140*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff057624 */
/* 0x000fc600078e00ff */
/*0150*/ LEA.HI.X R11, R0, c[0x0][0x17c], R11, 0x2, P1 ; /* 0x00005f00000b7a11 */
/* 0x000fe200008f140b */
/*0160*/ IMAD R0, R5, c[0x0][0xc], R0 ; /* 0x0000030005007a24 */
/* 0x000fe200078e0200 */
/*0170*/ ISETP.EQ.AND P0, PT, R6, -0x1, !P0 ; /* 0xffffffff0600780c */
/* 0x004fc80004702270 */
/*0180*/ SEL R3, RZ, 0x1, !P0 ; /* 0x00000001ff037807 */
/* 0x000fe40004000000 */
/*0190*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fc60003f06270 */
/*01a0*/ STG.E [R10.64], R3 ; /* 0x000000030a007986 */
/* 0x0001f4000c101904 */
/*01b0*/ @!P0 BRA 0x70 ; /* 0xfffffeb000008947 */
/* 0x000fea000383ffff */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/*YOUR CODE HERE*/
//basically check if source or dst is matched, then set to 1
//if not matched then I can set to keepEdges[tid] = 0
int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index
while( tid < numEdges )
{
if( matches[src[tid]] == -1 && matches[dst[tid]] == -1 )//either source or destination has an unmatched node, meaning we want to keep this edge
{
keepEdges[tid] = 1;//if we want to keep the edge, set it to 1
}
else
{
keepEdges[tid] = 0;//otherwise set it to 0
}
tid += ( blockDim.x * gridDim.x );
}
return;
} | .file "tmpxft_000d8b08_00000000-6_markFilterEdges.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i
.type _Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i, @function
_Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i:
.LFB2081:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z19markFilterEdges_gpuPiS_S_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i, .-_Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i
.globl _Z19markFilterEdges_gpuPiS_S_S_i
.type _Z19markFilterEdges_gpuPiS_S_S_i, @function
_Z19markFilterEdges_gpuPiS_S_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z19markFilterEdges_gpuPiS_S_S_i, .-_Z19markFilterEdges_gpuPiS_S_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19markFilterEdges_gpuPiS_S_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19markFilterEdges_gpuPiS_S_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/*YOUR CODE HERE*/
//basically check if source or dst is matched, then set to 1
//if not matched then I can set to keepEdges[tid] = 0
int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index
while( tid < numEdges )
{
if( matches[src[tid]] == -1 && matches[dst[tid]] == -1 )//either source or destination has an unmatched node, meaning we want to keep this edge
{
keepEdges[tid] = 1;//if we want to keep the edge, set it to 1
}
else
{
keepEdges[tid] = 0;//otherwise set it to 0
}
tid += ( blockDim.x * gridDim.x );
}
return;
} | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/*YOUR CODE HERE*/
//basically check if source or dst is matched, then set to 1
//if not matched then I can set to keepEdges[tid] = 0
int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index
while( tid < numEdges )
{
if( matches[src[tid]] == -1 && matches[dst[tid]] == -1 )//either source or destination has an unmatched node, meaning we want to keep this edge
{
keepEdges[tid] = 1;//if we want to keep the edge, set it to 1
}
else
{
keepEdges[tid] = 0;//otherwise set it to 0
}
tid += ( blockDim.x * gridDim.x );
}
return;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/*YOUR CODE HERE*/
//basically check if source or dst is matched, then set to 1
//if not matched then I can set to keepEdges[tid] = 0
int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index
while( tid < numEdges )
{
if( matches[src[tid]] == -1 && matches[dst[tid]] == -1 )//either source or destination has an unmatched node, meaning we want to keep this edge
{
keepEdges[tid] = 1;//if we want to keep the edge, set it to 1
}
else
{
keepEdges[tid] = 0;//otherwise set it to 0
}
tid += ( blockDim.x * gridDim.x );
}
return;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19markFilterEdges_gpuPiS_S_S_i
.globl _Z19markFilterEdges_gpuPiS_S_S_i
.p2align 8
.type _Z19markFilterEdges_gpuPiS_S_S_i,@function
_Z19markFilterEdges_gpuPiS_S_S_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34
s_load_b32 s12, s[0:1], 0x20
s_add_u32 s2, s0, 40
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s13, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s13, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s12, v1
s_cbranch_execz .LBB0_7
s_load_b32 s2, s[2:3], 0x0
s_load_b256 s[4:11], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s2, s13
s_mov_b32 s2, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v2, s0, s10, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v3, s0, s11, v3, s0
v_cmp_le_i32_e32 vcc_lo, s12, v1
global_store_b32 v[2:3], v0, off
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_7
.LBB0_3:
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
global_load_b32 v0, v[4:5], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e64 s0, -1, v0
v_cmpx_eq_u32_e32 -1, v0
s_cbranch_execz .LBB0_5
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
s_and_not1_b32 s0, s0, exec_lo
s_mov_b32 s13, 1
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_u32 v4, vcc_lo, s8, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
global_load_b32 v0, v[4:5], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, -1, v0
s_and_b32 s14, vcc_lo, exec_lo
s_or_b32 s0, s0, s14
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s3
v_mov_b32_e32 v0, s13
s_and_saveexec_b32 s3, s0
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v0, 0
s_branch .LBB0_2
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19markFilterEdges_gpuPiS_S_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19markFilterEdges_gpuPiS_S_S_i, .Lfunc_end0-_Z19markFilterEdges_gpuPiS_S_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19markFilterEdges_gpuPiS_S_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19markFilterEdges_gpuPiS_S_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/*YOUR CODE HERE*/
//basically check if source or dst is matched, then set to 1
//if not matched then I can set to keepEdges[tid] = 0
int tid = blockIdx.x * blockDim.x + threadIdx.x;//this is the thread id and will be used as index
while( tid < numEdges )
{
if( matches[src[tid]] == -1 && matches[dst[tid]] == -1 )//either source or destination has an unmatched node, meaning we want to keep this edge
{
keepEdges[tid] = 1;//if we want to keep the edge, set it to 1
}
else
{
keepEdges[tid] = 0;//otherwise set it to 0
}
tid += ( blockDim.x * gridDim.x );
}
return;
} | .text
.file "markFilterEdges.hip"
.globl _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i # -- Begin function _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.p2align 4, 0x90
.type _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i,@function
_Z34__device_stub__markFilterEdges_gpuPiS_S_S_i: # @_Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z19markFilterEdges_gpuPiS_S_S_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i, .Lfunc_end0-_Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19markFilterEdges_gpuPiS_S_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19markFilterEdges_gpuPiS_S_S_i,@object # @_Z19markFilterEdges_gpuPiS_S_S_i
.section .rodata,"a",@progbits
.globl _Z19markFilterEdges_gpuPiS_S_S_i
.p2align 3, 0x0
_Z19markFilterEdges_gpuPiS_S_S_i:
.quad _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.size _Z19markFilterEdges_gpuPiS_S_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19markFilterEdges_gpuPiS_S_S_i"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19markFilterEdges_gpuPiS_S_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z19markFilterEdges_gpuPiS_S_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fc800078e00ff */
/*0080*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x001fcc00078e0207 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R2, R7, c[0x0][0x170] ; /* 0x00005c0002047625 */
/* 0x004fcc00078e0207 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ SHF.R.S32.HI R11, RZ, 0x1f, R0 ; /* 0x0000001fff0b7819 */
/* 0x000fe40000011400 */
/*00d0*/ ISETP.NE.AND P0, PT, R4, -0x1, PT ; /* 0xffffffff0400780c */
/* 0x004fda0003f05270 */
/*00e0*/ @!P0 LEA R8, P1, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000088a11 */
/* 0x000fc800078210ff */
/*00f0*/ @!P0 LEA.HI.X R9, R0, c[0x0][0x16c], R11, 0x2, P1 ; /* 0x00005b0000098a11 */
/* 0x000fca00008f140b */
/*0100*/ @!P0 LDG.E R6, [R8.64] ; /* 0x0000000408068981 */
/* 0x000ea4000c1e1900 */
/*0110*/ @!P0 IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006068625 */
/* 0x004fcc00078e0207 */
/*0120*/ @!P0 LDG.E R6, [R6.64] ; /* 0x0000000406068981 */
/* 0x000ea2000c1e1900 */
/*0130*/ LEA R10, P1, R0, c[0x0][0x178], 0x2 ; /* 0x00005e00000a7a11 */
/* 0x000fe200078210ff */
/*0140*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff057624 */
/* 0x000fc600078e00ff */
/*0150*/ LEA.HI.X R11, R0, c[0x0][0x17c], R11, 0x2, P1 ; /* 0x00005f00000b7a11 */
/* 0x000fe200008f140b */
/*0160*/ IMAD R0, R5, c[0x0][0xc], R0 ; /* 0x0000030005007a24 */
/* 0x000fe200078e0200 */
/*0170*/ ISETP.EQ.AND P0, PT, R6, -0x1, !P0 ; /* 0xffffffff0600780c */
/* 0x004fc80004702270 */
/*0180*/ SEL R3, RZ, 0x1, !P0 ; /* 0x00000001ff037807 */
/* 0x000fe40004000000 */
/*0190*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fc60003f06270 */
/*01a0*/ STG.E [R10.64], R3 ; /* 0x000000030a007986 */
/* 0x0001f4000c101904 */
/*01b0*/ @!P0 BRA 0x70 ; /* 0xfffffeb000008947 */
/* 0x000fea000383ffff */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19markFilterEdges_gpuPiS_S_S_i
.globl _Z19markFilterEdges_gpuPiS_S_S_i
.p2align 8
.type _Z19markFilterEdges_gpuPiS_S_S_i,@function
_Z19markFilterEdges_gpuPiS_S_S_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34
s_load_b32 s12, s[0:1], 0x20
s_add_u32 s2, s0, 40
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s13, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s13, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s12, v1
s_cbranch_execz .LBB0_7
s_load_b32 s2, s[2:3], 0x0
s_load_b256 s[4:11], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s2, s13
s_mov_b32 s2, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v2, s0, s10, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v3, s0, s11, v3, s0
v_cmp_le_i32_e32 vcc_lo, s12, v1
global_store_b32 v[2:3], v0, off
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_7
.LBB0_3:
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
global_load_b32 v0, v[4:5], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e64 s0, -1, v0
v_cmpx_eq_u32_e32 -1, v0
s_cbranch_execz .LBB0_5
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
s_and_not1_b32 s0, s0, exec_lo
s_mov_b32 s13, 1
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_u32 v4, vcc_lo, s8, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
global_load_b32 v0, v[4:5], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, -1, v0
s_and_b32 s14, vcc_lo, exec_lo
s_or_b32 s0, s0, s14
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s3
v_mov_b32_e32 v0, s13
s_and_saveexec_b32 s3, s0
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v0, 0
s_branch .LBB0_2
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19markFilterEdges_gpuPiS_S_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19markFilterEdges_gpuPiS_S_S_i, .Lfunc_end0-_Z19markFilterEdges_gpuPiS_S_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19markFilterEdges_gpuPiS_S_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19markFilterEdges_gpuPiS_S_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d8b08_00000000-6_markFilterEdges.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i
.type _Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i, @function
_Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i:
.LFB2081:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z19markFilterEdges_gpuPiS_S_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i, .-_Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i
.globl _Z19markFilterEdges_gpuPiS_S_S_i
.type _Z19markFilterEdges_gpuPiS_S_S_i, @function
_Z19markFilterEdges_gpuPiS_S_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z19markFilterEdges_gpuPiS_S_S_iPiS_S_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z19markFilterEdges_gpuPiS_S_S_i, .-_Z19markFilterEdges_gpuPiS_S_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z19markFilterEdges_gpuPiS_S_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19markFilterEdges_gpuPiS_S_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "markFilterEdges.hip"
.globl _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i # -- Begin function _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.p2align 4, 0x90
.type _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i,@function
_Z34__device_stub__markFilterEdges_gpuPiS_S_S_i: # @_Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z19markFilterEdges_gpuPiS_S_S_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i, .Lfunc_end0-_Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19markFilterEdges_gpuPiS_S_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19markFilterEdges_gpuPiS_S_S_i,@object # @_Z19markFilterEdges_gpuPiS_S_S_i
.section .rodata,"a",@progbits
.globl _Z19markFilterEdges_gpuPiS_S_S_i
.p2align 3, 0x0
_Z19markFilterEdges_gpuPiS_S_S_i:
.quad _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.size _Z19markFilterEdges_gpuPiS_S_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19markFilterEdges_gpuPiS_S_S_i"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__markFilterEdges_gpuPiS_S_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19markFilterEdges_gpuPiS_S_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
extern "C" {
__global__ void helloWorldKernel(int x);
}
__global__ void helloWorldKernel(int x) {
printf("Hello World %d\n",x);
return;
} | code for sm_80
Function : helloWorldKernel
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fca00078e00ff */
/*0010*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fe20007ffe0ff */
/*0020*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff087624 */
/* 0x000fe200078e00ff */
/*0030*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0040*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0050*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe20007f1e0ff */
/*0060*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0070*/ STL [R1], R8 ; /* 0x0000000801007387 */
/* 0x0001e20000100800 */
/*0080*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x0000640000000a00 */
/*0090*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fcc00000e06ff */
/*00a0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x001fe40000000000 */
/*00b0*/ MOV R11, 0x120 ; /* 0x00000120000b7802 */
/* 0x000fe40000000f00 */
/*00c0*/ MOV R20, 0xa0 ; /* 0x000000a000147802 */
/* 0x000fc40000000f00 */
/*00d0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00e0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*00f0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0100*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0110*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
extern "C" {
__global__ void helloWorldKernel(int x);
}
__global__ void helloWorldKernel(int x) {
printf("Hello World %d\n",x);
return;
} | .file "tmpxft_0012f284_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z16helloWorldKernelii
.type _Z35__device_stub__Z16helloWorldKernelii, @function
_Z35__device_stub__Z16helloWorldKernelii:
.LFB2081:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq helloWorldKernel(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z35__device_stub__Z16helloWorldKernelii, .-_Z35__device_stub__Z16helloWorldKernelii
.globl helloWorldKernel
.type helloWorldKernel, @function
helloWorldKernel:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z16helloWorldKernelii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size helloWorldKernel, .-helloWorldKernel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "helloWorldKernel"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq helloWorldKernel(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
extern "C" {
__global__ void helloWorldKernel(int x);
}
__global__ void helloWorldKernel(int x) {
printf("Hello World %d\n",x);
return;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
extern "C" {
__global__ void helloWorldKernel(int x);
}
__global__ void helloWorldKernel(int x) {
printf("Hello World %d\n",x);
return;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
extern "C" {
__global__ void helloWorldKernel(int x);
}
__global__ void helloWorldKernel(int x) {
printf("Hello World %d\n",x);
return;
} | .text
.file "kernel.hip"
.globl __device_stub__helloWorldKernel # -- Begin function __device_stub__helloWorldKernel
.p2align 4, 0x90
.type __device_stub__helloWorldKernel,@function
__device_stub__helloWorldKernel: # @__device_stub__helloWorldKernel
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movl %edi, 12(%rsp)
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $helloWorldKernel, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size __device_stub__helloWorldKernel, .Lfunc_end0-__device_stub__helloWorldKernel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $helloWorldKernel, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type helloWorldKernel,@object # @helloWorldKernel
.section .rodata,"a",@progbits
.globl helloWorldKernel
.p2align 3, 0x0
helloWorldKernel:
.quad __device_stub__helloWorldKernel
.size helloWorldKernel, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "helloWorldKernel"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__helloWorldKernel
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym helloWorldKernel
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012f284_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z16helloWorldKernelii
.type _Z35__device_stub__Z16helloWorldKernelii, @function
_Z35__device_stub__Z16helloWorldKernelii:
.LFB2081:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq helloWorldKernel(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z35__device_stub__Z16helloWorldKernelii, .-_Z35__device_stub__Z16helloWorldKernelii
.globl helloWorldKernel
.type helloWorldKernel, @function
helloWorldKernel:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z16helloWorldKernelii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size helloWorldKernel, .-helloWorldKernel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "helloWorldKernel"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq helloWorldKernel(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl __device_stub__helloWorldKernel # -- Begin function __device_stub__helloWorldKernel
.p2align 4, 0x90
.type __device_stub__helloWorldKernel,@function
__device_stub__helloWorldKernel: # @__device_stub__helloWorldKernel
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movl %edi, 12(%rsp)
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $helloWorldKernel, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size __device_stub__helloWorldKernel, .Lfunc_end0-__device_stub__helloWorldKernel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $helloWorldKernel, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type helloWorldKernel,@object # @helloWorldKernel
.section .rodata,"a",@progbits
.globl helloWorldKernel
.p2align 3, 0x0
helloWorldKernel:
.quad __device_stub__helloWorldKernel
.size helloWorldKernel, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "helloWorldKernel"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__helloWorldKernel
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym helloWorldKernel
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
#include<string>
#include<malloc.h>
#include<fstream>
#include<sstream>
#include<vector>
#include<cmath>
#include<cstdio>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define MAX_CLUSTER_SIZE 6
#define BLOCK_SIZE 16
using namespace std;
typedef vector<double> record_t;
typedef vector<record_t> data_t;
long long int max_atoms=0;
long long int max_energy_states=0;
int k_clus,cls;
double cluster_lambda=0.0;
int f=0;
istream& operator >>(istream& ins, record_t &record)
{
record.clear();
string line;
getline(ins,line);
stringstream ss(line);
string field;
while(getline(ss,field,','))
{
stringstream fs(field);
double d=0.0;
fs>>d;
record.push_back(d);
}
return ins;
}
istream& operator >> (istream& ins, data_t& data)
{
data.clear();
record_t record;
while(ins>>record)
{
data.push_back(record);
}
return ins;
}
__global__ void findMeans(const float *cluster, const float *instance, int *rnk, const int num, const int clus_size, const int maxAtoms)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// int i = threadIdx.x;
if(i<num)
{
float min=0.0;
for(int j=0;j<maxAtoms;j++)
{
float d=0.0;
float s=0.0;
for(int k=0;k<3;k++)
{
d=instance[(j*5)+k+2]-cluster[((0*clus_size)+j)*maxAtoms +k];
s=s+(d*d);
}
s=sqrt(s);
min+=s;
}
int cls=-1;
for(int clus=0;clus<clus_size;clus++)
{
float val=0.0;
for(int p=0;p<maxAtoms;p++)
{
float d=0.0;
float s=0.0;
for(int l=0;l<3;l++)
{
d=instance[((i*maxAtoms+p)*5)+l+2]-cluster[((clus*clus_size)+p)*maxAtoms +l];
s=s+(d*d);
}
s=sqrt(s);
val+=s;
}
if(val<=min)
{
cls=clus;
min=val;
}
}
rnk[i]=cls+1;
}
}
int main(void)
{
cudaError_t err=cudaSuccess;
data_t data;
ifstream infile("alanine.csv");
infile>>data;
if(!infile.eof())
{
cout<<"Error! Please check file.\n";
return 1;
}
infile.close();
cout<<"Number of rows in file : "<<data.size()<<endl;
for(long long int i=0;i<data.size();i++)
{
if(i>0)
{
if(data[i][1]<data[i-1][1])
{
max_atoms=i;
break;
}
}
}
cout<<"Number of atoms : "<<max_atoms<<endl;
max_energy_states=data[data.size()-1][0];
// max_energy_states=32;
cout<<"Total number of conformations : "<<max_energy_states<<endl;
//allocate host cluster
int s = MAX_CLUSTER_SIZE*max_atoms*3;
size_t sz = s*sizeof(float);
float *host_cluster=(float *)malloc(sz);
if(host_cluster==NULL)
cout<<"Not able to allocate host cluster"<<endl;
//Initializing host cluster
for(int i=0;i<MAX_CLUSTER_SIZE;i++)
{
int clus_num=rand()%max_energy_states + 0;
for(long long int ii=0;ii<max_atoms;ii++)
{
for(int jj=0;jj<3;jj++)
{
host_cluster[((i*MAX_CLUSTER_SIZE)+ii)*max_atoms+jj]=data[clus_num*max_atoms+ii][jj+2];
}
}
}
//allocate device cluster
float *d_clus = NULL;
cudaMalloc((void **)&d_clus, sz);
if(err!=cudaSuccess)
{
cout<<"Failed to allocate cluster on device!\n";
exit(EXIT_FAILURE);
}
//initializing device cluster
cudaMemcpy(d_clus, host_cluster, sz, cudaMemcpyHostToDevice);
if(err!=cudaSuccess)
{
cout<<"Failed to copy cluster to device\n";
exit(EXIT_FAILURE);
}
//allocate host instance
float *host_instances;
long int row=data.size();
int col=5;
long int size=row*col;
host_instances=new float[size];
for(long long int i=0;i<row;i++)
{
for(int j=0;j<col;j++)
{
host_instances[i*col+j]=data[i][j];
}
}
//allocate device instance
float *device_instances;
err=cudaMalloc((void **)&device_instances,size*sizeof(float));
if(err!=cudaSuccess)
{
cout<<"error allocating device instance"<<endl;
exit(EXIT_FAILURE);
}
err=cudaMemcpy(device_instances, host_instances, size*sizeof(float), cudaMemcpyHostToDevice);
if(err!=cudaSuccess)
{
cout<<"error copying device instance"<<endl;
exit(EXIT_FAILURE);
}
int *rnk_host;
int rnk[max_energy_states];
rnk_host=new int[max_energy_states];
for(long long int i=0;i<max_energy_states;i++)
{
rnk_host[i]=0;
rnk[i]=0;
}
int *rnk_device;
err=cudaMalloc((void **)&rnk_device,max_energy_states*sizeof(int));
if(err!=cudaSuccess)
{
cout<<"error allocating device rnk"<<endl;
exit(EXIT_FAILURE);
}
err=cudaMemcpy(rnk_device, rnk_host, max_energy_states*sizeof(int), cudaMemcpyHostToDevice);
if(err!=cudaSuccess)
{
cout<<"error allocating device instance"<<endl;
exit(EXIT_FAILURE);
}
// dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
int num_blocks=(max_energy_states+511)/512;
//while(1)
//{
findMeans<<<48,256>>>(d_clus, device_instances, rnk_device, max_energy_states, MAX_CLUSTER_SIZE, max_atoms);
err=cudaMemcpy(rnk_host, rnk_device, max_energy_states*sizeof(int), cudaMemcpyDeviceToHost);
if(err!=cudaSuccess)
{
cout<<"error allocating device rnk"<<endl;
exit(EXIT_FAILURE);
}
//while(1)
//{
//E - Step
for(long long int j=0;j<max_energy_states;j++)
{
double min=0.0;
for(long long int y=0;y<max_atoms;y++)
{
double d=0.0,s=0.0;
for(int z=0;z<3;z++)
{
d=host_instances[(y*5)+z+2]-host_cluster[((0*MAX_CLUSTER_SIZE)+y)*max_atoms +z];
s=s+(d*d);
}
s=sqrt(s);
min+=s;
}
cls=-1;
for(int clus=0;clus<MAX_CLUSTER_SIZE;clus++)
{
double val=0.0;
for(long long int p=0;p<max_atoms;p++)
{
double d=0.0,s=0.0;
for(int l=0;l<3;l++)
{
d=data[j*max_atoms+p][l+2]-host_cluster[((clus*MAX_CLUSTER_SIZE)+p)*max_atoms +l];
s=s+(d*d);
}
s=sqrt(s);
val+=s;
}
if(val<=min)
{
cls=clus;
min=val;
}
}
rnk[j]=cls+1;
}
int flag=0;
for(long int i=0;i<100;i++)
{
if(rnk[i]!=rnk_host[i])
flag=1;
cout<<rnk[i]<<"\t"<<rnk_host[i]<<endl;
}
if(flag==1)
cout<<"Not Matching"<<endl;
/*
//M - Step
int sizes[MAX_CLUSTER_SIZE];
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
sizes[jj]=0;
double means[MAX_CLUSTER_SIZE][max_atoms][3];
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
means[jj][kk][ll]=0;
}
}
}
for(long long int jj=0;jj<max_energy_states;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
double d=data[jj*max_atoms+kk][ll+2];
means[rnk[jj]-1][kk][ll]+=d;
sizes[rnk[jj]-1]++;
}
}
}
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
if(cluster[jj][kk][ll]!=means[jj][kk][ll]/sizes[jj])
{
f=1;
}
if(sizes[jj]!=0)
cluster[jj][kk][ll]=means[jj][kk][ll]/sizes[jj];
else
cluster[jj][kk][ll]=0;
}
}
}
if(f==0)
break;
f=0;
}*/
/*for(int i=0;i<max_energy_states;i++)
{
cout<<rnk_host[i]<<" ";
}
cout<<endl;*/
return 0;
} | #include<iostream>
#include<string>
#include<malloc.h>
#include<fstream>
#include<sstream>
#include<vector>
#include<cmath>
#include<cstdio>
#include<stdlib.h>
#include<hip/hip_runtime.h>
#define MAX_CLUSTER_SIZE 6
#define BLOCK_SIZE 16
using namespace std;
typedef vector<double> record_t;
typedef vector<record_t> data_t;
long long int max_atoms=0;
long long int max_energy_states=0;
int k_clus,cls;
double cluster_lambda=0.0;
int f=0;
istream& operator >>(istream& ins, record_t &record)
{
record.clear();
string line;
getline(ins,line);
stringstream ss(line);
string field;
while(getline(ss,field,','))
{
stringstream fs(field);
double d=0.0;
fs>>d;
record.push_back(d);
}
return ins;
}
istream& operator >> (istream& ins, data_t& data)
{
data.clear();
record_t record;
while(ins>>record)
{
data.push_back(record);
}
return ins;
}
__global__ void findMeans(const float *cluster, const float *instance, int *rnk, const int num, const int clus_size, const int maxAtoms)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// int i = threadIdx.x;
if(i<num)
{
float min=0.0;
for(int j=0;j<maxAtoms;j++)
{
float d=0.0;
float s=0.0;
for(int k=0;k<3;k++)
{
d=instance[(j*5)+k+2]-cluster[((0*clus_size)+j)*maxAtoms +k];
s=s+(d*d);
}
s=sqrt(s);
min+=s;
}
int cls=-1;
for(int clus=0;clus<clus_size;clus++)
{
float val=0.0;
for(int p=0;p<maxAtoms;p++)
{
float d=0.0;
float s=0.0;
for(int l=0;l<3;l++)
{
d=instance[((i*maxAtoms+p)*5)+l+2]-cluster[((clus*clus_size)+p)*maxAtoms +l];
s=s+(d*d);
}
s=sqrt(s);
val+=s;
}
if(val<=min)
{
cls=clus;
min=val;
}
}
rnk[i]=cls+1;
}
}
int main(void)
{
hipError_t err=hipSuccess;
data_t data;
ifstream infile("alanine.csv");
infile>>data;
if(!infile.eof())
{
cout<<"Error! Please check file.\n";
return 1;
}
infile.close();
cout<<"Number of rows in file : "<<data.size()<<endl;
for(long long int i=0;i<data.size();i++)
{
if(i>0)
{
if(data[i][1]<data[i-1][1])
{
max_atoms=i;
break;
}
}
}
cout<<"Number of atoms : "<<max_atoms<<endl;
max_energy_states=data[data.size()-1][0];
// max_energy_states=32;
cout<<"Total number of conformations : "<<max_energy_states<<endl;
//allocate host cluster
int s = MAX_CLUSTER_SIZE*max_atoms*3;
size_t sz = s*sizeof(float);
float *host_cluster=(float *)malloc(sz);
if(host_cluster==NULL)
cout<<"Not able to allocate host cluster"<<endl;
//Initializing host cluster
for(int i=0;i<MAX_CLUSTER_SIZE;i++)
{
int clus_num=rand()%max_energy_states + 0;
for(long long int ii=0;ii<max_atoms;ii++)
{
for(int jj=0;jj<3;jj++)
{
host_cluster[((i*MAX_CLUSTER_SIZE)+ii)*max_atoms+jj]=data[clus_num*max_atoms+ii][jj+2];
}
}
}
//allocate device cluster
float *d_clus = NULL;
hipMalloc((void **)&d_clus, sz);
if(err!=hipSuccess)
{
cout<<"Failed to allocate cluster on device!\n";
exit(EXIT_FAILURE);
}
//initializing device cluster
hipMemcpy(d_clus, host_cluster, sz, hipMemcpyHostToDevice);
if(err!=hipSuccess)
{
cout<<"Failed to copy cluster to device\n";
exit(EXIT_FAILURE);
}
//allocate host instance
float *host_instances;
long int row=data.size();
int col=5;
long int size=row*col;
host_instances=new float[size];
for(long long int i=0;i<row;i++)
{
for(int j=0;j<col;j++)
{
host_instances[i*col+j]=data[i][j];
}
}
//allocate device instance
float *device_instances;
err=hipMalloc((void **)&device_instances,size*sizeof(float));
if(err!=hipSuccess)
{
cout<<"error allocating device instance"<<endl;
exit(EXIT_FAILURE);
}
err=hipMemcpy(device_instances, host_instances, size*sizeof(float), hipMemcpyHostToDevice);
if(err!=hipSuccess)
{
cout<<"error copying device instance"<<endl;
exit(EXIT_FAILURE);
}
int *rnk_host;
int rnk[max_energy_states];
rnk_host=new int[max_energy_states];
for(long long int i=0;i<max_energy_states;i++)
{
rnk_host[i]=0;
rnk[i]=0;
}
int *rnk_device;
err=hipMalloc((void **)&rnk_device,max_energy_states*sizeof(int));
if(err!=hipSuccess)
{
cout<<"error allocating device rnk"<<endl;
exit(EXIT_FAILURE);
}
err=hipMemcpy(rnk_device, rnk_host, max_energy_states*sizeof(int), hipMemcpyHostToDevice);
if(err!=hipSuccess)
{
cout<<"error allocating device instance"<<endl;
exit(EXIT_FAILURE);
}
// dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
int num_blocks=(max_energy_states+511)/512;
//while(1)
//{
findMeans<<<48,256>>>(d_clus, device_instances, rnk_device, max_energy_states, MAX_CLUSTER_SIZE, max_atoms);
err=hipMemcpy(rnk_host, rnk_device, max_energy_states*sizeof(int), hipMemcpyDeviceToHost);
if(err!=hipSuccess)
{
cout<<"error allocating device rnk"<<endl;
exit(EXIT_FAILURE);
}
//while(1)
//{
//E - Step
for(long long int j=0;j<max_energy_states;j++)
{
double min=0.0;
for(long long int y=0;y<max_atoms;y++)
{
double d=0.0,s=0.0;
for(int z=0;z<3;z++)
{
d=host_instances[(y*5)+z+2]-host_cluster[((0*MAX_CLUSTER_SIZE)+y)*max_atoms +z];
s=s+(d*d);
}
s=sqrt(s);
min+=s;
}
cls=-1;
for(int clus=0;clus<MAX_CLUSTER_SIZE;clus++)
{
double val=0.0;
for(long long int p=0;p<max_atoms;p++)
{
double d=0.0,s=0.0;
for(int l=0;l<3;l++)
{
d=data[j*max_atoms+p][l+2]-host_cluster[((clus*MAX_CLUSTER_SIZE)+p)*max_atoms +l];
s=s+(d*d);
}
s=sqrt(s);
val+=s;
}
if(val<=min)
{
cls=clus;
min=val;
}
}
rnk[j]=cls+1;
}
int flag=0;
for(long int i=0;i<100;i++)
{
if(rnk[i]!=rnk_host[i])
flag=1;
cout<<rnk[i]<<"\t"<<rnk_host[i]<<endl;
}
if(flag==1)
cout<<"Not Matching"<<endl;
/*
//M - Step
int sizes[MAX_CLUSTER_SIZE];
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
sizes[jj]=0;
double means[MAX_CLUSTER_SIZE][max_atoms][3];
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
means[jj][kk][ll]=0;
}
}
}
for(long long int jj=0;jj<max_energy_states;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
double d=data[jj*max_atoms+kk][ll+2];
means[rnk[jj]-1][kk][ll]+=d;
sizes[rnk[jj]-1]++;
}
}
}
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
if(cluster[jj][kk][ll]!=means[jj][kk][ll]/sizes[jj])
{
f=1;
}
if(sizes[jj]!=0)
cluster[jj][kk][ll]=means[jj][kk][ll]/sizes[jj];
else
cluster[jj][kk][ll]=0;
}
}
}
if(f==0)
break;
f=0;
}*/
/*for(int i=0;i<max_energy_states;i++)
{
cout<<rnk_host[i]<<" ";
}
cout<<endl;*/
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<iostream>
#include<string>
#include<malloc.h>
#include<fstream>
#include<sstream>
#include<vector>
#include<cmath>
#include<cstdio>
#include<stdlib.h>
#include<hip/hip_runtime.h>
#define MAX_CLUSTER_SIZE 6
#define BLOCK_SIZE 16
using namespace std;
typedef vector<double> record_t;
typedef vector<record_t> data_t;
long long int max_atoms=0;
long long int max_energy_states=0;
int k_clus,cls;
double cluster_lambda=0.0;
int f=0;
istream& operator >>(istream& ins, record_t &record)
{
record.clear();
string line;
getline(ins,line);
stringstream ss(line);
string field;
while(getline(ss,field,','))
{
stringstream fs(field);
double d=0.0;
fs>>d;
record.push_back(d);
}
return ins;
}
istream& operator >> (istream& ins, data_t& data)
{
data.clear();
record_t record;
while(ins>>record)
{
data.push_back(record);
}
return ins;
}
__global__ void findMeans(const float *cluster, const float *instance, int *rnk, const int num, const int clus_size, const int maxAtoms)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// int i = threadIdx.x;
if(i<num)
{
float min=0.0;
for(int j=0;j<maxAtoms;j++)
{
float d=0.0;
float s=0.0;
for(int k=0;k<3;k++)
{
d=instance[(j*5)+k+2]-cluster[((0*clus_size)+j)*maxAtoms +k];
s=s+(d*d);
}
s=sqrt(s);
min+=s;
}
int cls=-1;
for(int clus=0;clus<clus_size;clus++)
{
float val=0.0;
for(int p=0;p<maxAtoms;p++)
{
float d=0.0;
float s=0.0;
for(int l=0;l<3;l++)
{
d=instance[((i*maxAtoms+p)*5)+l+2]-cluster[((clus*clus_size)+p)*maxAtoms +l];
s=s+(d*d);
}
s=sqrt(s);
val+=s;
}
if(val<=min)
{
cls=clus;
min=val;
}
}
rnk[i]=cls+1;
}
}
int main(void)
{
hipError_t err=hipSuccess;
data_t data;
ifstream infile("alanine.csv");
infile>>data;
if(!infile.eof())
{
cout<<"Error! Please check file.\n";
return 1;
}
infile.close();
cout<<"Number of rows in file : "<<data.size()<<endl;
for(long long int i=0;i<data.size();i++)
{
if(i>0)
{
if(data[i][1]<data[i-1][1])
{
max_atoms=i;
break;
}
}
}
cout<<"Number of atoms : "<<max_atoms<<endl;
max_energy_states=data[data.size()-1][0];
// max_energy_states=32;
cout<<"Total number of conformations : "<<max_energy_states<<endl;
//allocate host cluster
int s = MAX_CLUSTER_SIZE*max_atoms*3;
size_t sz = s*sizeof(float);
float *host_cluster=(float *)malloc(sz);
if(host_cluster==NULL)
cout<<"Not able to allocate host cluster"<<endl;
//Initializing host cluster
for(int i=0;i<MAX_CLUSTER_SIZE;i++)
{
int clus_num=rand()%max_energy_states + 0;
for(long long int ii=0;ii<max_atoms;ii++)
{
for(int jj=0;jj<3;jj++)
{
host_cluster[((i*MAX_CLUSTER_SIZE)+ii)*max_atoms+jj]=data[clus_num*max_atoms+ii][jj+2];
}
}
}
//allocate device cluster
float *d_clus = NULL;
hipMalloc((void **)&d_clus, sz);
if(err!=hipSuccess)
{
cout<<"Failed to allocate cluster on device!\n";
exit(EXIT_FAILURE);
}
//initializing device cluster
hipMemcpy(d_clus, host_cluster, sz, hipMemcpyHostToDevice);
if(err!=hipSuccess)
{
cout<<"Failed to copy cluster to device\n";
exit(EXIT_FAILURE);
}
//allocate host instance
float *host_instances;
long int row=data.size();
int col=5;
long int size=row*col;
host_instances=new float[size];
for(long long int i=0;i<row;i++)
{
for(int j=0;j<col;j++)
{
host_instances[i*col+j]=data[i][j];
}
}
//allocate device instance
float *device_instances;
err=hipMalloc((void **)&device_instances,size*sizeof(float));
if(err!=hipSuccess)
{
cout<<"error allocating device instance"<<endl;
exit(EXIT_FAILURE);
}
err=hipMemcpy(device_instances, host_instances, size*sizeof(float), hipMemcpyHostToDevice);
if(err!=hipSuccess)
{
cout<<"error copying device instance"<<endl;
exit(EXIT_FAILURE);
}
int *rnk_host;
int rnk[max_energy_states];
rnk_host=new int[max_energy_states];
for(long long int i=0;i<max_energy_states;i++)
{
rnk_host[i]=0;
rnk[i]=0;
}
int *rnk_device;
err=hipMalloc((void **)&rnk_device,max_energy_states*sizeof(int));
if(err!=hipSuccess)
{
cout<<"error allocating device rnk"<<endl;
exit(EXIT_FAILURE);
}
err=hipMemcpy(rnk_device, rnk_host, max_energy_states*sizeof(int), hipMemcpyHostToDevice);
if(err!=hipSuccess)
{
cout<<"error allocating device instance"<<endl;
exit(EXIT_FAILURE);
}
// dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
int num_blocks=(max_energy_states+511)/512;
//while(1)
//{
findMeans<<<48,256>>>(d_clus, device_instances, rnk_device, max_energy_states, MAX_CLUSTER_SIZE, max_atoms);
err=hipMemcpy(rnk_host, rnk_device, max_energy_states*sizeof(int), hipMemcpyDeviceToHost);
if(err!=hipSuccess)
{
cout<<"error allocating device rnk"<<endl;
exit(EXIT_FAILURE);
}
//while(1)
//{
//E - Step
for(long long int j=0;j<max_energy_states;j++)
{
double min=0.0;
for(long long int y=0;y<max_atoms;y++)
{
double d=0.0,s=0.0;
for(int z=0;z<3;z++)
{
d=host_instances[(y*5)+z+2]-host_cluster[((0*MAX_CLUSTER_SIZE)+y)*max_atoms +z];
s=s+(d*d);
}
s=sqrt(s);
min+=s;
}
cls=-1;
for(int clus=0;clus<MAX_CLUSTER_SIZE;clus++)
{
double val=0.0;
for(long long int p=0;p<max_atoms;p++)
{
double d=0.0,s=0.0;
for(int l=0;l<3;l++)
{
d=data[j*max_atoms+p][l+2]-host_cluster[((clus*MAX_CLUSTER_SIZE)+p)*max_atoms +l];
s=s+(d*d);
}
s=sqrt(s);
val+=s;
}
if(val<=min)
{
cls=clus;
min=val;
}
}
rnk[j]=cls+1;
}
int flag=0;
for(long int i=0;i<100;i++)
{
if(rnk[i]!=rnk_host[i])
flag=1;
cout<<rnk[i]<<"\t"<<rnk_host[i]<<endl;
}
if(flag==1)
cout<<"Not Matching"<<endl;
/*
//M - Step
int sizes[MAX_CLUSTER_SIZE];
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
sizes[jj]=0;
double means[MAX_CLUSTER_SIZE][max_atoms][3];
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
means[jj][kk][ll]=0;
}
}
}
for(long long int jj=0;jj<max_energy_states;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
double d=data[jj*max_atoms+kk][ll+2];
means[rnk[jj]-1][kk][ll]+=d;
sizes[rnk[jj]-1]++;
}
}
}
for(int jj=0;jj<MAX_CLUSTER_SIZE;jj++)
{
for(long long int kk=0;kk<max_atoms;kk++)
{
for(int ll=0;ll<3;ll++)
{
if(cluster[jj][kk][ll]!=means[jj][kk][ll]/sizes[jj])
{
f=1;
}
if(sizes[jj]!=0)
cluster[jj][kk][ll]=means[jj][kk][ll]/sizes[jj];
else
cluster[jj][kk][ll]=0;
}
}
}
if(f==0)
break;
f=0;
}*/
/*for(int i=0;i<max_energy_states;i++)
{
cout<<rnk_host[i]<<" ";
}
cout<<endl;*/
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9findMeansPKfS0_Piiii
.globl _Z9findMeansPKfS0_Piiii
.p2align 8
.type _Z9findMeansPKfS0_Piiii,@function
_Z9findMeansPKfS0_Piiii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_18
s_clause 0x1
s_load_b32 s10, s[0:1], 0x20
s_load_b128 s[4:7], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s10, 1
s_cbranch_scc1 .LBB0_6
v_mov_b32_e32 v0, 0
s_mov_b32 s9, 0
s_add_u32 s11, s6, 8
s_addc_u32 s12, s7, 0
s_mov_b32 s8, s9
s_mov_b32 s13, s9
.LBB0_3:
s_lshl_b64 s[2:3], s[8:9], 2
v_mov_b32_e32 v2, 0
s_add_u32 s14, s4, s2
s_addc_u32 s15, s5, s3
s_mov_b64 s[2:3], 0
.LBB0_4:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s11, s2
s_addc_u32 s17, s12, s3
s_add_u32 s18, s14, s2
s_addc_u32 s19, s15, s3
s_load_b32 s16, s[16:17], 0x0
s_load_b32 s17, s[18:19], 0x0
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmp_eq_u32 s2, 12
s_waitcnt lgkmcnt(0)
v_sub_f32_e64 v3, s16, s17
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v2, v3, v3
s_cbranch_scc0 .LBB0_4
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v3, 0x4f800000, v2
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v2
s_add_i32 s13, s13, 1
s_add_i32 s8, s8, s10
s_add_u32 s11, s11, 20
s_addc_u32 s12, s12, 0
v_cndmask_b32_e32 v2, v2, v3, vcc_lo
s_cmp_eq_u32 s13, s10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_sqrt_f32_e32 v3, v2
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v4, -1, v3
v_add_nc_u32_e32 v5, 1, v3
v_fma_f32 v6, -v4, v3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v7, -v5, v3, v2
v_cmp_ge_f32_e64 s2, 0, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v3, v3, v4, s2
v_cmp_lt_f32_e64 s2, 0, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v3, v3, v5, s2
v_mul_f32_e32 v4, 0x37800000, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v3, v3, v4, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v2, 0x260
v_cndmask_b32_e32 v2, v3, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v0, v0, v2
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_7
.LBB0_6:
v_mov_b32_e32 v0, 0
.LBB0_7:
s_load_b32 s11, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s11, 1
s_cbranch_scc1 .LBB0_16
v_mul_lo_u32 v2, s10, v1
s_cmp_gt_i32 s10, 0
v_mov_b32_e32 v4, -1
s_cselect_b32 s13, -1, 0
s_add_u32 s6, s6, 8
s_mov_b32 s9, 0
s_mul_i32 s12, s10, s11
s_addc_u32 s7, s7, 0
v_lshl_add_u32 v5, v2, 2, v2
s_mov_b32 s14, 0
s_mov_b32 s15, 0
s_branch .LBB0_10
.LBB0_9:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
v_cmp_nle_f32_e32 vcc_lo, v6, v0
s_add_i32 s14, s14, s12
v_cndmask_b32_e32 v0, v6, v0, vcc_lo
v_cndmask_b32_e32 v4, s15, v4, vcc_lo
s_add_i32 s15, s15, 1
s_cmp_eq_u32 s15, s11
s_cbranch_scc1 .LBB0_15
.LBB0_10:
v_mov_b32_e32 v6, 0
s_and_not1_b32 vcc_lo, exec_lo, s13
s_cbranch_vccnz .LBB0_9
v_mov_b32_e32 v2, v5
s_mov_b32 s16, 0
s_mov_b32 s8, s14
.LBB0_12:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v3, 31, v2
s_lshl_b64 s[2:3], s[8:9], 2
s_add_u32 s17, s4, s2
s_addc_u32 s18, s5, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[7:8], 2, v[2:3]
v_mov_b32_e32 v3, 0
s_mov_b64 s[2:3], 0
v_add_co_u32 v7, vcc_lo, s6, v7
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
.LBB0_13:
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, v7, s2
v_add_co_ci_u32_e32 v10, vcc_lo, s3, v8, vcc_lo
s_add_u32 s20, s17, s2
s_addc_u32 s21, s18, s3
s_add_u32 s2, s2, 4
global_load_b32 v9, v[9:10], off
s_load_b32 s19, s[20:21], 0x0
s_addc_u32 s3, s3, 0
s_cmp_eq_u32 s2, 12
s_waitcnt vmcnt(0) lgkmcnt(0)
v_subrev_f32_e32 v9, s19, v9
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v3, v9, v9
s_cbranch_scc0 .LBB0_13
s_delay_alu instid0(VALU_DEP_1)
v_dual_mul_f32 v7, 0x4f800000, v3 :: v_dual_add_nc_u32 v2, 5, v2
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v3
s_add_i32 s16, s16, 1
s_add_i32 s8, s8, s10
s_cmp_eq_u32 s16, s10
v_cndmask_b32_e32 v3, v3, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_sqrt_f32_e32 v7, v3
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v8, -1, v7
v_add_nc_u32_e32 v9, 1, v7
v_fma_f32 v10, -v8, v7, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v11, -v9, v7, v3
v_cmp_ge_f32_e64 s2, 0, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v7, v7, v8, s2
v_cmp_lt_f32_e64 s2, 0, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v7, v7, v9, s2
v_mul_f32_e32 v8, 0x37800000, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v7, v7, v8, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v3, 0x260
v_cndmask_b32_e32 v3, v7, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v6, v6, v3
s_cbranch_scc0 .LBB0_12
s_branch .LBB0_9
.LBB0_15:
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v0, 1, v4
s_branch .LBB0_17
.LBB0_16:
v_mov_b32_e32 v0, 0
.LBB0_17:
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
.LBB0_18:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9findMeansPKfS0_Piiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9findMeansPKfS0_Piiii, .Lfunc_end0-_Z9findMeansPKfS0_Piiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9findMeansPKfS0_Piiii
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z9findMeansPKfS0_Piiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<stdio.h>
#include<iostream>
__global__ void foo() {}
int main() {
foo<<<1,1>>>();
std::cout<<"The Result is "
<<cudaGetErrorString(cudaGetLastError())<<std::endl;
return 0;
} | code for sm_80
Function : _Z3foov
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#include<iostream>
__global__ void foo() {}
int main() {
foo<<<1,1>>>();
std::cout<<"The Result is "
<<cudaGetErrorString(cudaGetLastError())<<std::endl;
return 0;
} | .file "tmpxft_00121c8f_00000000-6_test.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z21__device_stub__Z3foovv
.type _Z21__device_stub__Z3foovv, @function
_Z21__device_stub__Z3foovv:
.LFB3694:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z3foov(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z21__device_stub__Z3foovv, .-_Z21__device_stub__Z3foovv
.globl _Z3foov
.type _Z3foov, @function
_Z3foov:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z21__device_stub__Z3foovv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3foov, .-_Z3foov
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "The Result is "
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $32, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbx
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %eax
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z21__device_stub__Z3foovv
jmp .L12
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z3foov"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3foov(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<iostream>
__global__ void foo() {}
int main() {
foo<<<1,1>>>();
std::cout<<"The Result is "
<<cudaGetErrorString(cudaGetLastError())<<std::endl;
return 0;
} | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<iostream>
__global__ void foo() {}
int main() {
foo<<<1,1>>>();
std::cout<<"The Result is "
<<hipGetErrorString(hipGetLastError())<<std::endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<iostream>
__global__ void foo() {}
int main() {
foo<<<1,1>>>();
std::cout<<"The Result is "
<<hipGetErrorString(hipGetLastError())<<std::endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3foov
.globl _Z3foov
.p2align 8
.type _Z3foov,@function
_Z3foov:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3foov
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3foov, .Lfunc_end0-_Z3foov
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3foov
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z3foov.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<iostream>
__global__ void foo() {}
int main() {
foo<<<1,1>>>();
std::cout<<"The Result is "
<<hipGetErrorString(hipGetLastError())<<std::endl;
return 0;
} | .text
.file "test.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__foov # -- Begin function _Z18__device_stub__foov
.p2align 4, 0x90
.type _Z18__device_stub__foov,@function
_Z18__device_stub__foov: # @_Z18__device_stub__foov
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3foov, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z18__device_stub__foov, .Lfunc_end0-_Z18__device_stub__foov
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $64, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -16
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3foov, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq hipGetLastError
movl %eax, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB1_3
# %bb.4:
movq %rax, %rdi
movq %rax, %rbx
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB1_5
.LBB1_3:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB1_5: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB1_10
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_8
# %bb.7:
movzbl 67(%rbx), %eax
jmp .LBB1_9
.LBB1_8:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $64, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB1_10:
.cfi_def_cfa_offset 80
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3foov, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3foov,@object # @_Z3foov
.section .rodata,"a",@progbits
.globl _Z3foov
.p2align 3, 0x0
_Z3foov:
.quad _Z18__device_stub__foov
.size _Z3foov, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "The Result is "
.size .L.str, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3foov"
.size .L__unnamed_1, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__foov
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3foov
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3foov
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3foov
.globl _Z3foov
.p2align 8
.type _Z3foov,@function
_Z3foov:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3foov
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3foov, .Lfunc_end0-_Z3foov
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3foov
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z3foov.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00121c8f_00000000-6_test.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z21__device_stub__Z3foovv
.type _Z21__device_stub__Z3foovv, @function
_Z21__device_stub__Z3foovv:
.LFB3694:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z3foov(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z21__device_stub__Z3foovv, .-_Z21__device_stub__Z3foovv
.globl _Z3foov
.type _Z3foov, @function
_Z3foov:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z21__device_stub__Z3foovv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3foov, .-_Z3foov
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "The Result is "
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $32, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbx
call cudaGetLastError@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %eax
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z21__device_stub__Z3foovv
jmp .L12
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z3foov"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3foov(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__foov # -- Begin function _Z18__device_stub__foov
.p2align 4, 0x90
.type _Z18__device_stub__foov,@function
_Z18__device_stub__foov: # @_Z18__device_stub__foov
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3foov, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z18__device_stub__foov, .Lfunc_end0-_Z18__device_stub__foov
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $64, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -16
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3foov, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq hipGetLastError
movl %eax, %edi
callq hipGetErrorString
testq %rax, %rax
je .LBB1_3
# %bb.4:
movq %rax, %rdi
movq %rax, %rbx
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB1_5
.LBB1_3:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB1_5: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB1_10
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_8
# %bb.7:
movzbl 67(%rbx), %eax
jmp .LBB1_9
.LBB1_8:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $64, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB1_10:
.cfi_def_cfa_offset 80
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3foov, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3foov,@object # @_Z3foov
.section .rodata,"a",@progbits
.globl _Z3foov
.p2align 3, 0x0
_Z3foov:
.quad _Z18__device_stub__foov
.size _Z3foov, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "The Result is "
.size .L.str, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3foov"
.size .L__unnamed_1, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__foov
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3foov
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #ifndef _LIST_CU_CUDA
#define _LIST_CU_CUDA
template <typename T>
struct Node {
T data;
Node *next;
};
template <typename T> class List {
private:
Node<T> *head;
int count = 0;
public:
__device__ __host__ List() {
head = NULL;
}
__device__ __host__ bool isEmpty()
{
return (count == 0);
}
__device__ __host__ void push(T val) {
Node<T> *n = new Node<T>();
n->data = val;
n->next = head;
head = n;
count++;
}
__device__ __host__ T pop() {
if (isEmpty())
{
return NULL;
}
else
{
if (head) {
T p = head->data;
head = head->next;
count--;
return p;
}
}
}
__device__ __host__ bool search(T val) {
Node<T> *temp = head;
while (temp->next) {
if (temp->data == val) return true;
else temp = temp->next;
}
delete temp;
return false;
}
__device__ __host__ int size()
{
return count;
}
__device__ __host__ T operator[](int index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
__device__ __host__ T get_index(int index)
{
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
/*__device__ __host__ T operator[](int & index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}*/
};
#endif //_LIST_CU_CUDA | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #ifndef _LIST_CU_CUDA
#define _LIST_CU_CUDA
template <typename T>
struct Node {
T data;
Node *next;
};
template <typename T> class List {
private:
Node<T> *head;
int count = 0;
public:
__device__ __host__ List() {
head = NULL;
}
__device__ __host__ bool isEmpty()
{
return (count == 0);
}
__device__ __host__ void push(T val) {
Node<T> *n = new Node<T>();
n->data = val;
n->next = head;
head = n;
count++;
}
__device__ __host__ T pop() {
if (isEmpty())
{
return NULL;
}
else
{
if (head) {
T p = head->data;
head = head->next;
count--;
return p;
}
}
}
__device__ __host__ bool search(T val) {
Node<T> *temp = head;
while (temp->next) {
if (temp->data == val) return true;
else temp = temp->next;
}
delete temp;
return false;
}
__device__ __host__ int size()
{
return count;
}
__device__ __host__ T operator[](int index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
__device__ __host__ T get_index(int index)
{
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
/*__device__ __host__ T operator[](int & index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}*/
};
#endif //_LIST_CU_CUDA | .file "tmpxft_00078055_00000000-6_list.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2037:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2037:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #ifndef _LIST_CU_CUDA
#define _LIST_CU_CUDA
template <typename T>
struct Node {
T data;
Node *next;
};
template <typename T> class List {
private:
Node<T> *head;
int count = 0;
public:
__device__ __host__ List() {
head = NULL;
}
__device__ __host__ bool isEmpty()
{
return (count == 0);
}
__device__ __host__ void push(T val) {
Node<T> *n = new Node<T>();
n->data = val;
n->next = head;
head = n;
count++;
}
__device__ __host__ T pop() {
if (isEmpty())
{
return NULL;
}
else
{
if (head) {
T p = head->data;
head = head->next;
count--;
return p;
}
}
}
__device__ __host__ bool search(T val) {
Node<T> *temp = head;
while (temp->next) {
if (temp->data == val) return true;
else temp = temp->next;
}
delete temp;
return false;
}
__device__ __host__ int size()
{
return count;
}
__device__ __host__ T operator[](int index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
__device__ __host__ T get_index(int index)
{
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
/*__device__ __host__ T operator[](int & index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}*/
};
#endif //_LIST_CU_CUDA | #ifndef _LIST_CU_CUDA
#define _LIST_CU_CUDA
template <typename T>
struct Node {
T data;
Node *next;
};
template <typename T> class List {
private:
Node<T> *head;
int count = 0;
public:
__device__ __host__ List() {
head = NULL;
}
__device__ __host__ bool isEmpty()
{
return (count == 0);
}
__device__ __host__ void push(T val) {
Node<T> *n = new Node<T>();
n->data = val;
n->next = head;
head = n;
count++;
}
__device__ __host__ T pop() {
if (isEmpty())
{
return NULL;
}
else
{
if (head) {
T p = head->data;
head = head->next;
count--;
return p;
}
}
}
__device__ __host__ bool search(T val) {
Node<T> *temp = head;
while (temp->next) {
if (temp->data == val) return true;
else temp = temp->next;
}
delete temp;
return false;
}
__device__ __host__ int size()
{
return count;
}
__device__ __host__ T operator[](int index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
__device__ __host__ T get_index(int index)
{
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
/*__device__ __host__ T operator[](int & index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}*/
};
#endif //_LIST_CU_CUDA |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #ifndef _LIST_CU_CUDA
#define _LIST_CU_CUDA
template <typename T>
struct Node {
T data;
Node *next;
};
template <typename T> class List {
private:
Node<T> *head;
int count = 0;
public:
__device__ __host__ List() {
head = NULL;
}
__device__ __host__ bool isEmpty()
{
return (count == 0);
}
__device__ __host__ void push(T val) {
Node<T> *n = new Node<T>();
n->data = val;
n->next = head;
head = n;
count++;
}
__device__ __host__ T pop() {
if (isEmpty())
{
return NULL;
}
else
{
if (head) {
T p = head->data;
head = head->next;
count--;
return p;
}
}
}
__device__ __host__ bool search(T val) {
Node<T> *temp = head;
while (temp->next) {
if (temp->data == val) return true;
else temp = temp->next;
}
delete temp;
return false;
}
__device__ __host__ int size()
{
return count;
}
__device__ __host__ T operator[](int index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
__device__ __host__ T get_index(int index)
{
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
/*__device__ __host__ T operator[](int & index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}*/
};
#endif //_LIST_CU_CUDA | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #ifndef _LIST_CU_CUDA
#define _LIST_CU_CUDA
template <typename T>
struct Node {
T data;
Node *next;
};
template <typename T> class List {
private:
Node<T> *head;
int count = 0;
public:
__device__ __host__ List() {
head = NULL;
}
__device__ __host__ bool isEmpty()
{
return (count == 0);
}
__device__ __host__ void push(T val) {
Node<T> *n = new Node<T>();
n->data = val;
n->next = head;
head = n;
count++;
}
__device__ __host__ T pop() {
if (isEmpty())
{
return NULL;
}
else
{
if (head) {
T p = head->data;
head = head->next;
count--;
return p;
}
}
}
__device__ __host__ bool search(T val) {
Node<T> *temp = head;
while (temp->next) {
if (temp->data == val) return true;
else temp = temp->next;
}
delete temp;
return false;
}
__device__ __host__ int size()
{
return count;
}
__device__ __host__ T operator[](int index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
__device__ __host__ T get_index(int index)
{
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}
/*__device__ __host__ T operator[](int & index) {
int itr = 0;
Node<T> *temp = head;
while (itr != index && temp->next)
{
temp = temp->next;
itr++;
}
return temp->data;
}*/
};
#endif //_LIST_CU_CUDA | .text
.file "list.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00078055_00000000-6_list.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2037:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2037:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "list.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <time.h>
using namespace std;
//prodotto puntuale tra 2 matrici, usando il padding di memoria (pitch)
//input size matrici (m,n) , dimensioni blocchi (righe di thread, colonne di thread)
__host__
void inizializzaCPU(int *a,int m,int n){
srand((unsigned int)time(NULL));
for(int i=0;i<m;i++)
for(int j=0;j<n;j++)
a[i*n+j]=1+rand()%10;
}
__host__
void stampaCPU(int *a,int m,int n){
cout<<"--------------------------------"<<endl;
for(int i=0;i<m;i++){
for(int j=0;j<n;j++)
cout<<a[i*n+j]<<" ";
cout<<endl;
}
}
__global__
void calcolaProdPuntuale(int *a,int *b,int *c,int m,int n,int pitch){
//attenzione a come si accede alle matrici allocate in memoria gpu perchè c'e' il pitch
int nColonneEffettive = pitch/sizeof(int); //perchè il pitch è la lunghezza effettiva delle righe matrici allocate in memoria gpu, ma in bytes
int iRiga = threadIdx.x + blockIdx.x * blockDim.x; //indice riga del thread GLOBALMENTE alla griglia
int iCol = threadIdx.y + blockIdx.y * blockDim.y ; //indice colonna del thread GLOBALMENTE alla griglia
if(iRiga >= m || iCol >=n)
return;
c[iRiga * nColonneEffettive + iCol] = a[iRiga * nColonneEffettive + iCol] * b[iRiga * nColonneEffettive + iCol];
}
int main(int argc,char *argv[]){
int m,n;
dim3 dimBlocco; //default constructor, qui salviamo il numero di righe e di colonne (di thread) in un blocco
dim3 dimGriglia; //qui salviamo il numero di righe e di colonne (di blocchi) della griglia
if(argc!=5){
m=5;
n=5;
dimBlocco.x=2; //2 righe di thread in un blocco
dimBlocco.y=2; //2 colonne di thread in un blocco
//quindi 4 thread in totale per un blocco
}
else{
sscanf(argv[1],"%d",&m);
sscanf(argv[2],"%d",&n);
sscanf(argv[3],"%d",& dimBlocco.x); //n righe di thread in blocco
sscanf(argv[4],"%d",& dimBlocco.y); //n di colonne di thread in blocco
}
dimGriglia.x = m / dimBlocco.x;
if(m % dimBlocco.x != 0)
dimBlocco.x ++;
dimGriglia.y = n / dimBlocco.y;
if(n % dimBlocco.y != 0)
dimBlocco.y ++;
//strutture dati su host
int *h_a,*h_b,*h_c;
//alloco
h_a=(int *)malloc(n*m*sizeof(int));
h_b=(int *)malloc(n*m*sizeof(int));
h_c=(int *)malloc(n*m*sizeof(int));
//inizializzo le matrici
inizializzaCPU(h_a,m,n);
inizializzaCPU(h_b,m,n);
stampaCPU(h_a,m,n);
stampaCPU(h_b,m,n);
//per le strutture dati sulla memoria gpu
int *d_a,*d_b,*d_c;
size_t pitch; //qui verrà salvato dalla cudaMallocPitch la lunghezza effettiva (paddata) in bytes
//ATTENZIONE CHE IL PITCH DEVE ESSERE UNSIGNED LONG
//alloco memoria sull'heap gpu usando il pitch (padding)
cudaMallocPitch(&d_a,&pitch,n*sizeof(int),m);
cudaMallocPitch((void **)&d_b,&pitch,n*sizeof(int),m);
cudaMallocPitch((void **)&d_c,&pitch,n*sizeof(int),m);
//ora in pitch c'e' la lunghezza effettiva (in bytes) delle righe
//ora devo copiare il contenuto delle matrici dalla memoria host a memoria device
//pero' la memoria device è paddata (pitch) quindi devo usare la cudaMemcpy2D per evitare
//di inserire dati in quelle che sono le celle di padding, quindi...
cudaMemcpy2D(d_a,pitch,h_a,n*sizeof(int),n*sizeof(int),m,cudaMemcpyHostToDevice);
//i parametri sono : destinazione, lunghezza effettiva in bytes delle righe in memoria device (quindi quest'informazione sta in pitch)
//origine, lunghezza in bytes delle righe nella memoria host (potrebbe esserci il pitch anche qui)
//numero di bytes da copiare nella direzione orizzontale (lunghezza bytes riga)
//numero di righe da copiare
//flag destinazione
cudaMemcpy2D(d_b,pitch,h_b,n*sizeof(int),n*sizeof(int),m,cudaMemcpyHostToDevice);
//lancio il kernel
calcolaProdPuntuale<<<dimGriglia, dimBlocco>>>(d_a,d_b,d_c,m,n,pitch);
//ricopio dalla memoria device alla memoria host
int *copyFromGPU=(int *)malloc(n*m*sizeof(int));
//usando sempre la cudaMemcpy2D perchè nell'origine ci sono celle di padding
cudaMemcpy2D(copyFromGPU,n*sizeof(int),d_c,pitch,n*sizeof(int),m,cudaMemcpyDeviceToHost);
//stampo
stampaCPU(copyFromGPU,m,n);
//libero memoria
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
} | code for sm_80
Function : _Z19calcolaProdPuntualePiS_S_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e680000002100 */
/*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e620000002500 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R5, c[0x0][0x0], R0 ; /* 0x0000000005007a24 */
/* 0x002fca00078e0200 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ ULDC UR4, c[0x0][0x180] ; /* 0x0000600000047ab9 */
/* 0x000fe20000000800 */
/*00b0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00c0*/ USHF.R.S32.HI UR4, URZ, 0x2, UR4 ; /* 0x000000023f047899 */
/* 0x000fcc0008011404 */
/*00d0*/ IMAD R0, R0, UR4, R3 ; /* 0x0000000400007c24 */
/* 0x000fe2000f8e0203 */
/*00e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00f0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*0100*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*0110*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0120*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0130*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*0140*/ IMAD R9, R4, R3, RZ ; /* 0x0000000304097224 */
/* 0x004fca00078e02ff */
/*0150*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <time.h>
using namespace std;
//prodotto puntuale tra 2 matrici, usando il padding di memoria (pitch)
//input size matrici (m,n) , dimensioni blocchi (righe di thread, colonne di thread)
__host__
void inizializzaCPU(int *a,int m,int n){
srand((unsigned int)time(NULL));
for(int i=0;i<m;i++)
for(int j=0;j<n;j++)
a[i*n+j]=1+rand()%10;
}
__host__
void stampaCPU(int *a,int m,int n){
cout<<"--------------------------------"<<endl;
for(int i=0;i<m;i++){
for(int j=0;j<n;j++)
cout<<a[i*n+j]<<" ";
cout<<endl;
}
}
__global__
void calcolaProdPuntuale(int *a,int *b,int *c,int m,int n,int pitch){
//attenzione a come si accede alle matrici allocate in memoria gpu perchè c'e' il pitch
int nColonneEffettive = pitch/sizeof(int); //perchè il pitch è la lunghezza effettiva delle righe matrici allocate in memoria gpu, ma in bytes
int iRiga = threadIdx.x + blockIdx.x * blockDim.x; //indice riga del thread GLOBALMENTE alla griglia
int iCol = threadIdx.y + blockIdx.y * blockDim.y ; //indice colonna del thread GLOBALMENTE alla griglia
if(iRiga >= m || iCol >=n)
return;
c[iRiga * nColonneEffettive + iCol] = a[iRiga * nColonneEffettive + iCol] * b[iRiga * nColonneEffettive + iCol];
}
int main(int argc,char *argv[]){
int m,n;
dim3 dimBlocco; //default constructor, qui salviamo il numero di righe e di colonne (di thread) in un blocco
dim3 dimGriglia; //qui salviamo il numero di righe e di colonne (di blocchi) della griglia
if(argc!=5){
m=5;
n=5;
dimBlocco.x=2; //2 righe di thread in un blocco
dimBlocco.y=2; //2 colonne di thread in un blocco
//quindi 4 thread in totale per un blocco
}
else{
sscanf(argv[1],"%d",&m);
sscanf(argv[2],"%d",&n);
sscanf(argv[3],"%d",& dimBlocco.x); //n righe di thread in blocco
sscanf(argv[4],"%d",& dimBlocco.y); //n di colonne di thread in blocco
}
dimGriglia.x = m / dimBlocco.x;
if(m % dimBlocco.x != 0)
dimBlocco.x ++;
dimGriglia.y = n / dimBlocco.y;
if(n % dimBlocco.y != 0)
dimBlocco.y ++;
//strutture dati su host
int *h_a,*h_b,*h_c;
//alloco
h_a=(int *)malloc(n*m*sizeof(int));
h_b=(int *)malloc(n*m*sizeof(int));
h_c=(int *)malloc(n*m*sizeof(int));
//inizializzo le matrici
inizializzaCPU(h_a,m,n);
inizializzaCPU(h_b,m,n);
stampaCPU(h_a,m,n);
stampaCPU(h_b,m,n);
//per le strutture dati sulla memoria gpu
int *d_a,*d_b,*d_c;
size_t pitch; //qui verrà salvato dalla cudaMallocPitch la lunghezza effettiva (paddata) in bytes
//ATTENZIONE CHE IL PITCH DEVE ESSERE UNSIGNED LONG
//alloco memoria sull'heap gpu usando il pitch (padding)
cudaMallocPitch(&d_a,&pitch,n*sizeof(int),m);
cudaMallocPitch((void **)&d_b,&pitch,n*sizeof(int),m);
cudaMallocPitch((void **)&d_c,&pitch,n*sizeof(int),m);
//ora in pitch c'e' la lunghezza effettiva (in bytes) delle righe
//ora devo copiare il contenuto delle matrici dalla memoria host a memoria device
//pero' la memoria device è paddata (pitch) quindi devo usare la cudaMemcpy2D per evitare
//di inserire dati in quelle che sono le celle di padding, quindi...
cudaMemcpy2D(d_a,pitch,h_a,n*sizeof(int),n*sizeof(int),m,cudaMemcpyHostToDevice);
//i parametri sono : destinazione, lunghezza effettiva in bytes delle righe in memoria device (quindi quest'informazione sta in pitch)
//origine, lunghezza in bytes delle righe nella memoria host (potrebbe esserci il pitch anche qui)
//numero di bytes da copiare nella direzione orizzontale (lunghezza bytes riga)
//numero di righe da copiare
//flag destinazione
cudaMemcpy2D(d_b,pitch,h_b,n*sizeof(int),n*sizeof(int),m,cudaMemcpyHostToDevice);
//lancio il kernel
calcolaProdPuntuale<<<dimGriglia, dimBlocco>>>(d_a,d_b,d_c,m,n,pitch);
//ricopio dalla memoria device alla memoria host
int *copyFromGPU=(int *)malloc(n*m*sizeof(int));
//usando sempre la cudaMemcpy2D perchè nell'origine ci sono celle di padding
cudaMemcpy2D(copyFromGPU,n*sizeof(int),d_c,pitch,n*sizeof(int),m,cudaMemcpyDeviceToHost);
//stampo
stampaCPU(copyFromGPU,m,n);
//libero memoria
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
} | .file "tmpxft_00003f3a_00000000-6_eserc10.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z14inizializzaCPUPiii
.type _Z14inizializzaCPUPiii, @function
_Z14inizializzaCPUPiii:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r15
movl %esi, %ebx
movl %esi, 4(%rsp)
movl %edx, %r14d
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
testl %ebx, %ebx
jle .L3
movl $0, %r13d
movl $0, %r12d
movslq %r14d, %rax
movq %rax, 8(%rsp)
jmp .L5
.L7:
movslq %r13d, %rax
leaq (%r15,%rax,4), %rbx
movq 8(%rsp), %rsi
addq %rsi, %rax
leaq (%r15,%rax,4), %rbp
.L6:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
addl $1, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L6
.L8:
addl $1, %r12d
addl %r14d, %r13d
cmpl %r12d, 4(%rsp)
je .L3
.L5:
testl %r14d, %r14d
jg .L7
jmp .L8
.L3:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z14inizializzaCPUPiii, .-_Z14inizializzaCPUPiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "--------------------------------"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string " "
.text
.globl _Z9stampaCPUPiii
.type _Z9stampaCPUPiii, @function
_Z9stampaCPUPiii:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 16(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movl $32, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L25
cmpb $0, 56(%rbx)
je .L13
movzbl 67(%rbx), %eax
.L14:
movsbl %al, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
cmpl $0, 12(%rsp)
jle .L11
movl $0, %r15d
movl $0, %r14d
movslq 8(%rsp), %rax
movq %rax, 24(%rsp)
leaq _ZSt4cout(%rip), %rbp
leaq .LC1(%rip), %r13
jmp .L16
.L25:
call _ZSt16__throw_bad_castv@PLT
.L13:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
jmp .L14
.L26:
call _ZSt16__throw_bad_castv@PLT
.L27:
movzbl 67(%rbx), %esi
.L20:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addl $1, %r14d
movl 8(%rsp), %eax
addl %eax, %r15d
cmpl %r14d, 12(%rsp)
je .L11
.L16:
cmpl $0, 8(%rsp)
jle .L22
movslq %r15d, %rax
movq 16(%rsp), %rcx
leaq (%rcx,%rax,4), %rbx
movq 24(%rsp), %rdx
addq %rdx, %rax
leaq (%rcx,%rax,4), %r12
.L17:
movl (%rbx), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L17
.L22:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .L26
cmpb $0, 56(%rbx)
jne .L27
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L20
.L11:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z9stampaCPUPiii, .-_Z9stampaCPUPiii
.globl _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii
.type _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii, @function
_Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii:
.LFB3696:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L32
.L28:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L33
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z19calcolaProdPuntualePiS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L28
.L33:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii, .-_Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii
.globl _Z19calcolaProdPuntualePiS_S_iii
.type _Z19calcolaProdPuntualePiS_S_iii, @function
_Z19calcolaProdPuntualePiS_S_iii:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z19calcolaProdPuntualePiS_S_iii, .-_Z19calcolaProdPuntualePiS_S_iii
.section .rodata.str1.1
.LC2:
.string "%d"
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 68(%rsp)
cmpl $5, %edi
je .L37
movl $5, 8(%rsp)
movl $5, 12(%rsp)
movl $2, 52(%rsp)
movl $2, 60(%rsp)
movl $2, %ecx
movl $5, %r14d
.L38:
addl $1, %ecx
movl %ecx, 48(%rsp)
.L39:
movl 12(%rsp), %r13d
movl 52(%rsp), %ecx
movl %r13d, %eax
movl $0, %edx
divl %ecx
movl %eax, 64(%rsp)
testl %edx, %edx
je .L40
addl $1, %ecx
movl %ecx, 52(%rsp)
.L40:
movl %r13d, %ebx
imull %r14d, %ebx
movslq %ebx, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
movl %r13d, %edx
movl %r14d, %esi
movq %r12, %rdi
call _Z14inizializzaCPUPiii
movl 12(%rsp), %edx
movl 8(%rsp), %esi
movq %rbp, %rdi
call _Z14inizializzaCPUPiii
movl 12(%rsp), %edx
movl 8(%rsp), %esi
movq %r12, %rdi
call _Z9stampaCPUPiii
movl 12(%rsp), %edx
movl 8(%rsp), %esi
movq %rbp, %rdi
call _Z9stampaCPUPiii
movslq 8(%rsp), %rcx
movslq 12(%rsp), %rdx
salq $2, %rdx
leaq 40(%rsp), %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocPitch@PLT
movslq 8(%rsp), %rcx
movslq 12(%rsp), %rdx
salq $2, %rdx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocPitch@PLT
movslq 8(%rsp), %rcx
movslq 12(%rsp), %rdx
salq $2, %rdx
leaq 32(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocPitch@PLT
movslq 12(%rsp), %rcx
salq $2, %rcx
subq $8, %rsp
.cfi_def_cfa_offset 136
pushq $1
.cfi_def_cfa_offset 144
movslq 24(%rsp), %r9
movq %rcx, %r8
movq %r12, %rdx
movq 56(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy2D@PLT
movslq 28(%rsp), %rcx
salq $2, %rcx
movl $1, (%rsp)
movslq 24(%rsp), %r9
movq %rcx, %r8
movq %rbp, %rdx
movq 56(%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy2D@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
movl 56(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movq 60(%rsp), %rdi
movl 68(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L44
.L41:
movl 12(%rsp), %ebx
movl 8(%rsp), %r14d
movl %ebx, %edi
imull %r14d, %edi
movslq %edi, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, %r13
movslq %ebx, %rsi
salq $2, %rsi
subq $8, %rsp
.cfi_def_cfa_offset 136
pushq $2
.cfi_def_cfa_offset 144
movslq %r14d, %r9
movq %rsi, %r8
movq 56(%rsp), %rcx
movq 48(%rsp), %rdx
movq %rax, %rdi
call cudaMemcpy2D@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
movl 12(%rsp), %edx
movl 8(%rsp), %esi
movq %r13, %rdi
call _Z9stampaCPUPiii
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L45
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
movq %rsi, %rbx
leaq 8(%rsp), %rdx
movq 8(%rsi), %rdi
leaq .LC2(%rip), %rbp
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 12(%rsp), %rdx
movq 16(%rbx), %rdi
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 48(%rsp), %rdx
movq 24(%rbx), %rdi
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 52(%rsp), %rdx
movq 32(%rbx), %rdi
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
movl 8(%rsp), %r14d
movl 48(%rsp), %ecx
movl %r14d, %eax
movl $0, %edx
divl %ecx
movl %eax, 60(%rsp)
testl %edx, %edx
je .L39
jmp .L38
.L44:
movl 40(%rsp), %r9d
movl 12(%rsp), %r8d
movl 8(%rsp), %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii
jmp .L41
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC3:
.string "_Z19calcolaProdPuntualePiS_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z19calcolaProdPuntualePiS_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <time.h>
using namespace std;
//prodotto puntuale tra 2 matrici, usando il padding di memoria (pitch)
//input size matrici (m,n) , dimensioni blocchi (righe di thread, colonne di thread)
// Fill the m-by-n row-major matrix `a` with pseudo-random values in [1, 10].
// Note: reseeds the C PRNG from the wall clock on every call, so two calls
// within the same second replay the same sequence.
__host__
void inizializzaCPU(int *a,int m,int n){
    srand((unsigned int)time(NULL));
    for (int row = 0; row < m; ++row) {
        int *rowStart = a + row * n;
        for (int col = 0; col < n; ++col)
            rowStart[col] = 1 + rand() % 10;
    }
}
// Print the m-by-n row-major matrix `a` to stdout, one row per line with
// values separated by single spaces, preceded by a separator rule.
__host__
void stampaCPU(int *a,int m,int n){
    cout << "--------------------------------" << endl;
    for (int row = 0; row < m; ++row) {
        const int *rowStart = a + row * n;
        for (int col = 0; col < n; ++col)
            cout << rowStart[col] << " ";
        cout << endl;
    }
}
// Element-wise product c = a .* b of two m-by-n int matrices whose rows are
// padded to `pitch` bytes (as allocated by cudaMallocPitch).
// Launch layout: thread/block .x indexes rows, .y indexes columns; one
// thread per matrix element, out-of-range threads return early.
__global__
void calcolaProdPuntuale(int *a,int *b,int *c,int m,int n,int pitch){
// Careful when indexing the GPU matrices: rows are padded, so the effective
// row length is pitch/sizeof(int) elements, not n.
int nColonneEffettive = pitch/sizeof(int); // pitch is the actual (padded) row length of the device matrices, in bytes
int iRiga = threadIdx.x + blockIdx.x * blockDim.x; // row index of this thread, GLOBAL to the grid
int iCol = threadIdx.y + blockIdx.y * blockDim.y ; // column index of this thread, GLOBAL to the grid
if(iRiga >= m || iCol >=n)
return;
c[iRiga * nColonneEffettive + iCol] = a[iRiga * nColonneEffettive + iCol] * b[iRiga * nColonneEffettive + iCol];
}
// Driver: element-wise product of two m-by-n int matrices on the GPU using
// pitched (row-padded) device allocations.
// Usage: prog [m n blockRows blockCols]; defaults to m=n=5 and 2x2 blocks.
int main(int argc,char *argv[]){
    int m, n;
    dim3 dimBlocco;  // threads per block: x = rows, y = columns
    dim3 dimGriglia; // blocks in the grid: x = rows, y = columns
    if (argc != 5) {
        m = 5;
        n = 5;
        dimBlocco.x = 2;
        dimBlocco.y = 2;
    } else {
        sscanf(argv[1], "%d", &m);
        sscanf(argv[2], "%d", &n);
        sscanf(argv[3], "%d", &dimBlocco.x);
        sscanf(argv[4], "%d", &dimBlocco.y);
    }
    // Ceil-divide so the grid covers the whole matrix even when the block
    // size does not divide it evenly.
    // BUG FIX: the original incremented dimBlocco.x/.y here instead of
    // dimGriglia.x/.y, which both grew the block and left rows/columns of
    // the matrix uncovered by the grid.
    dimGriglia.x = m / dimBlocco.x;
    if (m % dimBlocco.x != 0)
        dimGriglia.x++;
    dimGriglia.y = n / dimBlocco.y;
    if (n % dimBlocco.y != 0)
        dimGriglia.y++;
    // Host input matrices (the original also allocated an unused h_c).
    int *h_a = (int *)malloc((size_t)m * n * sizeof(int));
    int *h_b = (int *)malloc((size_t)m * n * sizeof(int));
    inizializzaCPU(h_a, m, n);
    inizializzaCPU(h_b, m, n);
    stampaCPU(h_a, m, n);
    stampaCPU(h_b, m, n);
    // Device matrices. cudaMallocPitch pads each row for alignment and
    // reports the actual row length in bytes in `pitch` (must be size_t).
    int *d_a, *d_b, *d_c;
    size_t pitch;
    cudaMallocPitch((void **)&d_a, &pitch, n * sizeof(int), m);
    cudaMallocPitch((void **)&d_b, &pitch, n * sizeof(int), m);
    cudaMallocPitch((void **)&d_c, &pitch, n * sizeof(int), m);
    // 2D copies honour the destination pitch so padding bytes are skipped:
    // (dst, dst pitch, src, src pitch, row width in bytes, rows, direction).
    cudaMemcpy2D(d_a, pitch, h_a, n * sizeof(int), n * sizeof(int), m, cudaMemcpyHostToDevice);
    cudaMemcpy2D(d_b, pitch, h_b, n * sizeof(int), n * sizeof(int), m, cudaMemcpyHostToDevice);
    // Note: the kernel takes pitch as int, so the size_t value is narrowed.
    calcolaProdPuntuale<<<dimGriglia, dimBlocco>>>(d_a, d_b, d_c, m, n, pitch);
    // Copy the padded device result into a tightly packed host buffer; this
    // blocking copy also synchronizes with the kernel.
    int *copyFromGPU = (int *)malloc((size_t)m * n * sizeof(int));
    cudaMemcpy2D(copyFromGPU, n * sizeof(int), d_c, pitch, n * sizeof(int), m, cudaMemcpyDeviceToHost);
    stampaCPU(copyFromGPU, m, n);
    // Release everything (the original leaked copyFromGPU).
    free(h_a);
    free(h_b);
    free(copyFromGPU);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
} | #include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
using namespace std;
//prodotto puntuale tra 2 matrici, usando il padding di memoria (pitch)
//input size matrici (m,n) , dimensioni blocchi (righe di thread, colonne di thread)
// Fill the m-by-n row-major matrix `a` with pseudo-random values in [1, 10].
// Note: reseeds the C PRNG from the wall clock on every call, so two calls
// within the same second replay the same sequence.
__host__
void inizializzaCPU(int *a,int m,int n){
srand((unsigned int)time(NULL));
for(int i=0;i<m;i++)
for(int j=0;j<n;j++)
a[i*n+j]=1+rand()%10;
}
// Print the m-by-n row-major matrix `a` to stdout, one row per line with
// values separated by single spaces, preceded by a separator rule.
__host__
void stampaCPU(int *a,int m,int n){
cout<<"--------------------------------"<<endl;
for(int i=0;i<m;i++){
for(int j=0;j<n;j++)
cout<<a[i*n+j]<<" ";
cout<<endl;
}
}
// Element-wise product c = a .* b of two m-by-n int matrices whose rows are
// padded to `pitch` bytes (as allocated by hipMallocPitch).
// Launch layout: thread/block .x indexes rows, .y indexes columns; one
// thread per matrix element, out-of-range threads return early.
__global__
void calcolaProdPuntuale(int *a,int *b,int *c,int m,int n,int pitch){
// Careful when indexing the GPU matrices: rows are padded, so the effective
// row length is pitch/sizeof(int) elements, not n.
int nColonneEffettive = pitch/sizeof(int); // pitch is the actual (padded) row length of the device matrices, in bytes
int iRiga = threadIdx.x + blockIdx.x * blockDim.x; // row index of this thread, GLOBAL to the grid
int iCol = threadIdx.y + blockIdx.y * blockDim.y ; // column index of this thread, GLOBAL to the grid
if(iRiga >= m || iCol >=n)
return;
c[iRiga * nColonneEffettive + iCol] = a[iRiga * nColonneEffettive + iCol] * b[iRiga * nColonneEffettive + iCol];
}
// Driver: element-wise product of two m-by-n int matrices on the GPU using
// pitched (row-padded) device allocations.
// Usage: prog [m n blockRows blockCols]; defaults to m=n=5 and 2x2 blocks.
int main(int argc,char *argv[]){
    int m, n;
    dim3 dimBlocco;  // threads per block: x = rows, y = columns
    dim3 dimGriglia; // blocks in the grid: x = rows, y = columns
    if (argc != 5) {
        m = 5;
        n = 5;
        dimBlocco.x = 2;
        dimBlocco.y = 2;
    } else {
        sscanf(argv[1], "%d", &m);
        sscanf(argv[2], "%d", &n);
        sscanf(argv[3], "%d", &dimBlocco.x);
        sscanf(argv[4], "%d", &dimBlocco.y);
    }
    // Ceil-divide so the grid covers the whole matrix even when the block
    // size does not divide it evenly.
    // BUG FIX: the original incremented dimBlocco.x/.y here instead of
    // dimGriglia.x/.y, which both grew the block and left rows/columns of
    // the matrix uncovered by the grid.
    dimGriglia.x = m / dimBlocco.x;
    if (m % dimBlocco.x != 0)
        dimGriglia.x++;
    dimGriglia.y = n / dimBlocco.y;
    if (n % dimBlocco.y != 0)
        dimGriglia.y++;
    // Host input matrices (the original also allocated an unused h_c).
    int *h_a = (int *)malloc((size_t)m * n * sizeof(int));
    int *h_b = (int *)malloc((size_t)m * n * sizeof(int));
    inizializzaCPU(h_a, m, n);
    inizializzaCPU(h_b, m, n);
    stampaCPU(h_a, m, n);
    stampaCPU(h_b, m, n);
    // Device matrices. hipMallocPitch pads each row for alignment and
    // reports the actual row length in bytes in `pitch` (must be size_t).
    int *d_a, *d_b, *d_c;
    size_t pitch;
    hipMallocPitch((void **)&d_a, &pitch, n * sizeof(int), m);
    hipMallocPitch((void **)&d_b, &pitch, n * sizeof(int), m);
    hipMallocPitch((void **)&d_c, &pitch, n * sizeof(int), m);
    // 2D copies honour the destination pitch so padding bytes are skipped:
    // (dst, dst pitch, src, src pitch, row width in bytes, rows, direction).
    hipMemcpy2D(d_a, pitch, h_a, n * sizeof(int), n * sizeof(int), m, hipMemcpyHostToDevice);
    hipMemcpy2D(d_b, pitch, h_b, n * sizeof(int), n * sizeof(int), m, hipMemcpyHostToDevice);
    // Note: the kernel takes pitch as int, so the size_t value is narrowed.
    calcolaProdPuntuale<<<dimGriglia, dimBlocco>>>(d_a, d_b, d_c, m, n, pitch);
    // Copy the padded device result into a tightly packed host buffer; this
    // blocking copy also synchronizes with the kernel.
    int *copyFromGPU = (int *)malloc((size_t)m * n * sizeof(int));
    hipMemcpy2D(copyFromGPU, n * sizeof(int), d_c, pitch, n * sizeof(int), m, hipMemcpyDeviceToHost);
    stampaCPU(copyFromGPU, m, n);
    // Release everything (the original leaked copyFromGPU).
    free(h_a);
    free(h_b);
    free(copyFromGPU);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
using namespace std;
//prodotto puntuale tra 2 matrici, usando il padding di memoria (pitch)
//input size matrici (m,n) , dimensioni blocchi (righe di thread, colonne di thread)
// Fill the m-by-n row-major matrix `a` with pseudo-random values in [1, 10].
// Note: reseeds the C PRNG from the wall clock on every call, so two calls
// within the same second replay the same sequence.
__host__
void inizializzaCPU(int *a,int m,int n){
srand((unsigned int)time(NULL));
for(int i=0;i<m;i++)
for(int j=0;j<n;j++)
a[i*n+j]=1+rand()%10;
}
// Print the m-by-n row-major matrix `a` to stdout, one row per line with
// values separated by single spaces, preceded by a separator rule.
__host__
void stampaCPU(int *a,int m,int n){
cout<<"--------------------------------"<<endl;
for(int i=0;i<m;i++){
for(int j=0;j<n;j++)
cout<<a[i*n+j]<<" ";
cout<<endl;
}
}
// Element-wise product c = a .* b of two m-by-n int matrices whose rows are
// padded to `pitch` bytes (as allocated by hipMallocPitch).
// Launch layout: thread/block .x indexes rows, .y indexes columns; one
// thread per matrix element, out-of-range threads return early.
__global__
void calcolaProdPuntuale(int *a,int *b,int *c,int m,int n,int pitch){
// Careful when indexing the GPU matrices: rows are padded, so the effective
// row length is pitch/sizeof(int) elements, not n.
int nColonneEffettive = pitch/sizeof(int); // pitch is the actual (padded) row length of the device matrices, in bytes
int iRiga = threadIdx.x + blockIdx.x * blockDim.x; // row index of this thread, GLOBAL to the grid
int iCol = threadIdx.y + blockIdx.y * blockDim.y ; // column index of this thread, GLOBAL to the grid
if(iRiga >= m || iCol >=n)
return;
c[iRiga * nColonneEffettive + iCol] = a[iRiga * nColonneEffettive + iCol] * b[iRiga * nColonneEffettive + iCol];
}
// Driver: element-wise product of two m-by-n int matrices on the GPU using
// pitched (row-padded) device allocations.
// Usage: prog [m n blockRows blockCols]; defaults to m=n=5 and 2x2 blocks.
int main(int argc,char *argv[]){
    int m, n;
    dim3 dimBlocco;  // threads per block: x = rows, y = columns
    dim3 dimGriglia; // blocks in the grid: x = rows, y = columns
    if (argc != 5) {
        m = 5;
        n = 5;
        dimBlocco.x = 2;
        dimBlocco.y = 2;
    } else {
        sscanf(argv[1], "%d", &m);
        sscanf(argv[2], "%d", &n);
        sscanf(argv[3], "%d", &dimBlocco.x);
        sscanf(argv[4], "%d", &dimBlocco.y);
    }
    // Ceil-divide so the grid covers the whole matrix even when the block
    // size does not divide it evenly.
    // BUG FIX: the original incremented dimBlocco.x/.y here instead of
    // dimGriglia.x/.y, which both grew the block and left rows/columns of
    // the matrix uncovered by the grid.
    dimGriglia.x = m / dimBlocco.x;
    if (m % dimBlocco.x != 0)
        dimGriglia.x++;
    dimGriglia.y = n / dimBlocco.y;
    if (n % dimBlocco.y != 0)
        dimGriglia.y++;
    // Host input matrices (the original also allocated an unused h_c).
    int *h_a = (int *)malloc((size_t)m * n * sizeof(int));
    int *h_b = (int *)malloc((size_t)m * n * sizeof(int));
    inizializzaCPU(h_a, m, n);
    inizializzaCPU(h_b, m, n);
    stampaCPU(h_a, m, n);
    stampaCPU(h_b, m, n);
    // Device matrices. hipMallocPitch pads each row for alignment and
    // reports the actual row length in bytes in `pitch` (must be size_t).
    int *d_a, *d_b, *d_c;
    size_t pitch;
    hipMallocPitch((void **)&d_a, &pitch, n * sizeof(int), m);
    hipMallocPitch((void **)&d_b, &pitch, n * sizeof(int), m);
    hipMallocPitch((void **)&d_c, &pitch, n * sizeof(int), m);
    // 2D copies honour the destination pitch so padding bytes are skipped:
    // (dst, dst pitch, src, src pitch, row width in bytes, rows, direction).
    hipMemcpy2D(d_a, pitch, h_a, n * sizeof(int), n * sizeof(int), m, hipMemcpyHostToDevice);
    hipMemcpy2D(d_b, pitch, h_b, n * sizeof(int), n * sizeof(int), m, hipMemcpyHostToDevice);
    // Note: the kernel takes pitch as int, so the size_t value is narrowed.
    calcolaProdPuntuale<<<dimGriglia, dimBlocco>>>(d_a, d_b, d_c, m, n, pitch);
    // Copy the padded device result into a tightly packed host buffer; this
    // blocking copy also synchronizes with the kernel.
    int *copyFromGPU = (int *)malloc((size_t)m * n * sizeof(int));
    hipMemcpy2D(copyFromGPU, n * sizeof(int), d_c, pitch, n * sizeof(int), m, hipMemcpyDeviceToHost);
    stampaCPU(copyFromGPU, m, n);
    // Release everything (the original leaked copyFromGPU).
    free(h_a);
    free(h_b);
    free(copyFromGPU);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; gfx1100 code for kernel calcolaProdPuntuale(a, b, c, m, n, pitch):
; element-wise product of two pitched m-by-n int matrices.
; Kernarg offsets (see the amdgpu_metadata below): a,b at 0x0, c at 0x10,
; m,n at 0x18, pitch at 0x20, packed 16-bit workgroup sizes at 0x34.
; s[0:1] = kernarg base; s14/s15 = workgroup ids x/y (after 14 user SGPRs);
; v0 = packed workitem ids.
.protected _Z19calcolaProdPuntualePiS_S_iii
.globl _Z19calcolaProdPuntualePiS_S_iii
.p2align 8
.type _Z19calcolaProdPuntualePiS_S_iii,@function
_Z19calcolaProdPuntualePiS_S_iii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34     ; packed group_size_x | group_size_y << 16
s_load_b64 s[2:3], s[0:1], 0x18 ; s2 = m, s3 = n
; unpack workitem ids: v2 = tid.x (bits 0-9), v3 = tid.y (bits 10-19)
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
; unpack workgroup sizes: s5 = blockDim.x, s4 = blockDim.y
s_and_b32 s5, s4, 0xffff
s_lshr_b32 s4, s4, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; v0 = iRiga = blockIdx.x*blockDim.x + tid.x; v1 = iCol likewise for y
v_mad_u64_u32 v[0:1], null, s14, s5, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4]
; bounds check: keep only lanes with iRiga < m && iCol < n
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s3, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_clause 0x2
; load pitch (s2), the a/b pointers (s[4:7]) and the c pointer (s[0:1])
s_load_b32 s2, s[0:1], 0x20
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
; s2 = pitch/sizeof(int); linear index = iRiga*s2 + iCol, scaled to bytes
s_ashr_i32 s2, s2, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s2, v[1:2]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
; element addresses: a+off in v[2:3], b+off in v[4:5], c+off in v[0:1]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
; c[idx] = a[idx] * b[idx]
v_mul_lo_u32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19calcolaProdPuntualePiS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19calcolaProdPuntualePiS_S_iii, .Lfunc_end0-_Z19calcolaProdPuntualePiS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19calcolaProdPuntualePiS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19calcolaProdPuntualePiS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <time.h>
using namespace std;
//prodotto puntuale tra 2 matrici, usando il padding di memoria (pitch)
//input size matrici (m,n) , dimensioni blocchi (righe di thread, colonne di thread)
// Fill the m-by-n row-major matrix `a` with pseudo-random values in [1, 10].
// Note: reseeds the C PRNG from the wall clock on every call, so two calls
// within the same second replay the same sequence.
__host__
void inizializzaCPU(int *a,int m,int n){
srand((unsigned int)time(NULL));
for(int i=0;i<m;i++)
for(int j=0;j<n;j++)
a[i*n+j]=1+rand()%10;
}
// Print the m-by-n row-major matrix `a` to stdout, one row per line with
// values separated by single spaces, preceded by a separator rule.
__host__
void stampaCPU(int *a,int m,int n){
cout<<"--------------------------------"<<endl;
for(int i=0;i<m;i++){
for(int j=0;j<n;j++)
cout<<a[i*n+j]<<" ";
cout<<endl;
}
}
// Element-wise product c = a .* b of two m-by-n int matrices whose rows are
// padded to `pitch` bytes (as allocated by hipMallocPitch).
// Launch layout: thread/block .x indexes rows, .y indexes columns; one
// thread per matrix element, out-of-range threads return early.
__global__
void calcolaProdPuntuale(int *a,int *b,int *c,int m,int n,int pitch){
// Careful when indexing the GPU matrices: rows are padded, so the effective
// row length is pitch/sizeof(int) elements, not n.
int nColonneEffettive = pitch/sizeof(int); // pitch is the actual (padded) row length of the device matrices, in bytes
int iRiga = threadIdx.x + blockIdx.x * blockDim.x; // row index of this thread, GLOBAL to the grid
int iCol = threadIdx.y + blockIdx.y * blockDim.y ; // column index of this thread, GLOBAL to the grid
if(iRiga >= m || iCol >=n)
return;
c[iRiga * nColonneEffettive + iCol] = a[iRiga * nColonneEffettive + iCol] * b[iRiga * nColonneEffettive + iCol];
}
// Driver: element-wise product of two m-by-n int matrices on the GPU using
// pitched (row-padded) device allocations.
// Usage: prog [m n blockRows blockCols]; defaults to m=n=5 and 2x2 blocks.
int main(int argc,char *argv[]){
    int m, n;
    dim3 dimBlocco;  // threads per block: x = rows, y = columns
    dim3 dimGriglia; // blocks in the grid: x = rows, y = columns
    if (argc != 5) {
        m = 5;
        n = 5;
        dimBlocco.x = 2;
        dimBlocco.y = 2;
    } else {
        sscanf(argv[1], "%d", &m);
        sscanf(argv[2], "%d", &n);
        sscanf(argv[3], "%d", &dimBlocco.x);
        sscanf(argv[4], "%d", &dimBlocco.y);
    }
    // Ceil-divide so the grid covers the whole matrix even when the block
    // size does not divide it evenly.
    // BUG FIX: the original incremented dimBlocco.x/.y here instead of
    // dimGriglia.x/.y, which both grew the block and left rows/columns of
    // the matrix uncovered by the grid.
    dimGriglia.x = m / dimBlocco.x;
    if (m % dimBlocco.x != 0)
        dimGriglia.x++;
    dimGriglia.y = n / dimBlocco.y;
    if (n % dimBlocco.y != 0)
        dimGriglia.y++;
    // Host input matrices (the original also allocated an unused h_c).
    int *h_a = (int *)malloc((size_t)m * n * sizeof(int));
    int *h_b = (int *)malloc((size_t)m * n * sizeof(int));
    inizializzaCPU(h_a, m, n);
    inizializzaCPU(h_b, m, n);
    stampaCPU(h_a, m, n);
    stampaCPU(h_b, m, n);
    // Device matrices. hipMallocPitch pads each row for alignment and
    // reports the actual row length in bytes in `pitch` (must be size_t).
    int *d_a, *d_b, *d_c;
    size_t pitch;
    hipMallocPitch((void **)&d_a, &pitch, n * sizeof(int), m);
    hipMallocPitch((void **)&d_b, &pitch, n * sizeof(int), m);
    hipMallocPitch((void **)&d_c, &pitch, n * sizeof(int), m);
    // 2D copies honour the destination pitch so padding bytes are skipped:
    // (dst, dst pitch, src, src pitch, row width in bytes, rows, direction).
    hipMemcpy2D(d_a, pitch, h_a, n * sizeof(int), n * sizeof(int), m, hipMemcpyHostToDevice);
    hipMemcpy2D(d_b, pitch, h_b, n * sizeof(int), n * sizeof(int), m, hipMemcpyHostToDevice);
    // Note: the kernel takes pitch as int, so the size_t value is narrowed.
    calcolaProdPuntuale<<<dimGriglia, dimBlocco>>>(d_a, d_b, d_c, m, n, pitch);
    // Copy the padded device result into a tightly packed host buffer; this
    // blocking copy also synchronizes with the kernel.
    int *copyFromGPU = (int *)malloc((size_t)m * n * sizeof(int));
    hipMemcpy2D(copyFromGPU, n * sizeof(int), d_c, pitch, n * sizeof(int), m, hipMemcpyDeviceToHost);
    stampaCPU(copyFromGPU, m, n);
    // Release everything (the original leaked copyFromGPU).
    free(h_a);
    free(h_b);
    free(copyFromGPU);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
} | .text
.file "eserc10.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z14inizializzaCPUPiii # -- Begin function _Z14inizializzaCPUPiii
.p2align 4, 0x90
.type _Z14inizializzaCPUPiii,@function
_Z14inizializzaCPUPiii: # @_Z14inizializzaCPUPiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %ebx
movl %esi, %ebp
movq %rdi, 8(%rsp) # 8-byte Spill
xorl %r15d, %r15d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
testl %ebp, %ebp
jle .LBB0_6
# %bb.1: # %.preheader.lr.ph
movl %ebp, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl %ebx, %r13d
xorl %ebp, %ebp
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_5: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
incq %rbp
addl %ebx, %r15d
cmpq 16(%rsp), %rbp # 8-byte Folded Reload
je .LBB0_6
.LBB0_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
testl %ebx, %ebx
jle .LBB0_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r15d, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB0_4: # Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%r14,%r12,4)
incq %r12
cmpq %r12, %r13
jne .LBB0_4
jmp .LBB0_5
.LBB0_6: # %._crit_edge13
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z14inizializzaCPUPiii, .Lfunc_end0-_Z14inizializzaCPUPiii
.cfi_endproc
# -- End function
.globl _Z9stampaCPUPiii # -- Begin function _Z9stampaCPUPiii
.p2align 4, 0x90
.type _Z9stampaCPUPiii,@function
_Z9stampaCPUPiii: # @_Z9stampaCPUPiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %ebx
movl %esi, %ebp
movq %rdi, 8(%rsp) # 8-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $32, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r15
testq %r15, %r15
je .LBB1_15
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r15)
je .LBB1_3
# %bb.2:
movzbl 67(%r15), %eax
jmp .LBB1_4
.LBB1_3:
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
testl %ebp, %ebp
jle .LBB1_14
# %bb.5: # %.preheader.lr.ph
movl %ebp, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl %ebx, %r13d
xorl %ebp, %ebp
xorl %r14d, %r14d
jmp .LBB1_6
.p2align 4, 0x90
.LBB1_12: # in Loop: Header=BB1_6 Depth=1
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit15
# in Loop: Header=BB1_6 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r14
addl %ebx, %ebp
cmpq 16(%rsp), %r14 # 8-byte Folded Reload
je .LBB1_14
.LBB1_6: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_8 Depth 2
testl %ebx, %ebx
jle .LBB1_9
# %bb.7: # %.lr.ph
# in Loop: Header=BB1_6 Depth=1
movl %ebp, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_8: # Parent Loop BB1_6 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r15,%r12,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq %r12, %r13
jne .LBB1_8
.LBB1_9: # %._crit_edge
# in Loop: Header=BB1_6 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r15
testq %r15, %r15
je .LBB1_15
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i12
# in Loop: Header=BB1_6 Depth=1
cmpb $0, 56(%r15)
je .LBB1_12
# %bb.11: # in Loop: Header=BB1_6 Depth=1
movzbl 67(%r15), %eax
jmp .LBB1_13
.LBB1_14: # %._crit_edge18
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_15:
.cfi_def_cfa_offset 80
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size _Z9stampaCPUPiii, .Lfunc_end1-_Z9stampaCPUPiii
.cfi_endproc
# -- End function
.globl _Z34__device_stub__calcolaProdPuntualePiS_S_iii # -- Begin function _Z34__device_stub__calcolaProdPuntualePiS_S_iii
.p2align 4, 0x90
.type _Z34__device_stub__calcolaProdPuntualePiS_S_iii,@function
# Host-side launch stub for kernel _Z19calcolaProdPuntualePiS_S_iii.
# Receives the six kernel arguments (a, b, c in rdi/rsi/rdx; m, n, pitch in
# ecx/r8d/r9d), spills them, builds the argument-pointer array expected by
# hipLaunchKernel, pops the launch configuration previously pushed by
# __hipPushCallConfiguration, and forwards everything to hipLaunchKernel.
_Z34__device_stub__calcolaProdPuntualePiS_S_iii: # @_Z34__device_stub__calcolaProdPuntualePiS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# spill the six kernel arguments to the stack
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
# build the array of pointers to the spilled arguments at 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
# retrieve the pushed launch configuration (grid, block, shmem, stream)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
# hipLaunchKernel(function, gridDim, blockDim, args, sharedMem, stream);
# the last two arguments are pushed on the stack
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z19calcolaProdPuntualePiS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z34__device_stub__calcolaProdPuntualePiS_S_iii, .Lfunc_end2-_Z34__device_stub__calcolaProdPuntualePiS_S_iii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $248, %rsp
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movabsq $4294967297, %rax # imm = 0x100000001
movq %rax, 24(%rsp)
movl $1, 32(%rsp)
cmpl $5, %edi
jne .LBB3_1
# %bb.2:
leaq 28(%rsp), %rbx
movq 8(%rsi), %rdi
leaq 20(%rsp), %rdx
movq %rsi, %r14
movl $.L.str.2, %esi
xorl %eax, %eax
callq __isoc23_sscanf
movq 16(%r14), %rdi
leaq 16(%rsp), %rdx
movl $.L.str.2, %esi
xorl %eax, %eax
callq __isoc23_sscanf
movq 24(%r14), %rdi
leaq 24(%rsp), %rdx
movl $.L.str.2, %esi
xorl %eax, %eax
callq __isoc23_sscanf
movq 32(%r14), %rdi
movl $.L.str.2, %esi
movq %rbx, %rdx
xorl %eax, %eax
callq __isoc23_sscanf
jmp .LBB3_3
.LBB3_1:
movl $5, 20(%rsp)
movl $5, 16(%rsp)
movabsq $8589934594, %rax # imm = 0x200000002
movq %rax, 24(%rsp)
.LBB3_3:
movl 20(%rsp), %r12d
movl 24(%rsp), %ecx
movl %r12d, %eax
xorl %edx, %edx
divl %ecx
# kill: def $eax killed $eax def $rax
movq %rax, 112(%rsp) # 8-byte Spill
testl %edx, %edx
je .LBB3_5
# %bb.4:
incl %ecx
movl %ecx, 24(%rsp)
.LBB3_5:
movl 16(%rsp), %ebp
movl 28(%rsp), %ecx
movl %ebp, %eax
xorl %edx, %edx
divl %ecx
# kill: def $eax killed $eax def $rax
testl %edx, %edx
je .LBB3_7
# %bb.6:
incl %ecx
movl %ecx, 28(%rsp)
.LBB3_7:
shlq $32, %rax
movq %rax, 88(%rsp) # 8-byte Spill
movl %ebp, %eax
imull %r12d, %eax
movslq %eax, %r14
shlq $2, %r14
movq %r14, %rdi
callq malloc
movq %rax, 104(%rsp) # 8-byte Spill
movq %r14, %rdi
callq malloc
movq %rax, 96(%rsp) # 8-byte Spill
xorl %r13d, %r13d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
testl %r12d, %r12d
jle .LBB3_13
# %bb.8: # %.preheader.lr.ph.i
xorl %r15d, %r15d
jmp .LBB3_9
.p2align 4, 0x90
.LBB3_12: # %._crit_edge.i
# in Loop: Header=BB3_9 Depth=1
incq %r15
addl %ebp, %r13d
cmpq %r12, %r15
je .LBB3_13
.LBB3_9: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB3_11 Depth 2
testl %ebp, %ebp
jle .LBB3_12
# %bb.10: # %.lr.ph.i
# in Loop: Header=BB3_9 Depth=1
movl %r13d, %eax
movq 104(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_11: # Parent Loop BB3_9 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq %r14, %rbp
jne .LBB3_11
jmp .LBB3_12
.LBB3_13: # %_Z14inizializzaCPUPiii.exit
movq 112(%rsp), %rax # 8-byte Reload
addq %rax, 88(%rsp) # 8-byte Folded Spill
movl 20(%rsp), %r12d
movl 16(%rsp), %r13d
xorl %ebp, %ebp
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
testl %r12d, %r12d
jle .LBB3_19
# %bb.14: # %.preheader.lr.ph.i25
xorl %r15d, %r15d
jmp .LBB3_15
.p2align 4, 0x90
.LBB3_18: # %._crit_edge.i29
# in Loop: Header=BB3_15 Depth=1
incq %r15
addl %r13d, %ebp
cmpq %r12, %r15
je .LBB3_19
.LBB3_15: # %.preheader.i27
# =>This Loop Header: Depth=1
# Child Loop BB3_17 Depth 2
testl %r13d, %r13d
jle .LBB3_18
# %bb.16: # %.lr.ph.i32
# in Loop: Header=BB3_15 Depth=1
movl %ebp, %eax
movq 96(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_17: # Parent Loop BB3_15 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq %r14, %r13
jne .LBB3_17
jmp .LBB3_18
.LBB3_19: # %_Z14inizializzaCPUPiii.exit37
movl 20(%rsp), %esi
movl 16(%rsp), %edx
movq 104(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdi
callq _Z9stampaCPUPiii
movl 20(%rsp), %esi
movl 16(%rsp), %edx
movq 96(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
callq _Z9stampaCPUPiii
movslq 16(%rsp), %rdx
shlq $2, %rdx
movslq 20(%rsp), %rcx
leaq 64(%rsp), %rdi
leaq 40(%rsp), %r12
movq %r12, %rsi
callq hipMallocPitch
movslq 16(%rsp), %rdx
shlq $2, %rdx
movslq 20(%rsp), %rcx
leaq 56(%rsp), %rdi
movq %r12, %rsi
callq hipMallocPitch
movslq 16(%rsp), %rdx
shlq $2, %rdx
movslq 20(%rsp), %rcx
leaq 48(%rsp), %rdi
movq %r12, %rsi
callq hipMallocPitch
movq 64(%rsp), %rdi
movq 40(%rsp), %rsi
movslq 16(%rsp), %rcx
shlq $2, %rcx
movslq 20(%rsp), %r9
movl $1, (%rsp)
movq %rbx, %rdx
movq %rcx, %r8
callq hipMemcpy2D
movq 56(%rsp), %rdi
movq 40(%rsp), %rsi
movslq 16(%rsp), %rcx
shlq $2, %rcx
movslq 20(%rsp), %r9
movl $1, (%rsp)
movq %r14, %rdx
movq %rcx, %r8
callq hipMemcpy2D
movq 24(%rsp), %rdx
movl 32(%rsp), %ecx
movq 88(%rsp), %rdi # 8-byte Reload
movl $1, %esi
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_21
# %bb.20:
movq 64(%rsp), %rax
movq 56(%rsp), %rcx
movq 48(%rsp), %rdx
movl 20(%rsp), %esi
movl 16(%rsp), %edi
movl 40(%rsp), %r8d
movq %rax, 184(%rsp)
movq %rcx, 176(%rsp)
movq %rdx, 168(%rsp)
movl %esi, 84(%rsp)
movl %edi, 80(%rsp)
movl %r8d, 76(%rsp)
leaq 184(%rsp), %rax
movq %rax, 192(%rsp)
leaq 176(%rsp), %rax
movq %rax, 200(%rsp)
leaq 168(%rsp), %rax
movq %rax, 208(%rsp)
leaq 84(%rsp), %rax
movq %rax, 216(%rsp)
leaq 80(%rsp), %rax
movq %rax, 224(%rsp)
leaq 76(%rsp), %rax
movq %rax, 232(%rsp)
leaq 152(%rsp), %rdi
leaq 136(%rsp), %rsi
leaq 128(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rax
movq 120(%rsp), %rdi
movq 152(%rsp), %rsi
movl 160(%rsp), %edx
movq 136(%rsp), %rcx
movl 144(%rsp), %r8d
movq %rdi, 8(%rsp)
movq %rax, (%rsp)
leaq 192(%rsp), %r9
movl $_Z19calcolaProdPuntualePiS_S_iii, %edi
callq hipLaunchKernel
.LBB3_21:
movslq 16(%rsp), %r15
movslq 20(%rsp), %r12
movq %r12, %rdi
imulq %r15, %rdi
shlq $2, %rdi
callq malloc
movq %rax, %r13
shlq $2, %r15
movq 48(%rsp), %rdx
movq 40(%rsp), %rcx
movl $2, (%rsp)
movq %rax, %rdi
movq %r15, %rsi
movq %r15, %r8
movq %r12, %r9
callq hipMemcpy2D
movl 20(%rsp), %esi
movl 16(%rsp), %edx
movq %r13, %rdi
callq _Z9stampaCPUPiii
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq 64(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $248, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19calcolaProdPuntualePiS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "--------------------------------"
.size .L.str, 33
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " "
.size .L.str.1, 2
.type _Z19calcolaProdPuntualePiS_S_iii,@object # @_Z19calcolaProdPuntualePiS_S_iii
.section .rodata,"a",@progbits
.globl _Z19calcolaProdPuntualePiS_S_iii
.p2align 3, 0x0
_Z19calcolaProdPuntualePiS_S_iii:
.quad _Z34__device_stub__calcolaProdPuntualePiS_S_iii
.size _Z19calcolaProdPuntualePiS_S_iii, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "%d"
.size .L.str.2, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z19calcolaProdPuntualePiS_S_iii"
.size .L__unnamed_1, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__calcolaProdPuntualePiS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z19calcolaProdPuntualePiS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z19calcolaProdPuntualePiS_S_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e680000002100 */
/*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e620000002500 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R5, c[0x0][0x0], R0 ; /* 0x0000000005007a24 */
/* 0x002fca00078e0200 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ ULDC UR4, c[0x0][0x180] ; /* 0x0000600000047ab9 */
/* 0x000fe20000000800 */
/*00b0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00c0*/ USHF.R.S32.HI UR4, URZ, 0x2, UR4 ; /* 0x000000023f047899 */
/* 0x000fcc0008011404 */
/*00d0*/ IMAD R0, R0, UR4, R3 ; /* 0x0000000400007c24 */
/* 0x000fe2000f8e0203 */
/*00e0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00f0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*0100*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*0110*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0120*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0130*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0207 */
/*0140*/ IMAD R9, R4, R3, RZ ; /* 0x0000000304097224 */
/* 0x004fca00078e02ff */
/*0150*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19calcolaProdPuntualePiS_S_iii
.globl _Z19calcolaProdPuntualePiS_S_iii
.p2align 8
.type _Z19calcolaProdPuntualePiS_S_iii,@function
_Z19calcolaProdPuntualePiS_S_iii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34
s_load_b64 s[2:3], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s4, 0xffff
s_lshr_b32 s4, s4, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s5, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s3, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s2, s[0:1], 0x20
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_ashr_i32 s2, s2, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s2, v[1:2]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_mul_lo_u32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19calcolaProdPuntualePiS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19calcolaProdPuntualePiS_S_iii, .Lfunc_end0-_Z19calcolaProdPuntualePiS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19calcolaProdPuntualePiS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19calcolaProdPuntualePiS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00003f3a_00000000-6_eserc10.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z14inizializzaCPUPiii
.type _Z14inizializzaCPUPiii, @function
_Z14inizializzaCPUPiii:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r15
movl %esi, %ebx
movl %esi, 4(%rsp)
movl %edx, %r14d
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
testl %ebx, %ebx
jle .L3
movl $0, %r13d
movl $0, %r12d
movslq %r14d, %rax
movq %rax, 8(%rsp)
jmp .L5
.L7:
movslq %r13d, %rax
leaq (%r15,%rax,4), %rbx
movq 8(%rsp), %rsi
addq %rsi, %rax
leaq (%r15,%rax,4), %rbp
.L6:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
addl $1, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L6
.L8:
addl $1, %r12d
addl %r14d, %r13d
cmpl %r12d, 4(%rsp)
je .L3
.L5:
testl %r14d, %r14d
jg .L7
jmp .L8
.L3:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z14inizializzaCPUPiii, .-_Z14inizializzaCPUPiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "--------------------------------"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string " "
.text
.globl _Z9stampaCPUPiii
.type _Z9stampaCPUPiii, @function
_Z9stampaCPUPiii:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 16(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movl $32, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L25
cmpb $0, 56(%rbx)
je .L13
movzbl 67(%rbx), %eax
.L14:
movsbl %al, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
cmpl $0, 12(%rsp)
jle .L11
movl $0, %r15d
movl $0, %r14d
movslq 8(%rsp), %rax
movq %rax, 24(%rsp)
leaq _ZSt4cout(%rip), %rbp
leaq .LC1(%rip), %r13
jmp .L16
.L25:
call _ZSt16__throw_bad_castv@PLT
.L13:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
jmp .L14
.L26:
call _ZSt16__throw_bad_castv@PLT
.L27:
movzbl 67(%rbx), %esi
.L20:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addl $1, %r14d
movl 8(%rsp), %eax
addl %eax, %r15d
cmpl %r14d, 12(%rsp)
je .L11
.L16:
cmpl $0, 8(%rsp)
jle .L22
movslq %r15d, %rax
movq 16(%rsp), %rcx
leaq (%rcx,%rax,4), %rbx
movq 24(%rsp), %rdx
addq %rdx, %rax
leaq (%rcx,%rax,4), %r12
.L17:
movl (%rbx), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L17
.L22:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .L26
cmpb $0, 56(%rbx)
jne .L27
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L20
.L11:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z9stampaCPUPiii, .-_Z9stampaCPUPiii
.globl _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii
.type _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii, @function
_Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii:
.LFB3696:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L32
.L28:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L33
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z19calcolaProdPuntualePiS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L28
.L33:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii, .-_Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii
.globl _Z19calcolaProdPuntualePiS_S_iii
.type _Z19calcolaProdPuntualePiS_S_iii, @function
_Z19calcolaProdPuntualePiS_S_iii:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z19calcolaProdPuntualePiS_S_iii, .-_Z19calcolaProdPuntualePiS_S_iii
.section .rodata.str1.1
.LC2:
.string "%d"
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 68(%rsp)
cmpl $5, %edi
je .L37
movl $5, 8(%rsp)
movl $5, 12(%rsp)
movl $2, 52(%rsp)
movl $2, 60(%rsp)
movl $2, %ecx
movl $5, %r14d
.L38:
addl $1, %ecx
movl %ecx, 48(%rsp)
.L39:
movl 12(%rsp), %r13d
movl 52(%rsp), %ecx
movl %r13d, %eax
movl $0, %edx
divl %ecx
movl %eax, 64(%rsp)
testl %edx, %edx
je .L40
addl $1, %ecx
movl %ecx, 52(%rsp)
.L40:
movl %r13d, %ebx
imull %r14d, %ebx
movslq %ebx, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
movl %r13d, %edx
movl %r14d, %esi
movq %r12, %rdi
call _Z14inizializzaCPUPiii
movl 12(%rsp), %edx
movl 8(%rsp), %esi
movq %rbp, %rdi
call _Z14inizializzaCPUPiii
movl 12(%rsp), %edx
movl 8(%rsp), %esi
movq %r12, %rdi
call _Z9stampaCPUPiii
movl 12(%rsp), %edx
movl 8(%rsp), %esi
movq %rbp, %rdi
call _Z9stampaCPUPiii
movslq 8(%rsp), %rcx
movslq 12(%rsp), %rdx
salq $2, %rdx
leaq 40(%rsp), %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocPitch@PLT
movslq 8(%rsp), %rcx
movslq 12(%rsp), %rdx
salq $2, %rdx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocPitch@PLT
movslq 8(%rsp), %rcx
movslq 12(%rsp), %rdx
salq $2, %rdx
leaq 32(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocPitch@PLT
movslq 12(%rsp), %rcx
salq $2, %rcx
subq $8, %rsp
.cfi_def_cfa_offset 136
pushq $1
.cfi_def_cfa_offset 144
movslq 24(%rsp), %r9
movq %rcx, %r8
movq %r12, %rdx
movq 56(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy2D@PLT
movslq 28(%rsp), %rcx
salq $2, %rcx
movl $1, (%rsp)
movslq 24(%rsp), %r9
movq %rcx, %r8
movq %rbp, %rdx
movq 56(%rsp), %rsi
movq 40(%rsp), %rdi
call cudaMemcpy2D@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
movl 56(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movq 60(%rsp), %rdi
movl 68(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L44
.L41:
movl 12(%rsp), %ebx
movl 8(%rsp), %r14d
movl %ebx, %edi
imull %r14d, %edi
movslq %edi, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, %r13
movslq %ebx, %rsi
salq $2, %rsi
subq $8, %rsp
.cfi_def_cfa_offset 136
pushq $2
.cfi_def_cfa_offset 144
movslq %r14d, %r9
movq %rsi, %r8
movq 56(%rsp), %rcx
movq 48(%rsp), %rdx
movq %rax, %rdi
call cudaMemcpy2D@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
movl 12(%rsp), %edx
movl 8(%rsp), %esi
movq %r13, %rdi
call _Z9stampaCPUPiii
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L45
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
movq %rsi, %rbx
leaq 8(%rsp), %rdx
movq 8(%rsi), %rdi
leaq .LC2(%rip), %rbp
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 12(%rsp), %rdx
movq 16(%rbx), %rdi
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 48(%rsp), %rdx
movq 24(%rbx), %rdi
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 52(%rsp), %rdx
movq 32(%rbx), %rdi
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
movl 8(%rsp), %r14d
movl 48(%rsp), %ecx
movl %r14d, %eax
movl $0, %edx
divl %ecx
movl %eax, 60(%rsp)
testl %edx, %edx
je .L39
jmp .L38
.L44:
movl 40(%rsp), %r9d
movl 12(%rsp), %r8d
movl 8(%rsp), %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z46__device_stub__Z19calcolaProdPuntualePiS_S_iiiPiS_S_iii
jmp .L41
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC3:
.string "_Z19calcolaProdPuntualePiS_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z19calcolaProdPuntualePiS_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "eserc10.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z14inizializzaCPUPiii # -- Begin function _Z14inizializzaCPUPiii
.p2align 4, 0x90
.type _Z14inizializzaCPUPiii,@function
_Z14inizializzaCPUPiii: # @_Z14inizializzaCPUPiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %ebx
movl %esi, %ebp
movq %rdi, 8(%rsp) # 8-byte Spill
xorl %r15d, %r15d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
testl %ebp, %ebp
jle .LBB0_6
# %bb.1: # %.preheader.lr.ph
movl %ebp, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl %ebx, %r13d
xorl %ebp, %ebp
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_5: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
incq %rbp
addl %ebx, %r15d
cmpq 16(%rsp), %rbp # 8-byte Folded Reload
je .LBB0_6
.LBB0_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
testl %ebx, %ebx
jle .LBB0_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r15d, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB0_4: # Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%r14,%r12,4)
incq %r12
cmpq %r12, %r13
jne .LBB0_4
jmp .LBB0_5
.LBB0_6: # %._crit_edge13
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z14inizializzaCPUPiii, .Lfunc_end0-_Z14inizializzaCPUPiii
.cfi_endproc
# -- End function
.globl _Z9stampaCPUPiii # -- Begin function _Z9stampaCPUPiii
.p2align 4, 0x90
.type _Z9stampaCPUPiii,@function
_Z9stampaCPUPiii: # @_Z9stampaCPUPiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %ebx
movl %esi, %ebp
movq %rdi, 8(%rsp) # 8-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $32, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r15
testq %r15, %r15
je .LBB1_15
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r15)
je .LBB1_3
# %bb.2:
movzbl 67(%r15), %eax
jmp .LBB1_4
.LBB1_3:
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
testl %ebp, %ebp
jle .LBB1_14
# %bb.5: # %.preheader.lr.ph
movl %ebp, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movl %ebx, %r13d
xorl %ebp, %ebp
xorl %r14d, %r14d
jmp .LBB1_6
.p2align 4, 0x90
.LBB1_12: # in Loop: Header=BB1_6 Depth=1
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit15
# in Loop: Header=BB1_6 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r14
addl %ebx, %ebp
cmpq 16(%rsp), %r14 # 8-byte Folded Reload
je .LBB1_14
.LBB1_6: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_8 Depth 2
testl %ebx, %ebx
jle .LBB1_9
# %bb.7: # %.lr.ph
# in Loop: Header=BB1_6 Depth=1
movl %ebp, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_8: # Parent Loop BB1_6 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r15,%r12,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq %r12, %r13
jne .LBB1_8
.LBB1_9: # %._crit_edge
# in Loop: Header=BB1_6 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r15
testq %r15, %r15
je .LBB1_15
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i12
# in Loop: Header=BB1_6 Depth=1
cmpb $0, 56(%r15)
je .LBB1_12
# %bb.11: # in Loop: Header=BB1_6 Depth=1
movzbl 67(%r15), %eax
jmp .LBB1_13
.LBB1_14: # %._crit_edge18
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_15:
.cfi_def_cfa_offset 80
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size _Z9stampaCPUPiii, .Lfunc_end1-_Z9stampaCPUPiii
.cfi_endproc
# -- End function
# Host-side launch stub for the HIP kernel calcolaProdPuntuale(int*, int*, int*, int, int, int).
# Spills the six arguments to stack slots, builds the argument-pointer table that
# hipLaunchKernel expects, pops the launch configuration previously pushed by
# __hipPushCallConfiguration, and performs the actual launch.
.globl _Z34__device_stub__calcolaProdPuntualePiS_S_iii # -- Begin function _Z34__device_stub__calcolaProdPuntualePiS_S_iii
.p2align 4, 0x90
.type _Z34__device_stub__calcolaProdPuntualePiS_S_iii,@function
_Z34__device_stub__calcolaProdPuntualePiS_S_iii: # @_Z34__device_stub__calcolaProdPuntualePiS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
# Spill the three pointer args (rdi/rsi/rdx) and three int args (ecx/r8d/r9d).
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
# Build the void* args[6] array at 96(%rsp), one pointer per spilled argument.
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream pushed by the caller.
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
# First argument is the host-side handle object for the kernel.
movl $_Z19calcolaProdPuntualePiS_S_iii, %edi
# Shared-mem size and stream are passed on the stack (args 7 and 8).
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z34__device_stub__calcolaProdPuntualePiS_S_iii, .Lfunc_end2-_Z34__device_stub__calcolaProdPuntualePiS_S_iii
.cfi_endproc
# -- End function
# main: drives the whole example.
#  - parses optional argv[1..4] as %d (matrix dims and block dims) or uses defaults,
#  - fills two host int matrices with rand()%10 + 1 (inlined inizializzaCPU),
#  - prints both with stampaCPU,
#  - allocates three pitched device buffers, copies the inputs up,
#  - launches calcolaProdPuntuale via push/pop call configuration,
#  - copies the result back, prints it, and frees everything.
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $248, %rsp
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# Initialise two adjacent ints at 24/28(%rsp) to 1 in a single 8-byte store.
movabsq $4294967297, %rax # imm = 0x100000001
movq %rax, 24(%rsp)
movl $1, 32(%rsp)
cmpl $5, %edi
jne .LBB3_1
# %bb.2:
# argc == 5: parse argv[1..4] with sscanf("%d", ...) into the stack slots.
leaq 28(%rsp), %rbx
movq 8(%rsi), %rdi
leaq 20(%rsp), %rdx
movq %rsi, %r14
movl $.L.str.2, %esi
xorl %eax, %eax
callq __isoc23_sscanf
movq 16(%r14), %rdi
leaq 16(%rsp), %rdx
movl $.L.str.2, %esi
xorl %eax, %eax
callq __isoc23_sscanf
movq 24(%r14), %rdi
leaq 24(%rsp), %rdx
movl $.L.str.2, %esi
xorl %eax, %eax
callq __isoc23_sscanf
movq 32(%r14), %rdi
movl $.L.str.2, %esi
movq %rbx, %rdx
xorl %eax, %eax
callq __isoc23_sscanf
jmp .LBB3_3
.LBB3_1:
# Defaults when argc != 5: dims 5x5, block 2x2 (two ints stored as one quad).
movl $5, 20(%rsp)
movl $5, 16(%rsp)
movabsq $8589934594, %rax # imm = 0x200000002
movq %rax, 24(%rsp)
.LBB3_3:
# Ceil-divide rows by block-x: grid_x = rows/bx, bump bx if there is a remainder.
movl 20(%rsp), %r12d
movl 24(%rsp), %ecx
movl %r12d, %eax
xorl %edx, %edx
divl %ecx
# kill: def $eax killed $eax def $rax
movq %rax, 112(%rsp) # 8-byte Spill
testl %edx, %edx
je .LBB3_5
# %bb.4:
incl %ecx
movl %ecx, 24(%rsp)
.LBB3_5:
# Same ceil-divide treatment for columns / block-y.
movl 16(%rsp), %ebp
movl 28(%rsp), %ecx
movl %ebp, %eax
xorl %edx, %edx
divl %ecx
# kill: def $eax killed $eax def $rax
testl %edx, %edx
je .LBB3_7
# %bb.6:
incl %ecx
movl %ecx, 28(%rsp)
.LBB3_7:
# Pack grid_y into the high 32 bits; grid dims travel as one 64-bit value.
shlq $32, %rax
movq %rax, 88(%rsp) # 8-byte Spill
# Allocate two host matrices of rows*cols ints each.
movl %ebp, %eax
imull %r12d, %eax
movslq %eax, %r14
shlq $2, %r14
movq %r14, %rdi
callq malloc
movq %rax, 104(%rsp) # 8-byte Spill
movq %r14, %rdi
callq malloc
movq %rax, 96(%rsp) # 8-byte Spill
# Inlined inizializzaCPU for the first matrix: srand(time(0)); fill with rand()%10+1.
xorl %r13d, %r13d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
testl %r12d, %r12d
jle .LBB3_13
# %bb.8: # %.preheader.lr.ph.i
xorl %r15d, %r15d
jmp .LBB3_9
.p2align 4, 0x90
.LBB3_12: # %._crit_edge.i
# in Loop: Header=BB3_9 Depth=1
incq %r15
addl %ebp, %r13d
cmpq %r12, %r15
je .LBB3_13
.LBB3_9: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB3_11 Depth 2
testl %ebp, %ebp
jle .LBB3_12
# %bb.10: # %.lr.ph.i
# in Loop: Header=BB3_9 Depth=1
movl %r13d, %eax
movq 104(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_11: # Parent Loop BB3_9 Depth=1
# => This Inner Loop Header: Depth=2
# rand() % 10 computed via multiply-by-reciprocal (0x66666667), then +1.
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq %r14, %rbp
jne .LBB3_11
jmp .LBB3_12
.LBB3_13: # %_Z14inizializzaCPUPiii.exit
# Combine grid_x (low) with the spilled grid_y (high) into one packed dim value.
movq 112(%rsp), %rax # 8-byte Reload
addq %rax, 88(%rsp) # 8-byte Folded Spill
# Inlined inizializzaCPU again for the second matrix.
movl 20(%rsp), %r12d
movl 16(%rsp), %r13d
xorl %ebp, %ebp
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
testl %r12d, %r12d
jle .LBB3_19
# %bb.14: # %.preheader.lr.ph.i25
xorl %r15d, %r15d
jmp .LBB3_15
.p2align 4, 0x90
.LBB3_18: # %._crit_edge.i29
# in Loop: Header=BB3_15 Depth=1
incq %r15
addl %r13d, %ebp
cmpq %r12, %r15
je .LBB3_19
.LBB3_15: # %.preheader.i27
# =>This Loop Header: Depth=1
# Child Loop BB3_17 Depth 2
testl %r13d, %r13d
jle .LBB3_18
# %bb.16: # %.lr.ph.i32
# in Loop: Header=BB3_15 Depth=1
movl %ebp, %eax
movq 96(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_17: # Parent Loop BB3_15 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
incl %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq %r14, %r13
jne .LBB3_17
jmp .LBB3_18
.LBB3_19: # %_Z14inizializzaCPUPiii.exit37
# Print both input matrices.
movl 20(%rsp), %esi
movl 16(%rsp), %edx
movq 104(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdi
callq _Z9stampaCPUPiii
movl 20(%rsp), %esi
movl 16(%rsp), %edx
movq 96(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
callq _Z9stampaCPUPiii
# Three pitched device allocations (width = cols*4 bytes, height = rows);
# the returned pitch lands in 40(%rsp) (overwritten each call).
movslq 16(%rsp), %rdx
shlq $2, %rdx
movslq 20(%rsp), %rcx
leaq 64(%rsp), %rdi
leaq 40(%rsp), %r12
movq %r12, %rsi
callq hipMallocPitch
movslq 16(%rsp), %rdx
shlq $2, %rdx
movslq 20(%rsp), %rcx
leaq 56(%rsp), %rdi
movq %r12, %rsi
callq hipMallocPitch
movslq 16(%rsp), %rdx
shlq $2, %rdx
movslq 20(%rsp), %rcx
leaq 48(%rsp), %rdi
movq %r12, %rsi
callq hipMallocPitch
# Host-to-device 2D copies of both inputs (kind=1 == hipMemcpyHostToDevice).
movq 64(%rsp), %rdi
movq 40(%rsp), %rsi
movslq 16(%rsp), %rcx
shlq $2, %rcx
movslq 20(%rsp), %r9
movl $1, (%rsp)
movq %rbx, %rdx
movq %rcx, %r8
callq hipMemcpy2D
movq 56(%rsp), %rdi
movq 40(%rsp), %rsi
movslq 16(%rsp), %rcx
shlq $2, %rcx
movslq 20(%rsp), %r9
movl $1, (%rsp)
movq %r14, %rdx
movq %rcx, %r8
callq hipMemcpy2D
# Push the launch configuration (grid, block, shared=0, stream=0); a non-zero
# return means the launch must be skipped.
movq 24(%rsp), %rdx
movl 32(%rsp), %ecx
movq 88(%rsp), %rdi # 8-byte Reload
movl $1, %esi
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_21
# %bb.20:
# Inlined kernel stub: spill args, build pointer table, pop config, launch.
movq 64(%rsp), %rax
movq 56(%rsp), %rcx
movq 48(%rsp), %rdx
movl 20(%rsp), %esi
movl 16(%rsp), %edi
movl 40(%rsp), %r8d
movq %rax, 184(%rsp)
movq %rcx, 176(%rsp)
movq %rdx, 168(%rsp)
movl %esi, 84(%rsp)
movl %edi, 80(%rsp)
movl %r8d, 76(%rsp)
leaq 184(%rsp), %rax
movq %rax, 192(%rsp)
leaq 176(%rsp), %rax
movq %rax, 200(%rsp)
leaq 168(%rsp), %rax
movq %rax, 208(%rsp)
leaq 84(%rsp), %rax
movq %rax, 216(%rsp)
leaq 80(%rsp), %rax
movq %rax, 224(%rsp)
leaq 76(%rsp), %rax
movq %rax, 232(%rsp)
leaq 152(%rsp), %rdi
leaq 136(%rsp), %rsi
leaq 128(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rax
movq 120(%rsp), %rdi
movq 152(%rsp), %rsi
movl 160(%rsp), %edx
movq 136(%rsp), %rcx
movl 144(%rsp), %r8d
movq %rdi, 8(%rsp)
movq %rax, (%rsp)
leaq 192(%rsp), %r9
movl $_Z19calcolaProdPuntualePiS_S_iii, %edi
callq hipLaunchKernel
.LBB3_21:
# Copy the result back (kind=2 == hipMemcpyDeviceToHost) into a fresh host buffer.
movslq 16(%rsp), %r15
movslq 20(%rsp), %r12
movq %r12, %rdi
imulq %r15, %rdi
shlq $2, %rdi
callq malloc
movq %rax, %r13
shlq $2, %r15
movq 48(%rsp), %rdx
movq 40(%rsp), %rcx
movl $2, (%rsp)
movq %rax, %rdi
movq %r15, %rsi
movq %r15, %r8
movq %r12, %r9
callq hipMemcpy2D
# Print the result, then release host and device memory.
movl 20(%rsp), %esi
movl 16(%rsp), %edx
movq %r13, %rdi
callq _Z9stampaCPUPiii
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq 64(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $248, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
# Module constructor (run from .init_array): registers the embedded HIP fat
# binary exactly once, then registers the kernel symbol and arranges for the
# matching dtor to run at exit.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Register the fat binary only if the cached handle is still null.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the 32 bytes of trailing optional args to __hipRegisterFunction.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19calcolaProdPuntualePiS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (registered via atexit): unregisters the fat binary if it
# was registered and clears the cached handle so a second run is a no-op.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
# --- Read-only data and HIP registration metadata ---
# .L.str / .L.str.1 / .L.str.2: format/separator strings used by the host code.
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "--------------------------------"
.size .L.str, 33
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " "
.size .L.str.1, 2
# Host-side kernel handle: a pointer-sized object whose value is the stub
# function; this is what gets passed to hipLaunchKernel/__hipRegisterFunction.
.type _Z19calcolaProdPuntualePiS_S_iii,@object # @_Z19calcolaProdPuntualePiS_S_iii
.section .rodata,"a",@progbits
.globl _Z19calcolaProdPuntualePiS_S_iii
.p2align 3, 0x0
_Z19calcolaProdPuntualePiS_S_iii:
.quad _Z34__device_stub__calcolaProdPuntualePiS_S_iii
.size _Z19calcolaProdPuntualePiS_S_iii, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "%d"
.size .L.str.2, 3
# Mangled kernel name string handed to __hipRegisterFunction.
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z19calcolaProdPuntualePiS_S_iii"
.size .L__unnamed_1, 33
# Fat-binary wrapper: magic "HIPF" (0x48495046), version 1, pointer to the
# embedded device code blob.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Cached registration handle, filled in by __hip_module_ctor.
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
# Run the ctor automatically at program start.
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__calcolaProdPuntualePiS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z19calcolaProdPuntualePiS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
////////////////////////////////////////////////////////////////////////
// define kernel block size for
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 8
// device code
// Build the right-hand side and the tri-diagonal coefficients for one ADI
// time step.  One thread owns a single (i,j) column and walks all NZ
// k-planes.  Boundary points (Dirichlet b.c.'s) get the identity row
// (a=0, b=1, c=0, du=0); interior points get the 7-point Laplacian residual
// scaled by lam.  Launch with BLOCK_X x BLOCK_Y thread blocks.
__global__ void GPU_adi_rhs(int NX, int NY, int NZ, float lam,
                            const float* __restrict__ d_u,
                            float* __restrict__ d_du,
                            float* __restrict__ d_ax,
                            float* __restrict__ d_bx,
                            float* __restrict__ d_cx,
                            float* __restrict__ d_ay,
                            float* __restrict__ d_by,
                            float* __restrict__ d_cy,
                            float* __restrict__ d_az,
                            float* __restrict__ d_bz,
                            float* __restrict__ d_cz)
{
#define IOFF 1
#define JOFF NX
#define KOFF NX*NY
    //
    // global (i,j) coordinates of this thread's column
    //
    const int i = threadIdx.x + blockIdx.x*BLOCK_X;
    const int j = threadIdx.y + blockIdx.y*BLOCK_Y;

    // Out-of-range threads have no work; there are no barriers below, so an
    // early exit is safe and equivalent to the original "active" guard.
    if (i >= NX || j >= NY) return;

    const int NXM1 = NX - 1;
    const int NYM1 = NY - 1;
    const int NZM1 = NZ - 1;

    int indg = i + j*NX;

    //
    // loop over k-planes, writing r.h.s. and a,b,c coefficients
    //
    for (int k = 0; k < NZ; k++) {
        float du, a, b, c;

        if (i == 0 || i == NXM1 || j == 0 || j == NYM1 || k == 0 || k == NZM1) {
            du = 0.0f;            // Dirichlet b.c.'s: identity row
            a  = 0.0f;
            b  = 1.0f;
            c  = 0.0f;
        }
        else {
            du = lam * (d_u[indg - IOFF] + d_u[indg + IOFF]
                + d_u[indg - JOFF] + d_u[indg + JOFF]
                + d_u[indg - KOFF] + d_u[indg + KOFF] - 6.0f*d_u[indg]);
            a = -0.5f*lam;
            b = 1.0f + lam;
            c = -0.5f*lam;
        }

        d_du[indg] = du;
        d_ax[indg] = a;
        d_bx[indg] = b;
        d_cx[indg] = c;
        d_ay[indg] = a;
        d_by[indg] = b;
        d_cy[indg] = c;
        d_az[indg] = a;
        d_bz[indg] = b;
        d_cz[indg] = c;

        indg += KOFF;             // step to the next k-plane
    }
}
// Per-thread Thomas (tri-diagonal) solve along x, vectorised with float4
// loads/stores.  Each thread solves one full x-line for a given (j,k) and
// stores the solution back into d_d in place.
//
// Preconditions visible in the code: NX must be a multiple of 4 (the loops
// step by 4 over float4 quads) and NX <= 256 (the per-thread scratch arrays
// c2/d2 are indexed up to NX-1).  d_a/d_b/d_c/d_d are assumed to hold the
// sub/main/super-diagonals and the r.h.s. laid out x-contiguously
// (indg = NX*(j + k*NY)/4) -- TODO confirm against the caller.
__global__ void GPU_adi_x_float4(int NX, int NY, int NZ,
                                 const float4* __restrict__ d_a,
                                 const float4* __restrict__ d_b,
                                 const float4* __restrict__ d_c,
                                 float4* __restrict__ d_d) {
    int i, j, k, indg;
    float aa, bb, cc, dd, c2[256], d2[256];   // per-thread modified coefficients
    float4 a4, b4, c4, d4;
    //
    // set up indices for main block: (j,k) picks the line, indg the first quad
    //
    j = threadIdx.x + blockIdx.x*blockDim.x;
    k = threadIdx.y + blockIdx.y*blockDim.y;
    indg = NX*(j + k*NY) / 4;
    if ((j<NY) && (k<NZ)) {
        //
        // forward pass: first quad handled out of the loop, x=0 uses the
        // no-sub-diagonal form, x=1..3 the general elimination step
        //
        a4 = d_a[indg];
        b4 = d_b[indg];
        c4 = d_c[indg];
        d4 = d_d[indg];
        bb = 1.0f / b4.x;
        cc = bb * c4.x;
        dd = bb * d4.x;
        c2[0] = cc;
        d2[0] = dd;
        aa = a4.y;
        bb = b4.y - aa*cc;
        dd = d4.y - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4.y;
        dd = bb*dd;
        c2[1] = cc;
        d2[1] = dd;
        aa = a4.z;
        bb = b4.z - aa*cc;
        dd = d4.z - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4.z;
        dd = bb*dd;
        c2[2] = cc;
        d2[2] = dd;
        aa = a4.w;
        bb = b4.w - aa*cc;
        dd = d4.w - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4.w;
        dd = bb*dd;
        c2[3] = cc;
        d2[3] = dd;
        // remaining quads: same elimination unrolled 4x over the vector lanes
        for (i = 4; i<NX; i += 4) {
            indg = indg + 1;
            a4 = d_a[indg];
            b4 = d_b[indg];
            c4 = d_c[indg];
            d4 = d_d[indg];
            aa = a4.x;
            bb = b4.x - aa*cc;
            dd = d4.x - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4.x;
            dd = bb*dd;
            c2[i] = cc;
            d2[i] = dd;
            aa = a4.y;
            bb = b4.y - aa*cc;
            dd = d4.y - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4.y;
            dd = bb*dd;
            c2[i + 1] = cc;
            d2[i + 1] = dd;
            aa = a4.z;
            bb = b4.z - aa*cc;
            dd = d4.z - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4.z;
            dd = bb*dd;
            c2[i + 2] = cc;
            d2[i + 2] = dd;
            aa = a4.w;
            bb = b4.w - aa*cc;
            dd = d4.w - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4.w;
            dd = bb*dd;
            c2[i + 3] = cc;
            d2[i + 3] = dd;
        }
        //
        // reverse pass: back-substitute, writing one float4 per quad
        //
        d4.w = dd;                      // dd still holds the last solved value
        dd = d2[NX - 2] - c2[NX - 2] * dd;
        d4.z = dd;
        dd = d2[NX - 3] - c2[NX - 3] * dd;
        d4.y = dd;
        dd = d2[NX - 4] - c2[NX - 4] * dd;
        d4.x = dd;
        d_d[indg] = d4;
        for (i = NX - 5; i >= 0; i -= 4) {
            indg = indg - 1;
            dd = d2[i] - c2[i] * dd;
            d4.w = dd;
            dd = d2[i - 1] - c2[i - 1] * dd;
            d4.z = dd;
            dd = d2[i - 2] - c2[i - 2] * dd;
            d4.y = dd;
            dd = d2[i - 3] - c2[i - 3] * dd;
            d4.x = dd;
            d_d[indg] = d4;
        }
    }
}
// Variant of GPU_adi_x_float4 that processes two float4 quads (8 x-points)
// per loop iteration.  Same per-thread Thomas solve along x; each thread owns
// one (j,k) line and solves it in place in d_d.
//
// Preconditions visible in the code: NX must be a multiple of 8 (loops step
// by 8) and NX <= 256 (c2/d2 indexed up to NX-1).
// The __threadfence_block() calls between the paired loads have no inter-thread
// data to order here; presumably they are meant to constrain compiler
// scheduling of the loads -- NOTE(review): confirm intent before removing.
__global__ void GPU_adi_x_float4_2(int NX, int NY, int NZ,
                                   const float4* __restrict__ d_a,
                                   const float4* __restrict__ d_b,
                                   const float4* __restrict__ d_c,
                                   float4* __restrict__ d_d) {
    int i, j, k, indg;
    float aa, bb, cc, dd, c2[256], d2[256];   // per-thread modified coefficients
    float4 a4, b4, c4, d4, a4_2, b4_2, c4_2, d4_2;
    //
    // set up indices for main block
    //
    j = threadIdx.x + blockIdx.x*blockDim.x;
    k = threadIdx.y + blockIdx.y*blockDim.y;
    indg = NX*(j + k*NY) / 4;
    if ((j<NY) && (k<NZ)) {
        //
        // forward pass: first 8 points (two quads) peeled out of the loop;
        // x=0 uses the no-sub-diagonal form, the rest the general step
        //
        a4 = d_a[indg];
        a4_2 = d_a[indg + 1];
        b4 = d_b[indg];
        b4_2 = d_b[indg + 1];
        c4 = d_c[indg];
        c4_2 = d_c[indg + 1];
        d4 = d_d[indg];
        d4_2 = d_d[indg + 1];
        bb = 1.0f / b4.x;
        cc = bb * c4.x;
        dd = bb * d4.x;
        c2[0] = cc;
        d2[0] = dd;
        aa = a4.y;
        bb = b4.y - aa*cc;
        dd = d4.y - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4.y;
        dd = bb*dd;
        c2[1] = cc;
        d2[1] = dd;
        aa = a4.z;
        bb = b4.z - aa*cc;
        dd = d4.z - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4.z;
        dd = bb*dd;
        c2[2] = cc;
        d2[2] = dd;
        aa = a4.w;
        bb = b4.w - aa*cc;
        dd = d4.w - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4.w;
        dd = bb*dd;
        c2[3] = cc;
        d2[3] = dd;
        aa = a4_2.x;
        bb = b4_2.x - aa*cc;
        dd = d4_2.x - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4_2.x;
        dd = bb*dd;
        c2[4] = cc;
        d2[4] = dd;
        aa = a4_2.y;
        bb = b4_2.y - aa*cc;
        dd = d4_2.y - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4_2.y;
        dd = bb*dd;
        c2[5] = cc;
        d2[5] = dd;
        aa = a4_2.z;
        bb = b4_2.z - aa*cc;
        dd = d4_2.z - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4_2.z;
        dd = bb*dd;
        c2[6] = cc;
        d2[6] = dd;
        aa = a4_2.w;
        bb = b4_2.w - aa*cc;
        dd = d4_2.w - aa*dd;
        bb = 1.0f / bb;
        cc = bb*c4_2.w;
        dd = bb*dd;
        c2[7] = cc;
        d2[7] = dd;
        // main loop: two quads (8 points) of forward elimination per trip
        for (i = 8; i<NX; i += 8) {
            indg = indg + 2;
            a4 = d_a[indg];
            a4_2 = d_a[indg + 1];
            __threadfence_block();
            b4 = d_b[indg];
            b4_2 = d_b[indg + 1];
            __threadfence_block();
            c4 = d_c[indg];
            c4_2 = d_c[indg + 1];
            __threadfence_block();
            d4 = d_d[indg];
            d4_2 = d_d[indg + 1];
            aa = a4.x;
            bb = b4.x - aa*cc;
            dd = d4.x - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4.x;
            dd = bb*dd;
            c2[i] = cc;
            d2[i] = dd;
            aa = a4.y;
            bb = b4.y - aa*cc;
            dd = d4.y - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4.y;
            dd = bb*dd;
            c2[i + 1] = cc;
            d2[i + 1] = dd;
            aa = a4.z;
            bb = b4.z - aa*cc;
            dd = d4.z - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4.z;
            dd = bb*dd;
            c2[i + 2] = cc;
            d2[i + 2] = dd;
            aa = a4.w;
            bb = b4.w - aa*cc;
            dd = d4.w - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4.w;
            dd = bb*dd;
            c2[i + 3] = cc;
            d2[i + 3] = dd;
            aa = a4_2.x;
            bb = b4_2.x - aa*cc;
            dd = d4_2.x - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4_2.x;
            dd = bb*dd;
            c2[i + 4] = cc;
            d2[i + 4] = dd;
            aa = a4_2.y;
            bb = b4_2.y - aa*cc;
            dd = d4_2.y - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4_2.y;
            dd = bb*dd;
            c2[i + 5] = cc;
            d2[i + 5] = dd;
            aa = a4_2.z;
            bb = b4_2.z - aa*cc;
            dd = d4_2.z - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4_2.z;
            dd = bb*dd;
            c2[i + 6] = cc;
            d2[i + 6] = dd;
            aa = a4_2.w;
            bb = b4_2.w - aa*cc;
            dd = d4_2.w - aa*dd;
            bb = 1.0f / bb;
            cc = bb*c4_2.w;
            dd = bb*dd;
            c2[i + 7] = cc;
            d2[i + 7] = dd;
        }
        //
        // reverse pass: back-substitute 8 points at a time, writing two float4s
        //
        d4_2.w = dd;                    // dd still holds the last solved value
        dd = d2[NX - 2] - c2[NX - 2] * dd;
        d4_2.z = dd;
        dd = d2[NX - 3] - c2[NX - 3] * dd;
        d4_2.y = dd;
        dd = d2[NX - 4] - c2[NX - 4] * dd;
        d4_2.x = dd;
        dd = d2[NX - 5] - c2[NX - 5] * dd;
        d4.w = dd;
        dd = d2[NX - 6] - c2[NX - 6] * dd;
        d4.z = dd;
        dd = d2[NX - 7] - c2[NX - 7] * dd;
        d4.y = dd;
        dd = d2[NX - 8] - c2[NX - 8] * dd;
        d4.x = dd;
        d_d[indg + 1] = d4_2;
        d_d[indg] = d4;
        for (i = NX - 9; i >= 0; i -= 8) {
            indg = indg - 2;
            dd = d2[i] - c2[i] * dd;
            d4_2.w = dd;
            dd = d2[i - 1] - c2[i - 1] * dd;
            d4_2.z = dd;
            dd = d2[i - 2] - c2[i - 2] * dd;
            d4_2.y = dd;
            dd = d2[i - 3] - c2[i - 3] * dd;
            d4_2.x = dd;
            dd = d2[i - 4] - c2[i - 4] * dd;
            d4.w = dd;
            dd = d2[i - 5] - c2[i - 5] * dd;
            d4.z = dd;
            dd = d2[i - 6] - c2[i - 6] * dd;
            d4.y = dd;
            dd = d2[i - 7] - c2[i - 7] * dd;
            d4.x = dd;
            d_d[indg + 1] = d4_2;
            d_d[indg] = d4;
        }
    }
}
//
// new tri-diagonal solve in x-direction: a parallel, shared-memory solver.
// One thread block per x-line (blockIdx = (j,k)); the block cooperatively
// eliminates the system in log2(blockDim.x) forward steps (doubling the
// stride of tm/t/tp each round) and then propagates the solution back in the
// reverse pass.  The result overwrites d_d.
//
// Preconditions suggested by the code (TODO confirm against caller):
// blockDim.x == NX, NX a power of two, and NX <= 256 (shared arrays a/c/d).
//
__global__ void GPU_adi_x_new(int NX, int NY, int NZ,
                              const float* __restrict__ d_a,
                              const float* __restrict__ d_b,
                              const float* __restrict__ d_c,
                              float* __restrict__ d_d)
{
    int j, k, indg, t, tm, tp, nt, shift = 0;
    float bbi;
    __shared__ float a[256], c[256], d[256];
    //
    // set up indices for main block: one thread per x-point of this line
    //
    t = threadIdx.x;
    j = blockIdx.x;
    k = blockIdx.y;
    indg = t + NX*(j + k*NY);
    // normalise the row so the diagonal is 1; store off-diagonals negated
    bbi = 1.0f / d_b[indg];
    a[t] = -bbi * d_a[indg];
    c[t] = -bbi * d_c[indg];
    d[t] = bbi * d_d[indg];
    // forward pass: each round halves the number of active threads (nt) and
    // doubles the stride between the row (t) and its neighbours (tm, tp)
    tm = 2 * t;
    t = tm + 1;
    tp = tm + 2;
    for (nt = blockDim.x / 2; nt>0; nt >>= 1) {
        shift++;
        __syncthreads();    // all shared updates of the previous round visible
        if (threadIdx.x < nt) {
            bbi = 1.0f;
            if (tm >= 0) {
                // eliminate the lower neighbour
                bbi -= a[t] * c[tm];
                d[t] += a[t] * d[tm];
                a[t] = a[t] * a[tm];
            }
            if (tp<NX) {
                // eliminate the upper neighbour
                bbi -= c[t] * a[tp];
                d[t] += c[t] * d[tp];
                c[t] = c[t] * c[tp];
            }
            bbi = 1.0f / bbi;
            d[t] *= bbi;
            a[t] *= bbi;
            c[t] *= bbi;
            // double the stride for the next round
            tm = 2 * tm + 1;
            t = 2 * t + 1;
            tp = 2 * tp + 1;
        }
    }
    // reverse pass: undo the strides and substitute solved values outward
    for (; shift>0; shift--) {
        nt = blockDim.x >> shift;
        __syncthreads();
        if (threadIdx.x < nt) {
            tm >>= 1;
            t >>= 1;
            tp >>= 1;
            if (tm >= 0) d[tm] += c[tm] * d[t];
            if (tp<NX) d[tp] += a[tp] * d[t];
        }
    }
    __syncthreads();        // ensure d[] is final before the coalesced store
    d_d[indg] = d[threadIdx.x];
}
//
// old tri-diagonal solve in x-direction: classic per-thread Thomas algorithm.
// Each thread owns one x-line selected by (j,k) and solves it sequentially,
// overwriting the r.h.s. d_d with the solution.
// Precondition visible in the code: NX <= 256 (scratch arrays c/d hold one
// modified coefficient per x-point).
//
__global__ void GPU_adi_x(int NX, int NY, int NZ,
                          const float* __restrict__ d_a,
                          const float* __restrict__ d_b,
                          const float* __restrict__ d_c,
                          float* __restrict__ d_d)
{
    int i, j, k, indg;
    float aa, bb, cc, dd, c[256], d[256];
    //
    // set up indices for main block
    //
    j = threadIdx.x + blockIdx.x*blockDim.x; // global indices
    k = threadIdx.y + blockIdx.y*blockDim.y;
    indg = NX*(j + k*NY);                    // first element of this x-line
    if ((j<NY) && (k<NZ)) {
        //
        // forward pass: eliminate the sub-diagonal, caching c'/d'
        //
        bb = 1.0f / d_b[indg];
        cc = bb*d_c[indg];
        dd = bb*d_d[indg];
        c[0] = cc;
        d[0] = dd;
        for (i = 1; i<NX; i++) {
            indg = indg + 1;
            aa = d_a[indg];
            bb = d_b[indg] - aa*cc;
            dd = d_d[indg] - aa*dd;
            bb = 1.0f / bb;
            cc = bb*d_c[indg];
            dd = bb*dd;
            c[i] = cc;
            d[i] = dd;
        }
        //
        // reverse pass: back-substitution, writing the solution in place
        //
        d_d[indg] = dd;
        for (i = NX - 2; i >= 0; i--) {
            indg = indg - 1;
            dd = d[i] - c[i] * dd;
            d_d[indg] = dd;
        }
    }
}
//
// tri-diagonal solve in y-direction: same per-thread Thomas algorithm as
// GPU_adi_x, but the line is selected by (i,k) and traversed with stride NX.
// Note the y-stride makes these global accesses coalesced across threads.
// Precondition visible in the code: NY <= 256 (scratch arrays c/d).
//
__global__ void GPU_adi_y(int NX, int NY, int NZ,
                          const float* __restrict__ d_a,
                          const float* __restrict__ d_b,
                          const float* __restrict__ d_c,
                          float* __restrict__ d_d)
{
    int i, j, k, indg;
    float aa, bb, cc, dd, c[256], d[256];
    //
    // set up indices for main block
    //
    i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
    k = threadIdx.y + blockIdx.y*blockDim.y;
    indg = i + k*NX*NY;                      // first element of this y-line
    if ((i<NX) && (k<NZ)) {
        //
        // forward pass
        //
        bb = 1.0f / d_b[indg];
        cc = bb*d_c[indg];
        dd = bb*d_d[indg];
        c[0] = cc;
        d[0] = dd;
        for (j = 1; j<NY; j++) {
            indg = indg + NX;                // step one row in y
            aa = d_a[indg];
            bb = d_b[indg] - aa*cc;
            dd = d_d[indg] - aa*dd;
            bb = 1.0f / bb;
            cc = bb*d_c[indg];
            dd = bb*dd;
            c[j] = cc;
            d[j] = dd;
        }
        //
        // reverse pass
        //
        d_d[indg] = dd;
        for (j = NY - 2; j >= 0; j--) {
            indg = indg - NX;
            dd = d[j] - c[j] * dd;
            d_d[indg] = dd;
        }
    }
}
//
// tri-diagonal solve in z-direction, and update solution: same per-thread
// Thomas algorithm, line selected by (i,j), stride NX*NY; unlike the x/y
// solves, the back-substituted increments are ADDED into d_u rather than
// overwriting d_d.
// Precondition visible in the code: NZ <= 256 (scratch arrays c/d).
//
__global__ void GPU_adi_z(int NX, int NY, int NZ,
                          float* __restrict__ d_u,
                          const float* __restrict__ d_a,
                          const float* __restrict__ d_b,
                          const float* __restrict__ d_c,
                          const float* __restrict__ d_d)
{
    int i, j, k, indg, off;
    float aa, bb, cc, dd, c[256], d[256];
    //
    // set up indices for main block
    //
    i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
    j = threadIdx.y + blockIdx.y*blockDim.y;
    indg = i + j*NX;
    off = NX*NY;                             // z-stride between k-planes
    if ((i<NX) && (j<NY)) {
        //
        // forward pass
        //
        bb = 1.0f / d_b[indg];
        cc = bb*d_c[indg];
        dd = bb*d_d[indg];
        c[0] = cc;
        d[0] = dd;
        for (k = 1; k<NZ; k++) {
            indg = indg + off;
            aa = d_a[indg];
            bb = d_b[indg] - aa*cc;
            dd = d_d[indg] - aa*dd;
            bb = 1.0f / bb;
            cc = bb*d_c[indg];
            dd = bb*dd;
            c[k] = cc;
            d[k] = dd;
        }
        //
        // reverse pass: accumulate the increments into the solution field
        //
        d_u[indg] += dd;
        for (k = NZ - 2; k >= 0; k--) {
            indg = indg - off;
            dd = d[k] - c[k] * dd;
            d_u[indg] += dd;
        }
    }
}
	.file	"tmpxft_0017205a_00000000-6_adi3d_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit-style helper: unregisters the CUDA fat binary using the handle
# cached in _ZL20__cudaFatCubinHandle.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Device stub for GPU_adi_rhs(int,int,int,float, const float*, float*x10):
# spills all 15 kernel arguments (4 in registers/xmm0, 11 pointers split
# between registers and the caller's stack), builds the void* argument table,
# pops the <<<...>>> configuration and calls cudaLaunchKernel.  Guarded by a
# stack-protector canary because of the large on-stack argument area.
.globl _Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.type _Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, @function
_Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_:
.LFB2051:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
# Spill the scalar args: NX, NY, NZ, lam.
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movss %xmm0, (%rsp)
# Stack-protector canary.
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
# Argument-pointer table starts at 176(%rsp); one entry per parameter.
leaq 12(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
leaq 4(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
movq %rcx, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 208(%rsp)
movq %r8, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 216(%rsp)
movq %r9, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 224(%rsp)
# Remaining eight pointer args arrive on the caller's stack.
movq 320(%rsp), %rax
movq %rax, 48(%rsp)
leaq 48(%rsp), %rax
movq %rax, 232(%rsp)
movq 328(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 240(%rsp)
movq 336(%rsp), %rax
movq %rax, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, 248(%rsp)
movq 344(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 256(%rsp)
movq 352(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 264(%rsp)
movq 360(%rsp), %rax
movq %rax, 88(%rsp)
leaq 88(%rsp), %rax
movq %rax, 272(%rsp)
movq 368(%rsp), %rax
movq %rax, 96(%rsp)
leaq 96(%rsp), %rax
movq %rax, 280(%rsp)
movq 376(%rsp), %rax
movq %rax, 104(%rsp)
leaq 104(%rsp), %rax
movq %rax, 288(%rsp)
# Default grid/block dims (1,1,1)x(1,1,1) before popping the real config.
movl $1, 128(%rsp)
movl $1, 132(%rsp)
movl $1, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
leaq 120(%rsp), %rcx
leaq 112(%rsp), %rdx
leaq 140(%rsp), %rsi
leaq 128(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
# Verify the stack canary before returning.
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
# Configuration popped OK: push shared-mem size and stream, then launch.
pushq 120(%rsp)
.cfi_def_cfa_offset 328
pushq 120(%rsp)
.cfi_def_cfa_offset 336
leaq 192(%rsp), %r9
movq 156(%rsp), %rcx
movl 164(%rsp), %r8d
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
leaq _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 320
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, .-_Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
# Host-callable trampoline with the kernel's own mangled name: re-pushes the
# eight stack-passed pointer arguments and forwards to the stub above.
.globl _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.type _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, @function
_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 72(%rsp)
.cfi_def_cfa_offset 24
pushq 72(%rsp)
.cfi_def_cfa_offset 32
pushq 72(%rsp)
.cfi_def_cfa_offset 40
pushq 72(%rsp)
.cfi_def_cfa_offset 48
pushq 72(%rsp)
.cfi_def_cfa_offset 56
pushq 72(%rsp)
.cfi_def_cfa_offset 64
pushq 72(%rsp)
.cfi_def_cfa_offset 72
pushq 72(%rsp)
.cfi_def_cfa_offset 80
call _Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
addq $72, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, .-_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
# Device stub for GPU_adi_x_float4(int,int,int, const float4*, const float4*,
# const float4*, float4*): spills the seven arguments (last pointer arrives on
# the stack), builds the argument table, pops the launch configuration and
# calls cudaLaunchKernel.
.globl _Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
.type _Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_, @function
_Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_:
.LFB2053:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# Spill NX, NY, NZ.
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
# Stack-protector canary.
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# Argument-pointer table at 112(%rsp).
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
# Seventh argument (d_d) comes from the caller's stack.
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
# Default (1,1,1) dims before popping the pushed configuration.
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
# Canary check on all exit paths.
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_, .-_Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
# Host-callable trampoline carrying the kernel's mangled name: re-pushes the
# single stack-passed argument and forwards to the stub.
.globl _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_
.type _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_, @function
_Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_:
.LFB2054:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_, .-_Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_
# Device stub for GPU_adi_x_float4_2 -- identical layout to the
# GPU_adi_x_float4 stub above (same 7-argument signature), differing only in
# the kernel symbol passed to cudaLaunchKernel.
.globl _Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
.type _Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_, @function
_Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_:
.LFB2055:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# Spill NX, NY, NZ and set up the stack-protector canary.
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# Argument-pointer table at 112(%rsp).
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_, .-_Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
# Host-callable trampoline for GPU_adi_x_float4_2.
.globl _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_
.type _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_, @function
_Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_:
.LFB2056:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_, .-_Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_
# Device stub for GPU_adi_x_new(int,int,int, const float*, const float*,
# const float*, float*) -- same 7-argument spill/launch pattern as the
# float4 stubs above, launching the shared-memory solver kernel.
.globl _Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.type _Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, @function
_Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf:
.LFB2057:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# Spill NX, NY, NZ and set up the stack-protector canary.
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# Argument-pointer table at 112(%rsp).
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rssp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z13GPU_adi_x_newiiiPKfS0_S0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, .-_Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
# Host-callable trampoline for GPU_adi_x_new.
.globl _Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.type _Z13GPU_adi_x_newiiiPKfS0_S0_Pf, @function
_Z13GPU_adi_x_newiiiPKfS0_S0_Pf:
.LFB2058:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z13GPU_adi_x_newiiiPKfS0_S0_Pf, .-_Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.globl _Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.type _Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, @function
_Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf:
.LFB2059:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9GPU_adi_xiiiPKfS0_S0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, .-_Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.globl _Z9GPU_adi_xiiiPKfS0_S0_Pf
.type _Z9GPU_adi_xiiiPKfS0_S0_Pf, @function
_Z9GPU_adi_xiiiPKfS0_S0_Pf:
.LFB2060:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z9GPU_adi_xiiiPKfS0_S0_Pf, .-_Z9GPU_adi_xiiiPKfS0_S0_Pf
.globl _Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.type _Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, @function
_Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf:
.LFB2061:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9GPU_adi_yiiiPKfS0_S0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, .-_Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.globl _Z9GPU_adi_yiiiPKfS0_S0_Pf
.type _Z9GPU_adi_yiiiPKfS0_S0_Pf, @function
_Z9GPU_adi_yiiiPKfS0_S0_Pf:
.LFB2062:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z9GPU_adi_yiiiPKfS0_S0_Pf, .-_Z9GPU_adi_yiiiPKfS0_S0_Pf
.globl _Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_
.type _Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_, @function
_Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_:
.LFB2063:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rcx, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
movq %r8, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
movq %r9, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
movq 224(%rsp), %rax
movq %rax, 48(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
movq 232(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 184(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z9GPU_adi_ziiiPfPKfS1_S1_S1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_, .-_Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_
.globl _Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.type _Z9GPU_adi_ziiiPfPKfS1_S1_S1_, @function
_Z9GPU_adi_ziiiPfPKfS1_S1_S1_:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _Z9GPU_adi_ziiiPfPKfS1_S1_S1_, .-_Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9GPU_adi_ziiiPfPKfS1_S1_S1_"
.LC1:
.string "_Z9GPU_adi_yiiiPKfS0_S0_Pf"
.LC2:
.string "_Z9GPU_adi_xiiiPKfS0_S0_Pf"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "_Z13GPU_adi_x_newiiiPKfS0_S0_Pf"
.align 8
.LC4:
.string "_Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_"
.align 8
.LC5:
.string "_Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_"
.align 8
.LC6:
.string "_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2066:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9GPU_adi_ziiiPfPKfS1_S1_S1_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9GPU_adi_yiiiPKfS0_S0_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z9GPU_adi_xiiiPKfS0_S0_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z13GPU_adi_x_newiiiPKfS0_S0_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | ////////////////////////////////////////////////////////////////////////
// define kernel block size for
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 8
// device code
__global__ void GPU_adi_rhs(int NX, int NY, int NZ, float lam,
const float* __restrict__ d_u,
float* __restrict__ d_du,
float* __restrict__ d_ax,
float* __restrict__ d_bx,
float* __restrict__ d_cx,
float* __restrict__ d_ay,
float* __restrict__ d_by,
float* __restrict__ d_cy,
float* __restrict__ d_az,
float* __restrict__ d_bz,
float* __restrict__ d_cz)
{
int i, j, k, indg, active;
float du, a, b, c;
int NXM1 = NX - 1;
int NYM1 = NY - 1;
int NZM1 = NZ - 1;
#define IOFF 1
#define JOFF NX
#define KOFF NX*NY
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*BLOCK_X;
j = threadIdx.y + blockIdx.y*BLOCK_Y;
indg = i + j*NX;
active = (i<NX) && (j<NY);
//
// loop over k-planes
//
for (k = 0; k<NZ; k++) {
//
// calculate r.h.s. and set a,b,c, coefficients
//
if (active) {
if (i == 0 || i == NXM1 || j == 0 || j == NYM1 || k == 0 || k == NZM1) {
du = 0.0f; // Dirichlet b.c.'s
a = 0.0f;
b = 1.0f;
c = 0.0f;
}
else {
du = lam * (d_u[indg - IOFF] + d_u[indg + IOFF]
+ d_u[indg - JOFF] + d_u[indg + JOFF]
+ d_u[indg - KOFF] + d_u[indg + KOFF] - 6.0f*d_u[indg]);
a = -0.5f*lam;
b = 1.0f + lam;
c = -0.5f*lam;
}
d_du[indg] = du;
d_ax[indg] = a;
d_bx[indg] = b;
d_cx[indg] = c;
d_ay[indg] = a;
d_by[indg] = b;
d_cy[indg] = c;
d_az[indg] = a;
d_bz[indg] = b;
d_cz[indg] = c;
indg += KOFF;
}
}
}
__global__ void GPU_adi_x_float4(int NX, int NY, int NZ,
const float4* __restrict__ d_a,
const float4* __restrict__ d_b,
const float4* __restrict__ d_c,
float4* __restrict__ d_d) {
int i, j, k, indg;
float aa, bb, cc, dd, c2[256], d2[256];
float4 a4, b4, c4, d4;
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x;
k = threadIdx.y + blockIdx.y*blockDim.y;
indg = NX*(j + k*NY) / 4;
if ((j<NY) && (k<NZ)) {
//
// forward pass
//
a4 = d_a[indg];
b4 = d_b[indg];
c4 = d_c[indg];
d4 = d_d[indg];
bb = 1.0f / b4.x;
cc = bb * c4.x;
dd = bb * d4.x;
c2[0] = cc;
d2[0] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[1] = cc;
d2[1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[2] = cc;
d2[2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[3] = cc;
d2[3] = dd;
for (i = 4; i<NX; i += 4) {
indg = indg + 1;
a4 = d_a[indg];
b4 = d_b[indg];
c4 = d_c[indg];
d4 = d_d[indg];
aa = a4.x;
bb = b4.x - aa*cc;
dd = d4.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.x;
dd = bb*dd;
c2[i] = cc;
d2[i] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[i + 1] = cc;
d2[i + 1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[i + 2] = cc;
d2[i + 2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[i + 3] = cc;
d2[i + 3] = dd;
}
//
// reverse pass
//
d4.w = dd;
dd = d2[NX - 2] - c2[NX - 2] * dd;
d4.z = dd;
dd = d2[NX - 3] - c2[NX - 3] * dd;
d4.y = dd;
dd = d2[NX - 4] - c2[NX - 4] * dd;
d4.x = dd;
d_d[indg] = d4;
for (i = NX - 5; i >= 0; i -= 4) {
indg = indg - 1;
dd = d2[i] - c2[i] * dd;
d4.w = dd;
dd = d2[i - 1] - c2[i - 1] * dd;
d4.z = dd;
dd = d2[i - 2] - c2[i - 2] * dd;
d4.y = dd;
dd = d2[i - 3] - c2[i - 3] * dd;
d4.x = dd;
d_d[indg] = d4;
}
}
}
__global__ void GPU_adi_x_float4_2(int NX, int NY, int NZ,
const float4* __restrict__ d_a,
const float4* __restrict__ d_b,
const float4* __restrict__ d_c,
float4* __restrict__ d_d) {
int i, j, k, indg;
float aa, bb, cc, dd, c2[256], d2[256];
float4 a4, b4, c4, d4, a4_2, b4_2, c4_2, d4_2;
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x;
k = threadIdx.y + blockIdx.y*blockDim.y;
indg = NX*(j + k*NY) / 4;
if ((j<NY) && (k<NZ)) {
//
// forward pass
//
a4 = d_a[indg];
a4_2 = d_a[indg + 1];
b4 = d_b[indg];
b4_2 = d_b[indg + 1];
c4 = d_c[indg];
c4_2 = d_c[indg + 1];
d4 = d_d[indg];
d4_2 = d_d[indg + 1];
bb = 1.0f / b4.x;
cc = bb * c4.x;
dd = bb * d4.x;
c2[0] = cc;
d2[0] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[1] = cc;
d2[1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[2] = cc;
d2[2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[3] = cc;
d2[3] = dd;
aa = a4_2.x;
bb = b4_2.x - aa*cc;
dd = d4_2.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.x;
dd = bb*dd;
c2[4] = cc;
d2[4] = dd;
aa = a4_2.y;
bb = b4_2.y - aa*cc;
dd = d4_2.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.y;
dd = bb*dd;
c2[5] = cc;
d2[5] = dd;
aa = a4_2.z;
bb = b4_2.z - aa*cc;
dd = d4_2.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.z;
dd = bb*dd;
c2[6] = cc;
d2[6] = dd;
aa = a4_2.w;
bb = b4_2.w - aa*cc;
dd = d4_2.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.w;
dd = bb*dd;
c2[7] = cc;
d2[7] = dd;
for (i = 8; i<NX; i += 8) {
indg = indg + 2;
a4 = d_a[indg];
a4_2 = d_a[indg + 1];
__threadfence_block();
b4 = d_b[indg];
b4_2 = d_b[indg + 1];
__threadfence_block();
c4 = d_c[indg];
c4_2 = d_c[indg + 1];
__threadfence_block();
d4 = d_d[indg];
d4_2 = d_d[indg + 1];
aa = a4.x;
bb = b4.x - aa*cc;
dd = d4.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.x;
dd = bb*dd;
c2[i] = cc;
d2[i] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[i + 1] = cc;
d2[i + 1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[i + 2] = cc;
d2[i + 2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[i + 3] = cc;
d2[i + 3] = dd;
aa = a4_2.x;
bb = b4_2.x - aa*cc;
dd = d4_2.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.x;
dd = bb*dd;
c2[i + 4] = cc;
d2[i + 4] = dd;
aa = a4_2.y;
bb = b4_2.y - aa*cc;
dd = d4_2.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.y;
dd = bb*dd;
c2[i + 5] = cc;
d2[i + 5] = dd;
aa = a4_2.z;
bb = b4_2.z - aa*cc;
dd = d4_2.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.z;
dd = bb*dd;
c2[i + 6] = cc;
d2[i + 6] = dd;
aa = a4_2.w;
bb = b4_2.w - aa*cc;
dd = d4_2.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.w;
dd = bb*dd;
c2[i + 7] = cc;
d2[i + 7] = dd;
}
//
// reverse pass
//
d4_2.w = dd;
dd = d2[NX - 2] - c2[NX - 2] * dd;
d4_2.z = dd;
dd = d2[NX - 3] - c2[NX - 3] * dd;
d4_2.y = dd;
dd = d2[NX - 4] - c2[NX - 4] * dd;
d4_2.x = dd;
dd = d2[NX - 5] - c2[NX - 5] * dd;
d4.w = dd;
dd = d2[NX - 6] - c2[NX - 6] * dd;
d4.z = dd;
dd = d2[NX - 7] - c2[NX - 7] * dd;
d4.y = dd;
dd = d2[NX - 8] - c2[NX - 8] * dd;
d4.x = dd;
d_d[indg + 1] = d4_2;
d_d[indg] = d4;
for (i = NX - 9; i >= 0; i -= 8) {
indg = indg - 2;
dd = d2[i] - c2[i] * dd;
d4_2.w = dd;
dd = d2[i - 1] - c2[i - 1] * dd;
d4_2.z = dd;
dd = d2[i - 2] - c2[i - 2] * dd;
d4_2.y = dd;
dd = d2[i - 3] - c2[i - 3] * dd;
d4_2.x = dd;
dd = d2[i - 4] - c2[i - 4] * dd;
d4.w = dd;
dd = d2[i - 5] - c2[i - 5] * dd;
d4.z = dd;
dd = d2[i - 6] - c2[i - 6] * dd;
d4.y = dd;
dd = d2[i - 7] - c2[i - 7] * dd;
d4.x = dd;
d_d[indg + 1] = d4_2;
d_d[indg] = d4;
}
}
}
//
// new tri-diagonal solve in x-direction
//
__global__ void GPU_adi_x_new(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int j, k, indg, t, tm, tp, nt, shift = 0;
float bbi;
__shared__ float a[256], c[256], d[256];
//
// set up indices for main block
//
t = threadIdx.x;
j = blockIdx.x;
k = blockIdx.y;
indg = t + NX*(j + k*NY);
bbi = 1.0f / d_b[indg];
a[t] = -bbi * d_a[indg];
c[t] = -bbi * d_c[indg];
d[t] = bbi * d_d[indg];
// forward pass
tm = 2 * t;
t = tm + 1;
tp = tm + 2;
for (nt = blockDim.x / 2; nt>0; nt >>= 1) {
shift++;
__syncthreads();
if (threadIdx.x < nt) {
bbi = 1.0f;
if (tm >= 0) {
bbi -= a[t] * c[tm];
d[t] += a[t] * d[tm];
a[t] = a[t] * a[tm];
}
if (tp<NX) {
bbi -= c[t] * a[tp];
d[t] += c[t] * d[tp];
c[t] = c[t] * c[tp];
}
bbi = 1.0f / bbi;
d[t] *= bbi;
a[t] *= bbi;
c[t] *= bbi;
tm = 2 * tm + 1;
t = 2 * t + 1;
tp = 2 * tp + 1;
}
}
// reverse pass
for (; shift>0; shift--) {
nt = blockDim.x >> shift;
__syncthreads();
if (threadIdx.x < nt) {
tm >>= 1;
t >>= 1;
tp >>= 1;
if (tm >= 0) d[tm] += c[tm] * d[t];
if (tp<NX) d[tp] += a[tp] * d[t];
}
}
__syncthreads();
d_d[indg] = d[threadIdx.x];
}
//
// old tri-diagonal solve in x-direction
//
__global__ void GPU_adi_x(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int i, j, k, indg;
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x; // global indices
k = threadIdx.y + blockIdx.y*blockDim.y;
indg = NX*(j + k*NY);
if ((j<NY) && (k<NZ)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (i = 1; i<NX; i++) {
indg = indg + 1;
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[i] = cc;
d[i] = dd;
}
//
// reverse pass
//
d_d[indg] = dd;
for (i = NX - 2; i >= 0; i--) {
indg = indg - 1;
dd = d[i] - c[i] * dd;
d_d[indg] = dd;
}
}
}
//
// tri-diagonal solve in y-direction
//
__global__ void GPU_adi_y(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int i, j, k, indg;
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
k = threadIdx.y + blockIdx.y*blockDim.y;
indg = i + k*NX*NY;
if ((i<NX) && (k<NZ)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (j = 1; j<NY; j++) {
indg = indg + NX;
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[j] = cc;
d[j] = dd;
}
//
// reverse pass
//
d_d[indg] = dd;
for (j = NY - 2; j >= 0; j--) {
indg = indg - NX;
dd = d[j] - c[j] * dd;
d_d[indg] = dd;
}
}
}
//
// tri-diagonal solve in z-direction, and update solution
//
__global__ void GPU_adi_z(int NX, int NY, int NZ,
float* __restrict__ d_u,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
const float* __restrict__ d_d)
{
int i, j, k, indg, off;
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
j = threadIdx.y + blockIdx.y*blockDim.y;
indg = i + j*NX;
off = NX*NY;
if ((i<NX) && (j<NY)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (k = 1; k<NZ; k++) {
indg = indg + off;
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[k] = cc;
d[k] = dd;
}
//
// reverse pass
//
d_u[indg] += dd;
for (k = NZ - 2; k >= 0; k--) {
indg = indg - off;
dd = d[k] - c[k] * dd;
d_u[indg] += dd;
}
}
} | #include <hip/hip_runtime.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size for
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 8
// device code
__global__ void GPU_adi_rhs(int NX, int NY, int NZ, float lam,
const float* __restrict__ d_u,
float* __restrict__ d_du,
float* __restrict__ d_ax,
float* __restrict__ d_bx,
float* __restrict__ d_cx,
float* __restrict__ d_ay,
float* __restrict__ d_by,
float* __restrict__ d_cy,
float* __restrict__ d_az,
float* __restrict__ d_bz,
float* __restrict__ d_cz)
{
int i, j, k, indg, active;
float du, a, b, c;
int NXM1 = NX - 1;
int NYM1 = NY - 1;
int NZM1 = NZ - 1;
#define IOFF 1
#define JOFF NX
#define KOFF NX*NY
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*BLOCK_X;
j = threadIdx.y + blockIdx.y*BLOCK_Y;
indg = i + j*NX;
active = (i<NX) && (j<NY);
//
// loop over k-planes
//
for (k = 0; k<NZ; k++) {
//
// calculate r.h.s. and set a,b,c, coefficients
//
if (active) {
if (i == 0 || i == NXM1 || j == 0 || j == NYM1 || k == 0 || k == NZM1) {
du = 0.0f; // Dirichlet b.c.'s
a = 0.0f;
b = 1.0f;
c = 0.0f;
}
else {
du = lam * (d_u[indg - IOFF] + d_u[indg + IOFF]
+ d_u[indg - JOFF] + d_u[indg + JOFF]
+ d_u[indg - KOFF] + d_u[indg + KOFF] - 6.0f*d_u[indg]);
a = -0.5f*lam;
b = 1.0f + lam;
c = -0.5f*lam;
}
d_du[indg] = du;
d_ax[indg] = a;
d_bx[indg] = b;
d_cx[indg] = c;
d_ay[indg] = a;
d_by[indg] = b;
d_cy[indg] = c;
d_az[indg] = a;
d_bz[indg] = b;
d_cz[indg] = c;
indg += KOFF;
}
}
}
__global__ void GPU_adi_x_float4(int NX, int NY, int NZ,
const float4* __restrict__ d_a,
const float4* __restrict__ d_b,
const float4* __restrict__ d_c,
float4* __restrict__ d_d) {
int i, j, k, indg;
float aa, bb, cc, dd, c2[256], d2[256];
float4 a4, b4, c4, d4;
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x;
k = threadIdx.y + blockIdx.y*blockDim.y;
indg = NX*(j + k*NY) / 4;
if ((j<NY) && (k<NZ)) {
//
// forward pass
//
a4 = d_a[indg];
b4 = d_b[indg];
c4 = d_c[indg];
d4 = d_d[indg];
bb = 1.0f / b4.x;
cc = bb * c4.x;
dd = bb * d4.x;
c2[0] = cc;
d2[0] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[1] = cc;
d2[1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[2] = cc;
d2[2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[3] = cc;
d2[3] = dd;
for (i = 4; i<NX; i += 4) {
indg = indg + 1;
a4 = d_a[indg];
b4 = d_b[indg];
c4 = d_c[indg];
d4 = d_d[indg];
aa = a4.x;
bb = b4.x - aa*cc;
dd = d4.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.x;
dd = bb*dd;
c2[i] = cc;
d2[i] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[i + 1] = cc;
d2[i + 1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[i + 2] = cc;
d2[i + 2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[i + 3] = cc;
d2[i + 3] = dd;
}
//
// reverse pass
//
d4.w = dd;
dd = d2[NX - 2] - c2[NX - 2] * dd;
d4.z = dd;
dd = d2[NX - 3] - c2[NX - 3] * dd;
d4.y = dd;
dd = d2[NX - 4] - c2[NX - 4] * dd;
d4.x = dd;
d_d[indg] = d4;
for (i = NX - 5; i >= 0; i -= 4) {
indg = indg - 1;
dd = d2[i] - c2[i] * dd;
d4.w = dd;
dd = d2[i - 1] - c2[i - 1] * dd;
d4.z = dd;
dd = d2[i - 2] - c2[i - 2] * dd;
d4.y = dd;
dd = d2[i - 3] - c2[i - 3] * dd;
d4.x = dd;
d_d[indg] = d4;
}
}
}
__global__ void GPU_adi_x_float4_2(int NX, int NY, int NZ,
const float4* __restrict__ d_a,
const float4* __restrict__ d_b,
const float4* __restrict__ d_c,
float4* __restrict__ d_d) {
int i, j, k, indg;
float aa, bb, cc, dd, c2[256], d2[256];
float4 a4, b4, c4, d4, a4_2, b4_2, c4_2, d4_2;
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x;
k = threadIdx.y + blockIdx.y*blockDim.y;
indg = NX*(j + k*NY) / 4;
if ((j<NY) && (k<NZ)) {
//
// forward pass
//
a4 = d_a[indg];
a4_2 = d_a[indg + 1];
b4 = d_b[indg];
b4_2 = d_b[indg + 1];
c4 = d_c[indg];
c4_2 = d_c[indg + 1];
d4 = d_d[indg];
d4_2 = d_d[indg + 1];
bb = 1.0f / b4.x;
cc = bb * c4.x;
dd = bb * d4.x;
c2[0] = cc;
d2[0] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[1] = cc;
d2[1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[2] = cc;
d2[2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[3] = cc;
d2[3] = dd;
aa = a4_2.x;
bb = b4_2.x - aa*cc;
dd = d4_2.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.x;
dd = bb*dd;
c2[4] = cc;
d2[4] = dd;
aa = a4_2.y;
bb = b4_2.y - aa*cc;
dd = d4_2.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.y;
dd = bb*dd;
c2[5] = cc;
d2[5] = dd;
aa = a4_2.z;
bb = b4_2.z - aa*cc;
dd = d4_2.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.z;
dd = bb*dd;
c2[6] = cc;
d2[6] = dd;
aa = a4_2.w;
bb = b4_2.w - aa*cc;
dd = d4_2.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.w;
dd = bb*dd;
c2[7] = cc;
d2[7] = dd;
for (i = 8; i<NX; i += 8) {
indg = indg + 2;
a4 = d_a[indg];
a4_2 = d_a[indg + 1];
__threadfence_block();
b4 = d_b[indg];
b4_2 = d_b[indg + 1];
__threadfence_block();
c4 = d_c[indg];
c4_2 = d_c[indg + 1];
__threadfence_block();
d4 = d_d[indg];
d4_2 = d_d[indg + 1];
aa = a4.x;
bb = b4.x - aa*cc;
dd = d4.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.x;
dd = bb*dd;
c2[i] = cc;
d2[i] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[i + 1] = cc;
d2[i + 1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[i + 2] = cc;
d2[i + 2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[i + 3] = cc;
d2[i + 3] = dd;
aa = a4_2.x;
bb = b4_2.x - aa*cc;
dd = d4_2.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.x;
dd = bb*dd;
c2[i + 4] = cc;
d2[i + 4] = dd;
aa = a4_2.y;
bb = b4_2.y - aa*cc;
dd = d4_2.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.y;
dd = bb*dd;
c2[i + 5] = cc;
d2[i + 5] = dd;
aa = a4_2.z;
bb = b4_2.z - aa*cc;
dd = d4_2.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.z;
dd = bb*dd;
c2[i + 6] = cc;
d2[i + 6] = dd;
aa = a4_2.w;
bb = b4_2.w - aa*cc;
dd = d4_2.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.w;
dd = bb*dd;
c2[i + 7] = cc;
d2[i + 7] = dd;
}
//
// reverse pass
//
d4_2.w = dd;
dd = d2[NX - 2] - c2[NX - 2] * dd;
d4_2.z = dd;
dd = d2[NX - 3] - c2[NX - 3] * dd;
d4_2.y = dd;
dd = d2[NX - 4] - c2[NX - 4] * dd;
d4_2.x = dd;
dd = d2[NX - 5] - c2[NX - 5] * dd;
d4.w = dd;
dd = d2[NX - 6] - c2[NX - 6] * dd;
d4.z = dd;
dd = d2[NX - 7] - c2[NX - 7] * dd;
d4.y = dd;
dd = d2[NX - 8] - c2[NX - 8] * dd;
d4.x = dd;
d_d[indg + 1] = d4_2;
d_d[indg] = d4;
for (i = NX - 9; i >= 0; i -= 8) {
indg = indg - 2;
dd = d2[i] - c2[i] * dd;
d4_2.w = dd;
dd = d2[i - 1] - c2[i - 1] * dd;
d4_2.z = dd;
dd = d2[i - 2] - c2[i - 2] * dd;
d4_2.y = dd;
dd = d2[i - 3] - c2[i - 3] * dd;
d4_2.x = dd;
dd = d2[i - 4] - c2[i - 4] * dd;
d4.w = dd;
dd = d2[i - 5] - c2[i - 5] * dd;
d4.z = dd;
dd = d2[i - 6] - c2[i - 6] * dd;
d4.y = dd;
dd = d2[i - 7] - c2[i - 7] * dd;
d4.x = dd;
d_d[indg + 1] = d4_2;
d_d[indg] = d4;
}
}
}
//
// new tri-diagonal solve in x-direction
//
__global__ void GPU_adi_x_new(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int j, k, indg, t, tm, tp, nt, shift = 0;
float bbi;
__shared__ float a[256], c[256], d[256];
//
// set up indices for main block
//
t = threadIdx.x;
j = blockIdx.x;
k = blockIdx.y;
indg = t + NX*(j + k*NY);
bbi = 1.0f / d_b[indg];
a[t] = -bbi * d_a[indg];
c[t] = -bbi * d_c[indg];
d[t] = bbi * d_d[indg];
// forward pass
tm = 2 * t;
t = tm + 1;
tp = tm + 2;
for (nt = blockDim.x / 2; nt>0; nt >>= 1) {
shift++;
__syncthreads();
if (threadIdx.x < nt) {
bbi = 1.0f;
if (tm >= 0) {
bbi -= a[t] * c[tm];
d[t] += a[t] * d[tm];
a[t] = a[t] * a[tm];
}
if (tp<NX) {
bbi -= c[t] * a[tp];
d[t] += c[t] * d[tp];
c[t] = c[t] * c[tp];
}
bbi = 1.0f / bbi;
d[t] *= bbi;
a[t] *= bbi;
c[t] *= bbi;
tm = 2 * tm + 1;
t = 2 * t + 1;
tp = 2 * tp + 1;
}
}
// reverse pass
for (; shift>0; shift--) {
nt = blockDim.x >> shift;
__syncthreads();
if (threadIdx.x < nt) {
tm >>= 1;
t >>= 1;
tp >>= 1;
if (tm >= 0) d[tm] += c[tm] * d[t];
if (tp<NX) d[tp] += a[tp] * d[t];
}
}
__syncthreads();
d_d[indg] = d[threadIdx.x];
}
//
// old tri-diagonal solve in x-direction
//
__global__ void GPU_adi_x(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int i, j, k, indg;
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x; // global indices
k = threadIdx.y + blockIdx.y*blockDim.y;
indg = NX*(j + k*NY);
if ((j<NY) && (k<NZ)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (i = 1; i<NX; i++) {
indg = indg + 1;
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[i] = cc;
d[i] = dd;
}
//
// reverse pass
//
d_d[indg] = dd;
for (i = NX - 2; i >= 0; i--) {
indg = indg - 1;
dd = d[i] - c[i] * dd;
d_d[indg] = dd;
}
}
}
//
// tri-diagonal solve in y-direction
//
// Tridiagonal solve along y (stride-NX walk); one thread per (i,k) line.
// The solution overwrites d_d.  Requires NY <= 256 (local buffer capacity).
__global__ void GPU_adi_y(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int i, j, k, indg;
// c[], d[] hold the per-line modified coefficients / r.h.s. (Thomas solve)
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
k = threadIdx.y + blockIdx.y*blockDim.y;
// start of the y-line: element (i, j=0, k) in an NX*NY*NZ row-major grid
indg = i + k*NX*NY;
if ((i<NX) && (k<NZ)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (j = 1; j<NY; j++) {
indg = indg + NX; // step one row in y
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[j] = cc;
d[j] = dd;
}
//
// reverse pass
//
d_d[indg] = dd;
for (j = NY - 2; j >= 0; j--) {
indg = indg - NX;
dd = d[j] - c[j] * dd;
d_d[indg] = dd;
}
}
}
//
// tri-diagonal solve in z-direction, and update solution
//
// Tridiagonal solve along z (stride NX*NY); one thread per (i,j) column.
// Unlike the x/y solves, the result is ACCUMULATED into d_u (d_u += solution),
// i.e. this applies the final ADI update.  Requires NZ <= 256.
__global__ void GPU_adi_z(int NX, int NY, int NZ,
float* __restrict__ d_u,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
const float* __restrict__ d_d)
{
int i, j, k, indg, off;
// c[], d[] hold the per-column modified coefficients / r.h.s.
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
j = threadIdx.y + blockIdx.y*blockDim.y;
indg = i + j*NX;  // element (i,j,k=0)
off = NX*NY;      // stride between consecutive k-planes
if ((i<NX) && (j<NY)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (k = 1; k<NZ; k++) {
indg = indg + off;
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[k] = cc;
d[k] = dd;
}
//
// reverse pass: accumulate the increment into the solution field
//
d_u[indg] += dd;
for (k = NZ - 2; k >= 0; k--) {
indg = indg - off;
dd = d[k] - c[k] * dd;
d_u[indg] += dd;
}
}
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size for
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 8
// device code
// Build the ADI right-hand side (discrete Laplacian scaled by lam) and the
// tridiagonal coefficients (a,b,c) for all three sweep directions in one pass.
// Grid layout: 2D grid of BLOCK_X x BLOCK_Y thread blocks covering the (i,j)
// plane; each thread walks its (i,j) column through all k-planes.
// Boundary points get Dirichlet rows (du=0, a=c=0, b=1).
__global__ void GPU_adi_rhs(int NX, int NY, int NZ, float lam,
const float* __restrict__ d_u,
float* __restrict__ d_du,
float* __restrict__ d_ax,
float* __restrict__ d_bx,
float* __restrict__ d_cx,
float* __restrict__ d_ay,
float* __restrict__ d_by,
float* __restrict__ d_cy,
float* __restrict__ d_az,
float* __restrict__ d_bz,
float* __restrict__ d_cz)
{
int i, j, k, indg, active;
float du, a, b, c;
int NXM1 = NX - 1;
int NYM1 = NY - 1;
int NZM1 = NZ - 1;
// neighbour offsets in the row-major NX*NY*NZ grid
#define IOFF 1
#define JOFF NX
#define KOFF NX*NY
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*BLOCK_X;
j = threadIdx.y + blockIdx.y*BLOCK_Y;
indg = i + j*NX;
active = (i<NX) && (j<NY);  // loop-invariant guard for this thread
//
// loop over k-planes
//
for (k = 0; k<NZ; k++) {
//
// calculate r.h.s. and set a,b,c, coefficients
//
if (active) {
if (i == 0 || i == NXM1 || j == 0 || j == NYM1 || k == 0 || k == NZM1) {
du = 0.0f; // Dirichlet b.c.'s
a = 0.0f;
b = 1.0f;
c = 0.0f;
}
else {
// 7-point Laplacian stencil scaled by lam
du = lam * (d_u[indg - IOFF] + d_u[indg + IOFF]
+ d_u[indg - JOFF] + d_u[indg + JOFF]
+ d_u[indg - KOFF] + d_u[indg + KOFF] - 6.0f*d_u[indg]);
a = -0.5f*lam;
b = 1.0f + lam;
c = -0.5f*lam;
}
// identical (a,b,c) written for all three sweep directions
d_du[indg] = du;
d_ax[indg] = a;
d_bx[indg] = b;
d_cx[indg] = c;
d_ay[indg] = a;
d_by[indg] = b;
d_cy[indg] = c;
d_az[indg] = a;
d_bz[indg] = b;
d_cz[indg] = c;
// advance to the next k-plane (only ever executed when active, which
// is loop-invariant, so inactive threads simply never use indg)
indg += KOFF;
}
}
}
// x-direction tridiagonal solve using float4 vector loads/stores: each thread
// owns one x-line and processes 4 consecutive elements per 16-byte access.
// Assumes NX is a multiple of 4 and NX <= 256 (scratch-buffer capacity), and
// that each line is 16-byte aligned -- TODO confirm with the allocator.
// The solution overwrites d_d.
__global__ void GPU_adi_x_float4(int NX, int NY, int NZ,
const float4* __restrict__ d_a,
const float4* __restrict__ d_b,
const float4* __restrict__ d_c,
float4* __restrict__ d_d) {
int i, j, k, indg;
// c2[], d2[] hold modified coefficients / r.h.s. for the whole line
float aa, bb, cc, dd, c2[256], d2[256];
float4 a4, b4, c4, d4;
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x;
k = threadIdx.y + blockIdx.y*blockDim.y;
// float4 index of the first vector of this line
indg = NX*(j + k*NY) / 4;
if ((j<NY) && (k<NZ)) {
//
// forward pass -- first vector handled out of the loop (element 0 has no
// sub-diagonal term), elements 1..3 fully unrolled
//
a4 = d_a[indg];
b4 = d_b[indg];
c4 = d_c[indg];
d4 = d_d[indg];
bb = 1.0f / b4.x;
cc = bb * c4.x;
dd = bb * d4.x;
c2[0] = cc;
d2[0] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[1] = cc;
d2[1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[2] = cc;
d2[2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[3] = cc;
d2[3] = dd;
// remaining vectors: 4 elements eliminated per iteration
for (i = 4; i<NX; i += 4) {
indg = indg + 1;
a4 = d_a[indg];
b4 = d_b[indg];
c4 = d_c[indg];
d4 = d_d[indg];
aa = a4.x;
bb = b4.x - aa*cc;
dd = d4.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.x;
dd = bb*dd;
c2[i] = cc;
d2[i] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[i + 1] = cc;
d2[i + 1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[i + 2] = cc;
d2[i + 2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[i + 3] = cc;
d2[i + 3] = dd;
}
//
// reverse pass -- back substitution, packing 4 results per float4 store
//
d4.w = dd;
dd = d2[NX - 2] - c2[NX - 2] * dd;
d4.z = dd;
dd = d2[NX - 3] - c2[NX - 3] * dd;
d4.y = dd;
dd = d2[NX - 4] - c2[NX - 4] * dd;
d4.x = dd;
d_d[indg] = d4;
for (i = NX - 5; i >= 0; i -= 4) {
indg = indg - 1;
dd = d2[i] - c2[i] * dd;
d4.w = dd;
dd = d2[i - 1] - c2[i - 1] * dd;
d4.z = dd;
dd = d2[i - 2] - c2[i - 2] * dd;
d4.y = dd;
dd = d2[i - 3] - c2[i - 3] * dd;
d4.x = dd;
d_d[indg] = d4;
}
}
}
// Variant of GPU_adi_x_float4 that processes TWO float4 vectors (8 elements)
// per iteration to increase memory-level parallelism.  Assumes NX is a
// multiple of 8 and NX <= 256 (scratch-buffer capacity).  The solution
// overwrites d_d.
// NOTE(review): the __threadfence_block() calls between the paired loads look
// like compiler-scheduling hints to keep the two vector loads grouped; they
// are not needed for correctness of this per-thread code -- confirm intent.
__global__ void GPU_adi_x_float4_2(int NX, int NY, int NZ,
const float4* __restrict__ d_a,
const float4* __restrict__ d_b,
const float4* __restrict__ d_c,
float4* __restrict__ d_d) {
int i, j, k, indg;
// c2[], d2[] hold modified coefficients / r.h.s. for the whole line
float aa, bb, cc, dd, c2[256], d2[256];
float4 a4, b4, c4, d4, a4_2, b4_2, c4_2, d4_2;
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x;
k = threadIdx.y + blockIdx.y*blockDim.y;
// float4 index of the first vector of this line
indg = NX*(j + k*NY) / 4;
if ((j<NY) && (k<NZ)) {
//
// forward pass -- first 8 elements peeled out of the loop (element 0 has
// no sub-diagonal term), elements 1..7 fully unrolled
//
a4 = d_a[indg];
a4_2 = d_a[indg + 1];
b4 = d_b[indg];
b4_2 = d_b[indg + 1];
c4 = d_c[indg];
c4_2 = d_c[indg + 1];
d4 = d_d[indg];
d4_2 = d_d[indg + 1];
bb = 1.0f / b4.x;
cc = bb * c4.x;
dd = bb * d4.x;
c2[0] = cc;
d2[0] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[1] = cc;
d2[1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[2] = cc;
d2[2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[3] = cc;
d2[3] = dd;
aa = a4_2.x;
bb = b4_2.x - aa*cc;
dd = d4_2.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.x;
dd = bb*dd;
c2[4] = cc;
d2[4] = dd;
aa = a4_2.y;
bb = b4_2.y - aa*cc;
dd = d4_2.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.y;
dd = bb*dd;
c2[5] = cc;
d2[5] = dd;
aa = a4_2.z;
bb = b4_2.z - aa*cc;
dd = d4_2.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.z;
dd = bb*dd;
c2[6] = cc;
d2[6] = dd;
aa = a4_2.w;
bb = b4_2.w - aa*cc;
dd = d4_2.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.w;
dd = bb*dd;
c2[7] = cc;
d2[7] = dd;
// remaining vectors: 8 elements eliminated per iteration
for (i = 8; i<NX; i += 8) {
indg = indg + 2;
a4 = d_a[indg];
a4_2 = d_a[indg + 1];
__threadfence_block();
b4 = d_b[indg];
b4_2 = d_b[indg + 1];
__threadfence_block();
c4 = d_c[indg];
c4_2 = d_c[indg + 1];
__threadfence_block();
d4 = d_d[indg];
d4_2 = d_d[indg + 1];
aa = a4.x;
bb = b4.x - aa*cc;
dd = d4.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.x;
dd = bb*dd;
c2[i] = cc;
d2[i] = dd;
aa = a4.y;
bb = b4.y - aa*cc;
dd = d4.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.y;
dd = bb*dd;
c2[i + 1] = cc;
d2[i + 1] = dd;
aa = a4.z;
bb = b4.z - aa*cc;
dd = d4.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.z;
dd = bb*dd;
c2[i + 2] = cc;
d2[i + 2] = dd;
aa = a4.w;
bb = b4.w - aa*cc;
dd = d4.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4.w;
dd = bb*dd;
c2[i + 3] = cc;
d2[i + 3] = dd;
aa = a4_2.x;
bb = b4_2.x - aa*cc;
dd = d4_2.x - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.x;
dd = bb*dd;
c2[i + 4] = cc;
d2[i + 4] = dd;
aa = a4_2.y;
bb = b4_2.y - aa*cc;
dd = d4_2.y - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.y;
dd = bb*dd;
c2[i + 5] = cc;
d2[i + 5] = dd;
aa = a4_2.z;
bb = b4_2.z - aa*cc;
dd = d4_2.z - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.z;
dd = bb*dd;
c2[i + 6] = cc;
d2[i + 6] = dd;
aa = a4_2.w;
bb = b4_2.w - aa*cc;
dd = d4_2.w - aa*dd;
bb = 1.0f / bb;
cc = bb*c4_2.w;
dd = bb*dd;
c2[i + 7] = cc;
d2[i + 7] = dd;
}
//
// reverse pass -- back substitution, packing 8 results per pair of stores
//
d4_2.w = dd;
dd = d2[NX - 2] - c2[NX - 2] * dd;
d4_2.z = dd;
dd = d2[NX - 3] - c2[NX - 3] * dd;
d4_2.y = dd;
dd = d2[NX - 4] - c2[NX - 4] * dd;
d4_2.x = dd;
dd = d2[NX - 5] - c2[NX - 5] * dd;
d4.w = dd;
dd = d2[NX - 6] - c2[NX - 6] * dd;
d4.z = dd;
dd = d2[NX - 7] - c2[NX - 7] * dd;
d4.y = dd;
dd = d2[NX - 8] - c2[NX - 8] * dd;
d4.x = dd;
d_d[indg + 1] = d4_2;
d_d[indg] = d4;
for (i = NX - 9; i >= 0; i -= 8) {
indg = indg - 2;
dd = d2[i] - c2[i] * dd;
d4_2.w = dd;
dd = d2[i - 1] - c2[i - 1] * dd;
d4_2.z = dd;
dd = d2[i - 2] - c2[i - 2] * dd;
d4_2.y = dd;
dd = d2[i - 3] - c2[i - 3] * dd;
d4_2.x = dd;
dd = d2[i - 4] - c2[i - 4] * dd;
d4.w = dd;
dd = d2[i - 5] - c2[i - 5] * dd;
d4.z = dd;
dd = d2[i - 6] - c2[i - 6] * dd;
d4.y = dd;
dd = d2[i - 7] - c2[i - 7] * dd;
d4.x = dd;
d_d[indg + 1] = d4_2;
d_d[indg] = d4;
}
}
}
//
// new tri-diagonal solve in x-direction
//
// Cooperative x-direction tridiagonal solve: one BLOCK per x-line
// (blockIdx.x = j, blockIdx.y = k), one thread per element, with the system
// normalised and reduced in shared memory via a tree-structured forward pass
// followed by a mirrored backward substitution (cyclic-reduction style --
// NOTE(review): confirm the intended algorithm name).
// Shared arrays are 256 entries, so blockDim.x <= 256 is required; the code
// indexes d_b[indg] unconditionally, so blockDim.x is presumably == NX --
// TODO confirm against the launch site.
__global__ void GPU_adi_x_new(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int j, k, indg, t, tm, tp, nt, shift = 0;
float bbi;
// normalised lower/upper coefficients and r.h.s. for the whole line
__shared__ float a[256], c[256], d[256];
//
// set up indices for main block
//
t = threadIdx.x;
j = blockIdx.x;
k = blockIdx.y;
indg = t + NX*(j + k*NY);
// normalise the row so the diagonal becomes 1 (off-diagonals negated)
bbi = 1.0f / d_b[indg];
a[t] = -bbi * d_a[indg];
c[t] = -bbi * d_c[indg];
d[t] = bbi * d_d[indg];
// forward pass: each level halves the number of active threads; (tm,t,tp)
// track the left/centre/right element indices at the current tree level
tm = 2 * t;
t = tm + 1;
tp = tm + 2;
for (nt = blockDim.x / 2; nt>0; nt >>= 1) {
shift++;
// barrier is outside the divergent branch: all threads reach it
__syncthreads();
if (threadIdx.x < nt) {
bbi = 1.0f;
if (tm >= 0) {
// eliminate the left neighbour
bbi -= a[t] * c[tm];
d[t] += a[t] * d[tm];
a[t] = a[t] * a[tm];
}
if (tp<NX) {
// eliminate the right neighbour
bbi -= c[t] * a[tp];
d[t] += c[t] * d[tp];
c[t] = c[t] * c[tp];
}
bbi = 1.0f / bbi;
d[t] *= bbi;
a[t] *= bbi;
c[t] *= bbi;
tm = 2 * tm + 1;
t = 2 * t + 1;
tp = 2 * tp + 1;
}
}
// reverse pass: walk back down the tree, substituting solved centres into
// their neighbours
for (; shift>0; shift--) {
nt = blockDim.x >> shift;
__syncthreads();
if (threadIdx.x < nt) {
tm >>= 1;
t >>= 1;
tp >>= 1;
if (tm >= 0) d[tm] += c[tm] * d[t];
if (tp<NX) d[tp] += a[tp] * d[t];
}
}
__syncthreads();
// publish the solved line back to global memory
d_d[indg] = d[threadIdx.x];
}
//
// old tri-diagonal solve in x-direction
//
// "Old" x-direction solve: one thread per x-line (unit-stride walk), plain
// Thomas forward elimination + back substitution in per-thread local arrays.
// The solution overwrites d_d.  Requires NX <= 256 (local buffer capacity).
__global__ void GPU_adi_x(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int i, j, k, indg;
// c[], d[] hold the per-line modified coefficients / r.h.s.
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
j = threadIdx.x + blockIdx.x*blockDim.x; // global indices
k = threadIdx.y + blockIdx.y*blockDim.y;
// first element of this x-line in the row-major NX*NY*NZ grid
indg = NX*(j + k*NY);
if ((j<NY) && (k<NZ)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (i = 1; i<NX; i++) {
indg = indg + 1;
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[i] = cc;
d[i] = dd;
}
//
// reverse pass
//
d_d[indg] = dd;
for (i = NX - 2; i >= 0; i--) {
indg = indg - 1;
dd = d[i] - c[i] * dd;
d_d[indg] = dd;
}
}
}
//
// tri-diagonal solve in y-direction
//
// Tridiagonal solve along y (stride-NX walk); one thread per (i,k) line.
// The solution overwrites d_d.  Requires NY <= 256 (local buffer capacity).
__global__ void GPU_adi_y(int NX, int NY, int NZ,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
float* __restrict__ d_d)
{
int i, j, k, indg;
// c[], d[] hold the per-line modified coefficients / r.h.s. (Thomas solve)
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
k = threadIdx.y + blockIdx.y*blockDim.y;
// start of the y-line: element (i, j=0, k)
indg = i + k*NX*NY;
if ((i<NX) && (k<NZ)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (j = 1; j<NY; j++) {
indg = indg + NX; // step one row in y
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[j] = cc;
d[j] = dd;
}
//
// reverse pass
//
d_d[indg] = dd;
for (j = NY - 2; j >= 0; j--) {
indg = indg - NX;
dd = d[j] - c[j] * dd;
d_d[indg] = dd;
}
}
}
//
// tri-diagonal solve in z-direction, and update solution
//
// Tridiagonal solve along z (stride NX*NY); one thread per (i,j) column.
// The result is ACCUMULATED into d_u (d_u += solution), applying the final
// ADI update.  Requires NZ <= 256 (local buffer capacity).
__global__ void GPU_adi_z(int NX, int NY, int NZ,
float* __restrict__ d_u,
const float* __restrict__ d_a,
const float* __restrict__ d_b,
const float* __restrict__ d_c,
const float* __restrict__ d_d)
{
int i, j, k, indg, off;
// c[], d[] hold the per-column modified coefficients / r.h.s.
float aa, bb, cc, dd, c[256], d[256];
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
j = threadIdx.y + blockIdx.y*blockDim.y;
indg = i + j*NX;  // element (i,j,k=0)
off = NX*NY;      // stride between consecutive k-planes
if ((i<NX) && (j<NY)) {
//
// forward pass
//
bb = 1.0f / d_b[indg];
cc = bb*d_c[indg];
dd = bb*d_d[indg];
c[0] = cc;
d[0] = dd;
for (k = 1; k<NZ; k++) {
indg = indg + off;
aa = d_a[indg];
bb = d_b[indg] - aa*cc;
dd = d_d[indg] - aa*dd;
bb = 1.0f / bb;
cc = bb*d_c[indg];
dd = bb*dd;
c[k] = cc;
d[k] = dd;
}
//
// reverse pass: accumulate the increment into the solution field
//
d_u[indg] += dd;
for (k = NZ - 2; k >= 0; k--) {
indg = indg - off;
dd = d[k] - c[k] * dd;
d_u[indg] += dd;
}
}
}
.file "adi3d_kernel.hip"
# Host-side launch stub for GPU_adi_rhs (4 scalar + 11 pointer arguments).
# Spills each argument to the stack, builds the kernel-argument pointer array
# at 96(%rsp), pops the <<<grid,block,shmem,stream>>> configuration recorded
# by the compiler, and forwards everything to hipLaunchKernel.
.globl _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_ # -- Begin function _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.p2align 4, 0x90
.type _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_,@function
_Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_: # @_Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.cfi_startproc
# %bb.0:
subq $216, %rsp
.cfi_def_cfa_offset 224
# spill register arguments (NX, NY, NZ, lam, first three pointers)
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
# build the kernel-argument pointer array; entries 8..15 point directly at
# the caller's stack slots for the 8 remaining pointer arguments
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rax
movq %rax, 144(%rsp)
leaq 224(%rsp), %rax
movq %rax, 152(%rsp)
leaq 232(%rsp), %rax
movq %rax, 160(%rsp)
leaq 240(%rsp), %rax
movq %rax, 168(%rsp)
leaq 248(%rsp), %rax
movq %rax, 176(%rsp)
leaq 256(%rsp), %rax
movq %rax, 184(%rsp)
leaq 264(%rsp), %rax
movq %rax, 192(%rsp)
leaq 272(%rsp), %rax
movq %rax, 200(%rsp)
leaq 280(%rsp), %rax
movq %rax, 208(%rsp)
# retrieve grid/block dims, shared-mem size and stream pushed by the caller
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $232, %rsp
.cfi_adjust_cfa_offset -232
retq
.Lfunc_end0:
.size _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, .Lfunc_end0-_Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.cfi_endproc
# -- End function
# Host-side launch stub for GPU_adi_x_float4 (3 ints + 4 float4 pointers):
# spills arguments, builds the argument-pointer array at 96(%rsp), pops the
# recorded launch configuration and calls hipLaunchKernel.
.globl _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_ # -- Begin function _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.p2align 4, 0x90
.type _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_,@function
_Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_: # @_Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, .Lfunc_end1-_Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.cfi_endproc
# -- End function
# Host-side launch stub for GPU_adi_x_float4_2; identical layout to the
# GPU_adi_x_float4 stub, only the kernel handle differs.
.globl _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_ # -- Begin function _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.p2align 4, 0x90
.type _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_,@function
_Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_: # @_Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, .Lfunc_end2-_Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.cfi_endproc
# -- End function
# Host-side launch stub for GPU_adi_x_new (3 ints + 4 float pointers):
# spills arguments, builds the argument-pointer array, pops the recorded
# launch configuration and calls hipLaunchKernel.
.globl _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf # -- Begin function _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.p2align 4, 0x90
.type _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf,@function
_Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf: # @_Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13GPU_adi_x_newiiiPKfS0_S0_Pf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf, .Lfunc_end3-_Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.cfi_endproc
# -- End function
# Host-side launch stub for GPU_adi_x; same shape as the GPU_adi_x_new stub,
# only the kernel handle differs.
.globl _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf # -- Begin function _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.p2align 4, 0x90
.type _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf,@function
_Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf: # @_Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9GPU_adi_xiiiPKfS0_S0_Pf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end4:
.size _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf, .Lfunc_end4-_Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.cfi_endproc
# -- End function
# Host-side launch stub for GPU_adi_y; same shape as the GPU_adi_x stub,
# only the kernel handle differs.
.globl _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf # -- Begin function _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.p2align 4, 0x90
.type _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf,@function
_Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf: # @_Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9GPU_adi_yiiiPKfS0_S0_Pf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end5:
.size _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf, .Lfunc_end5-_Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.cfi_endproc
# -- End function
# Host-side launch stub for GPU_adi_z (3 ints + 5 float pointers): one
# argument more than the x/y stubs, hence the larger frame and the two extra
# stack-argument slots at 176/184(%rsp).
.globl _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_ # -- Begin function _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.p2align 4, 0x90
.type _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_,@function
_Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_: # @_Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9GPU_adi_ziiiPfPKfS1_S1_S1_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end6:
.size _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_, .Lfunc_end6-_Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.cfi_endproc
# -- End function
# Module constructor (run from .init_array): registers the fat binary once
# (guarded by __hip_gpubin_handle), registers each of the seven kernels with
# the HIP runtime, and installs __hip_module_dtor via atexit.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# register the fat binary only on first invocation
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
# one __hipRegisterFunction call per kernel: handle, host stub address,
# mangled-name strings, -1 thread limit, null attribute pointers
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13GPU_adi_x_newiiiPKfS0_S0_Pf, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9GPU_adi_xiiiPKfS0_S0_Pf, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9GPU_adi_yiiiPKfS0_S0_Pf, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9GPU_adi_ziiiPfPKfS1_S1_S1_, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# arrange for the fat binary to be unregistered at program exit
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (installed via atexit): unregisters the fat binary if it
# was registered and clears the cached handle so the teardown is idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_,@object # @_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.section .rodata,"a",@progbits
.globl _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.p2align 3, 0x0
_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_:
.quad _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.size _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, 8
.type _Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_,@object # @_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.globl _Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.p2align 3, 0x0
_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_:
.quad _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.size _Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, 8
.type _Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_,@object # @_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.globl _Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.p2align 3, 0x0
_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_:
.quad _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.size _Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, 8
.type _Z13GPU_adi_x_newiiiPKfS0_S0_Pf,@object # @_Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.globl _Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.p2align 3, 0x0
_Z13GPU_adi_x_newiiiPKfS0_S0_Pf:
.quad _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.size _Z13GPU_adi_x_newiiiPKfS0_S0_Pf, 8
.type _Z9GPU_adi_xiiiPKfS0_S0_Pf,@object # @_Z9GPU_adi_xiiiPKfS0_S0_Pf
.globl _Z9GPU_adi_xiiiPKfS0_S0_Pf
.p2align 3, 0x0
_Z9GPU_adi_xiiiPKfS0_S0_Pf:
.quad _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.size _Z9GPU_adi_xiiiPKfS0_S0_Pf, 8
.type _Z9GPU_adi_yiiiPKfS0_S0_Pf,@object # @_Z9GPU_adi_yiiiPKfS0_S0_Pf
.globl _Z9GPU_adi_yiiiPKfS0_S0_Pf
.p2align 3, 0x0
_Z9GPU_adi_yiiiPKfS0_S0_Pf:
.quad _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.size _Z9GPU_adi_yiiiPKfS0_S0_Pf, 8
.type _Z9GPU_adi_ziiiPfPKfS1_S1_S1_,@object # @_Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.globl _Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.p2align 3, 0x0
_Z9GPU_adi_ziiiPfPKfS1_S1_S1_:
.quad _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.size _Z9GPU_adi_ziiiPfPKfS1_S1_S1_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_"
.size .L__unnamed_1, 52
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_"
.size .L__unnamed_2, 60
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_"
.size .L__unnamed_3, 62
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z13GPU_adi_x_newiiiPKfS0_S0_Pf"
.size .L__unnamed_4, 32
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z9GPU_adi_xiiiPKfS0_S0_Pf"
.size .L__unnamed_5, 27
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "_Z9GPU_adi_yiiiPKfS0_S0_Pf"
.size .L__unnamed_6, 27
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "_Z9GPU_adi_ziiiPfPKfS1_S1_S1_"
.size .L__unnamed_7, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.addrsig_sym _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.addrsig_sym _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.addrsig_sym _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.addrsig_sym _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.addrsig_sym _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.addrsig_sym _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.addrsig_sym _Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.addrsig_sym _Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.addrsig_sym _Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.addrsig_sym _Z9GPU_adi_xiiiPKfS0_S0_Pf
.addrsig_sym _Z9GPU_adi_yiiiPKfS0_S0_Pf
.addrsig_sym _Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0017205a_00000000-6_adi3d_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.type _Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, @function
_Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_:
.LFB2051:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movss %xmm0, (%rsp)
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
leaq 4(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
movq %rcx, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 208(%rsp)
movq %r8, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 216(%rsp)
movq %r9, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 224(%rsp)
movq 320(%rsp), %rax
movq %rax, 48(%rsp)
leaq 48(%rsp), %rax
movq %rax, 232(%rsp)
movq 328(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 240(%rsp)
movq 336(%rsp), %rax
movq %rax, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, 248(%rsp)
movq 344(%rsp), %rax
movq %rax, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 256(%rsp)
movq 352(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 264(%rsp)
movq 360(%rsp), %rax
movq %rax, 88(%rsp)
leaq 88(%rsp), %rax
movq %rax, 272(%rsp)
movq 368(%rsp), %rax
movq %rax, 96(%rsp)
leaq 96(%rsp), %rax
movq %rax, 280(%rsp)
movq 376(%rsp), %rax
movq %rax, 104(%rsp)
leaq 104(%rsp), %rax
movq %rax, 288(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
movl $1, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
leaq 120(%rsp), %rcx
leaq 112(%rsp), %rdx
leaq 140(%rsp), %rsi
leaq 128(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 120(%rsp)
.cfi_def_cfa_offset 328
pushq 120(%rsp)
.cfi_def_cfa_offset 336
leaq 192(%rsp), %r9
movq 156(%rsp), %rcx
movl 164(%rsp), %r8d
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
leaq _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 320
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, .-_Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.globl _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.type _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, @function
_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 72(%rsp)
.cfi_def_cfa_offset 24
pushq 72(%rsp)
.cfi_def_cfa_offset 32
pushq 72(%rsp)
.cfi_def_cfa_offset 40
pushq 72(%rsp)
.cfi_def_cfa_offset 48
pushq 72(%rsp)
.cfi_def_cfa_offset 56
pushq 72(%rsp)
.cfi_def_cfa_offset 64
pushq 72(%rsp)
.cfi_def_cfa_offset 72
pushq 72(%rsp)
.cfi_def_cfa_offset 80
call _Z65__device_stub__Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_iiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
addq $72, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, .-_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.globl _Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
.type _Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_, @function
_Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_:
.LFB2053:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_, .-_Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
.globl _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_
.type _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_, @function
_Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_:
.LFB2054:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z55__device_stub__Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_, .-_Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_
.globl _Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
.type _Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_, @function
_Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_:
.LFB2055:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_, .-_Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
.globl _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_
.type _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_, @function
_Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_:
.LFB2056:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z57__device_stub__Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_iiiPK6float4S1_S1_PS_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_, .-_Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_
.globl _Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.type _Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, @function
_Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf:
.LFB2057:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z13GPU_adi_x_newiiiPKfS0_S0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, .-_Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.globl _Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.type _Z13GPU_adi_x_newiiiPKfS0_S0_Pf, @function
_Z13GPU_adi_x_newiiiPKfS0_S0_Pf:
.LFB2058:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z45__device_stub__Z13GPU_adi_x_newiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z13GPU_adi_x_newiiiPKfS0_S0_Pf, .-_Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.globl _Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.type _Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, @function
_Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf:
.LFB2059:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9GPU_adi_xiiiPKfS0_S0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, .-_Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.globl _Z9GPU_adi_xiiiPKfS0_S0_Pf
.type _Z9GPU_adi_xiiiPKfS0_S0_Pf, @function
_Z9GPU_adi_xiiiPKfS0_S0_Pf:
.LFB2060:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z9GPU_adi_xiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z9GPU_adi_xiiiPKfS0_S0_Pf, .-_Z9GPU_adi_xiiiPKfS0_S0_Pf
.globl _Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.type _Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, @function
_Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf:
.LFB2061:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
movq %r8, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
movq %r9, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
movq 192(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z9GPU_adi_yiiiPKfS0_S0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf, .-_Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
.globl _Z9GPU_adi_yiiiPKfS0_S0_Pf
.type _Z9GPU_adi_yiiiPKfS0_S0_Pf, @function
_Z9GPU_adi_yiiiPKfS0_S0_Pf:
.LFB2062:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z9GPU_adi_yiiiPKfS0_S0_PfiiiPKfS0_S0_Pf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z9GPU_adi_yiiiPKfS0_S0_Pf, .-_Z9GPU_adi_yiiiPKfS0_S0_Pf
.globl _Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_
.type _Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_, @function
_Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_:
.LFB2063:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movl %edx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rcx, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
movq %r8, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
movq %r9, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
movq 224(%rsp), %rax
movq %rax, 48(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
movq 232(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 184(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z9GPU_adi_ziiiPfPKfS1_S1_S1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_, .-_Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_
.globl _Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.type _Z9GPU_adi_ziiiPfPKfS1_S1_S1_, @function
_Z9GPU_adi_ziiiPfPKfS1_S1_S1_:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z43__device_stub__Z9GPU_adi_ziiiPfPKfS1_S1_S1_iiiPfPKfS1_S1_S1_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _Z9GPU_adi_ziiiPfPKfS1_S1_S1_, .-_Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9GPU_adi_ziiiPfPKfS1_S1_S1_"
.LC1:
.string "_Z9GPU_adi_yiiiPKfS0_S0_Pf"
.LC2:
.string "_Z9GPU_adi_xiiiPKfS0_S0_Pf"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "_Z13GPU_adi_x_newiiiPKfS0_S0_Pf"
.align 8
.LC4:
.string "_Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_"
.align 8
.LC5:
.string "_Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_"
.align 8
.LC6:
.string "_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2066:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9GPU_adi_ziiiPfPKfS1_S1_S1_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9GPU_adi_yiiiPKfS0_S0_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z9GPU_adi_xiiiPKfS0_S0_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z13GPU_adi_x_newiiiPKfS0_S0_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z18GPU_adi_x_float4_2iiiPK6float4S1_S1_PS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z16GPU_adi_x_float4iiiPK6float4S1_S1_PS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "adi3d_kernel.hip"
.globl _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_ # -- Begin function _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.p2align 4, 0x90
.type _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_,@function
_Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_: # @_Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.cfi_startproc
# %bb.0:
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rax
movq %rax, 144(%rsp)
leaq 224(%rsp), %rax
movq %rax, 152(%rsp)
leaq 232(%rsp), %rax
movq %rax, 160(%rsp)
leaq 240(%rsp), %rax
movq %rax, 168(%rsp)
leaq 248(%rsp), %rax
movq %rax, 176(%rsp)
leaq 256(%rsp), %rax
movq %rax, 184(%rsp)
leaq 264(%rsp), %rax
movq %rax, 192(%rsp)
leaq 272(%rsp), %rax
movq %rax, 200(%rsp)
leaq 280(%rsp), %rax
movq %rax, 208(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $232, %rsp
.cfi_adjust_cfa_offset -232
retq
.Lfunc_end0:
.size _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, .Lfunc_end0-_Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.cfi_endproc
# -- End function
.globl _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_ # -- Begin function _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.p2align 4, 0x90
.type _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_,@function
_Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_: # @_Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, .Lfunc_end1-_Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.cfi_endproc
# -- End function
.globl _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_ # -- Begin function _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.p2align 4, 0x90
.type _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_,@function
_Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_: # @_Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, .Lfunc_end2-_Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.cfi_endproc
# -- End function
.globl _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf # -- Begin function _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.p2align 4, 0x90
.type _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf,@function
_Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf: # @_Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13GPU_adi_x_newiiiPKfS0_S0_Pf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf, .Lfunc_end3-_Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.cfi_endproc
# -- End function
.globl _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf # -- Begin function _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.p2align 4, 0x90
.type _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf,@function
_Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf: # @_Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9GPU_adi_xiiiPKfS0_S0_Pf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end4:
.size _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf, .Lfunc_end4-_Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.cfi_endproc
# -- End function
.globl _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf # -- Begin function _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.p2align 4, 0x90
.type _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf,@function
_Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf: # @_Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9GPU_adi_yiiiPKfS0_S0_Pf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end5:
.size _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf, .Lfunc_end5-_Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.cfi_endproc
# -- End function
.globl _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_ # -- Begin function _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.p2align 4, 0x90
.type _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_,@function
_Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_: # @_Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, 88(%rsp)
movq %r8, 80(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9GPU_adi_ziiiPfPKfS1_S1_S1_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end6:
.size _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_, .Lfunc_end6-_Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13GPU_adi_x_newiiiPKfS0_S0_Pf, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9GPU_adi_xiiiPKfS0_S0_Pf, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9GPU_adi_yiiiPKfS0_S0_Pf, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9GPU_adi_ziiiPfPKfS1_S1_S1_, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_,@object # @_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.section .rodata,"a",@progbits
.globl _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.p2align 3, 0x0
_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_:
.quad _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.size _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_, 8
.type _Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_,@object # @_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.globl _Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.p2align 3, 0x0
_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_:
.quad _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.size _Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, 8
.type _Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_,@object # @_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.globl _Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.p2align 3, 0x0
_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_:
.quad _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.size _Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_, 8
.type _Z13GPU_adi_x_newiiiPKfS0_S0_Pf,@object # @_Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.globl _Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.p2align 3, 0x0
_Z13GPU_adi_x_newiiiPKfS0_S0_Pf:
.quad _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.size _Z13GPU_adi_x_newiiiPKfS0_S0_Pf, 8
.type _Z9GPU_adi_xiiiPKfS0_S0_Pf,@object # @_Z9GPU_adi_xiiiPKfS0_S0_Pf
.globl _Z9GPU_adi_xiiiPKfS0_S0_Pf
.p2align 3, 0x0
_Z9GPU_adi_xiiiPKfS0_S0_Pf:
.quad _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.size _Z9GPU_adi_xiiiPKfS0_S0_Pf, 8
.type _Z9GPU_adi_yiiiPKfS0_S0_Pf,@object # @_Z9GPU_adi_yiiiPKfS0_S0_Pf
.globl _Z9GPU_adi_yiiiPKfS0_S0_Pf
.p2align 3, 0x0
_Z9GPU_adi_yiiiPKfS0_S0_Pf:
.quad _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.size _Z9GPU_adi_yiiiPKfS0_S0_Pf, 8
.type _Z9GPU_adi_ziiiPfPKfS1_S1_S1_,@object # @_Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.globl _Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.p2align 3, 0x0
_Z9GPU_adi_ziiiPfPKfS1_S1_S1_:
.quad _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.size _Z9GPU_adi_ziiiPfPKfS1_S1_S1_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_"
.size .L__unnamed_1, 52
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_"
.size .L__unnamed_2, 60
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_"
.size .L__unnamed_3, 62
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z13GPU_adi_x_newiiiPKfS0_S0_Pf"
.size .L__unnamed_4, 32
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z9GPU_adi_xiiiPKfS0_S0_Pf"
.size .L__unnamed_5, 27
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "_Z9GPU_adi_yiiiPKfS0_S0_Pf"
.size .L__unnamed_6, 27
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "_Z9GPU_adi_ziiiPfPKfS1_S1_S1_"
.size .L__unnamed_7, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.addrsig_sym _Z31__device_stub__GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.addrsig_sym _Z33__device_stub__GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.addrsig_sym _Z28__device_stub__GPU_adi_x_newiiiPKfS0_S0_Pf
.addrsig_sym _Z24__device_stub__GPU_adi_xiiiPKfS0_S0_Pf
.addrsig_sym _Z24__device_stub__GPU_adi_yiiiPKfS0_S0_Pf
.addrsig_sym _Z24__device_stub__GPU_adi_ziiiPfPKfS1_S1_S1_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11GPU_adi_rhsiiifPKfPfS1_S1_S1_S1_S1_S1_S1_S1_S1_
.addrsig_sym _Z16GPU_adi_x_float4iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.addrsig_sym _Z18GPU_adi_x_float4_2iiiPK15HIP_vector_typeIfLj4EES2_S2_PS0_
.addrsig_sym _Z13GPU_adi_x_newiiiPKfS0_S0_Pf
.addrsig_sym _Z9GPU_adi_xiiiPKfS0_S0_Pf
.addrsig_sym _Z9GPU_adi_yiiiPKfS0_S0_Pf
.addrsig_sym _Z9GPU_adi_ziiiPfPKfS1_S1_S1_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | ////
////#include "BC_NN_Functions.h"
////
//#include "Tensor.h"
// __global__
// void sigmoid(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] = 1 / (1+ pow(2.71828, -x[i]));
// }
// }
// __global__
// void sigmoid_deriv(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] *= (1 - x[i]);
// }
// }
////
// void sigmoid(Tensor<float, GPU>& x) { sigmoid<<<10, 128>>>(x.data(), x.size());}
// void sigmoid_deriv(Tensor<float, GPU>& x) { sigmoid_deriv<<<10, 128>>>(x.data(), x.size());}
//
//// | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | ////
////#include "BC_NN_Functions.h"
////
//#include "Tensor.h"
// __global__
// void sigmoid(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] = 1 / (1+ pow(2.71828, -x[i]));
// }
// }
// __global__
// void sigmoid_deriv(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] *= (1 - x[i]);
// }
// }
////
// void sigmoid(Tensor<float, GPU>& x) { sigmoid<<<10, 128>>>(x.data(), x.size());}
// void sigmoid_deriv(Tensor<float, GPU>& x) { sigmoid_deriv<<<10, 128>>>(x.data(), x.size());}
//
//// | .file "tmpxft_00172c4d_00000000-6_nonLinearityFunction.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | ////
////#include "BC_NN_Functions.h"
////
//#include "Tensor.h"
// __global__
// void sigmoid(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] = 1 / (1+ pow(2.71828, -x[i]));
// }
// }
// __global__
// void sigmoid_deriv(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] *= (1 - x[i]);
// }
// }
////
// void sigmoid(Tensor<float, GPU>& x) { sigmoid<<<10, 128>>>(x.data(), x.size());}
// void sigmoid_deriv(Tensor<float, GPU>& x) { sigmoid_deriv<<<10, 128>>>(x.data(), x.size());}
//
//// | #include <hip/hip_runtime.h>
////
////#include "BC_NN_Functions.h"
////
//#include "Tensor.h"
// __global__
// void sigmoid(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] = 1 / (1+ pow(2.71828, -x[i]));
// }
// }
// __global__
// void sigmoid_deriv(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] *= (1 - x[i]);
// }
// }
////
// void sigmoid(Tensor<float, GPU>& x) { sigmoid<<<10, 128>>>(x.data(), x.size());}
// void sigmoid_deriv(Tensor<float, GPU>& x) { sigmoid_deriv<<<10, 128>>>(x.data(), x.size());}
//
//// |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
////
////#include "BC_NN_Functions.h"
////
//#include "Tensor.h"
// __global__
// void sigmoid(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] = 1 / (1+ pow(2.71828, -x[i]));
// }
// }
// __global__
// void sigmoid_deriv(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] *= (1 - x[i]);
// }
// }
////
// void sigmoid(Tensor<float, GPU>& x) { sigmoid<<<10, 128>>>(x.data(), x.size());}
// void sigmoid_deriv(Tensor<float, GPU>& x) { sigmoid_deriv<<<10, 128>>>(x.data(), x.size());}
//
//// | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
////
////#include "BC_NN_Functions.h"
////
//#include "Tensor.h"
// __global__
// void sigmoid(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] = 1 / (1+ pow(2.71828, -x[i]));
// }
// }
// __global__
// void sigmoid_deriv(float* x, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// x[i] *= (1 - x[i]);
// }
// }
////
// void sigmoid(Tensor<float, GPU>& x) { sigmoid<<<10, 128>>>(x.data(), x.size());}
// void sigmoid_deriv(Tensor<float, GPU>& x) { sigmoid_deriv<<<10, 128>>>(x.data(), x.size());}
//
//// | .text
.file "nonLinearityFunction.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00172c4d_00000000-6_nonLinearityFunction.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "nonLinearityFunction.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #define TILE_DIM 32
template<typename T>
__device__ void vectorDotMatrix(const T* vector, const T* matrix, T* result,
const int rows, const int columns) {
__shared__ T vector_tile[TILE_DIM];
__shared__ T matrix_tile[TILE_DIM][TILE_DIM];
int bx = blockIdx.x;
int tx = threadIdx.x;
int index = bx * blockDim.x + tx;
T resultValue = 0;
for (int t = 0; t < (rows - 1) / TILE_DIM + 1; t++) {
int idx = t * TILE_DIM + tx;
if (idx < rows) {
vector_tile[tx] = vector[idx];
} else {
vector_tile[tx] = 0;
}
if (index < columns) {
int firstTileRow = t * TILE_DIM;
for (int i = 0; i < TILE_DIM; i++) {
int row = firstTileRow + i;
if (row < rows) {
matrix_tile[i][tx] = matrix[row * columns + index];
} else {
matrix_tile[i][tx] = 0;
}
}
} else {
for (int i = 0; i < TILE_DIM; i++) {
matrix_tile[i][tx] = 0;
}
}
__syncthreads();
for (int i = 0; i < TILE_DIM; i++) {
resultValue += vector_tile[i] * matrix_tile[i][tx];
}
__syncthreads();
}
if (index < columns) {
result[index] = resultValue;
}
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #define TILE_DIM 32
template<typename T>
__device__ void vectorDotMatrix(const T* vector, const T* matrix, T* result,
const int rows, const int columns) {
__shared__ T vector_tile[TILE_DIM];
__shared__ T matrix_tile[TILE_DIM][TILE_DIM];
int bx = blockIdx.x;
int tx = threadIdx.x;
int index = bx * blockDim.x + tx;
T resultValue = 0;
for (int t = 0; t < (rows - 1) / TILE_DIM + 1; t++) {
int idx = t * TILE_DIM + tx;
if (idx < rows) {
vector_tile[tx] = vector[idx];
} else {
vector_tile[tx] = 0;
}
if (index < columns) {
int firstTileRow = t * TILE_DIM;
for (int i = 0; i < TILE_DIM; i++) {
int row = firstTileRow + i;
if (row < rows) {
matrix_tile[i][tx] = matrix[row * columns + index];
} else {
matrix_tile[i][tx] = 0;
}
}
} else {
for (int i = 0; i < TILE_DIM; i++) {
matrix_tile[i][tx] = 0;
}
}
__syncthreads();
for (int i = 0; i < TILE_DIM; i++) {
resultValue += vector_tile[i] * matrix_tile[i][tx];
}
__syncthreads();
}
if (index < columns) {
result[index] = resultValue;
}
} | .file "tmpxft_001a8421_00000000-6_Vector2MatrixTensor.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #define TILE_DIM 32
template<typename T>
__device__ void vectorDotMatrix(const T* vector, const T* matrix, T* result,
const int rows, const int columns) {
__shared__ T vector_tile[TILE_DIM];
__shared__ T matrix_tile[TILE_DIM][TILE_DIM];
int bx = blockIdx.x;
int tx = threadIdx.x;
int index = bx * blockDim.x + tx;
T resultValue = 0;
for (int t = 0; t < (rows - 1) / TILE_DIM + 1; t++) {
int idx = t * TILE_DIM + tx;
if (idx < rows) {
vector_tile[tx] = vector[idx];
} else {
vector_tile[tx] = 0;
}
if (index < columns) {
int firstTileRow = t * TILE_DIM;
for (int i = 0; i < TILE_DIM; i++) {
int row = firstTileRow + i;
if (row < rows) {
matrix_tile[i][tx] = matrix[row * columns + index];
} else {
matrix_tile[i][tx] = 0;
}
}
} else {
for (int i = 0; i < TILE_DIM; i++) {
matrix_tile[i][tx] = 0;
}
}
__syncthreads();
for (int i = 0; i < TILE_DIM; i++) {
resultValue += vector_tile[i] * matrix_tile[i][tx];
}
__syncthreads();
}
if (index < columns) {
result[index] = resultValue;
}
} | #include <hip/hip_runtime.h>
#define TILE_DIM 32
template<typename T>
__device__ void vectorDotMatrix(const T* vector, const T* matrix, T* result,
const int rows, const int columns) {
__shared__ T vector_tile[TILE_DIM];
__shared__ T matrix_tile[TILE_DIM][TILE_DIM];
int bx = blockIdx.x;
int tx = threadIdx.x;
int index = bx * blockDim.x + tx;
T resultValue = 0;
for (int t = 0; t < (rows - 1) / TILE_DIM + 1; t++) {
int idx = t * TILE_DIM + tx;
if (idx < rows) {
vector_tile[tx] = vector[idx];
} else {
vector_tile[tx] = 0;
}
if (index < columns) {
int firstTileRow = t * TILE_DIM;
for (int i = 0; i < TILE_DIM; i++) {
int row = firstTileRow + i;
if (row < rows) {
matrix_tile[i][tx] = matrix[row * columns + index];
} else {
matrix_tile[i][tx] = 0;
}
}
} else {
for (int i = 0; i < TILE_DIM; i++) {
matrix_tile[i][tx] = 0;
}
}
__syncthreads();
for (int i = 0; i < TILE_DIM; i++) {
resultValue += vector_tile[i] * matrix_tile[i][tx];
}
__syncthreads();
}
if (index < columns) {
result[index] = resultValue;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#define TILE_DIM 32
template<typename T>
__device__ void vectorDotMatrix(const T* vector, const T* matrix, T* result,
const int rows, const int columns) {
__shared__ T vector_tile[TILE_DIM];
__shared__ T matrix_tile[TILE_DIM][TILE_DIM];
int bx = blockIdx.x;
int tx = threadIdx.x;
int index = bx * blockDim.x + tx;
T resultValue = 0;
for (int t = 0; t < (rows - 1) / TILE_DIM + 1; t++) {
int idx = t * TILE_DIM + tx;
if (idx < rows) {
vector_tile[tx] = vector[idx];
} else {
vector_tile[tx] = 0;
}
if (index < columns) {
int firstTileRow = t * TILE_DIM;
for (int i = 0; i < TILE_DIM; i++) {
int row = firstTileRow + i;
if (row < rows) {
matrix_tile[i][tx] = matrix[row * columns + index];
} else {
matrix_tile[i][tx] = 0;
}
}
} else {
for (int i = 0; i < TILE_DIM; i++) {
matrix_tile[i][tx] = 0;
}
}
__syncthreads();
for (int i = 0; i < TILE_DIM; i++) {
resultValue += vector_tile[i] * matrix_tile[i][tx];
}
__syncthreads();
}
if (index < columns) {
result[index] = resultValue;
}
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#define TILE_DIM 32
template<typename T>
__device__ void vectorDotMatrix(const T* vector, const T* matrix, T* result,
const int rows, const int columns) {
__shared__ T vector_tile[TILE_DIM];
__shared__ T matrix_tile[TILE_DIM][TILE_DIM];
int bx = blockIdx.x;
int tx = threadIdx.x;
int index = bx * blockDim.x + tx;
T resultValue = 0;
for (int t = 0; t < (rows - 1) / TILE_DIM + 1; t++) {
int idx = t * TILE_DIM + tx;
if (idx < rows) {
vector_tile[tx] = vector[idx];
} else {
vector_tile[tx] = 0;
}
if (index < columns) {
int firstTileRow = t * TILE_DIM;
for (int i = 0; i < TILE_DIM; i++) {
int row = firstTileRow + i;
if (row < rows) {
matrix_tile[i][tx] = matrix[row * columns + index];
} else {
matrix_tile[i][tx] = 0;
}
}
} else {
for (int i = 0; i < TILE_DIM; i++) {
matrix_tile[i][tx] = 0;
}
}
__syncthreads();
for (int i = 0; i < TILE_DIM; i++) {
resultValue += vector_tile[i] * matrix_tile[i][tx];
}
__syncthreads();
}
if (index < columns) {
result[index] = resultValue;
}
} | .text
.file "Vector2MatrixTensor.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001a8421_00000000-6_Vector2MatrixTensor.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Vector2MatrixTensor.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cstdio>
using namespace std;
__global__ void adder(float* arr, float* block_incrs, int n) {
int tid = threadIdx.x;
extern __shared__ float sum[];
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
sum[0] = block_incrs[blockIdx.x];
__syncthreads();
if (gtid < n) arr[gtid] += sum[0];
}
__global__ void hillis_steele(float* g_idata, float* g_odata, int n,
float* block_sums) {
// all memory writes to be serviced immediately
extern volatile __shared__ float temp[];
int tid = threadIdx.x;
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
int block_size = blockDim.x;
int pout = 0, pin = 1;
float last_element;
// load input into shared memory
if (tid == 0 || gtid >= n)
temp[tid] = 0;
else
temp[tid] = g_idata[gtid - 1];
if (tid == block_size - 1)
if (gtid < n)
last_element = g_idata[gtid];
else
last_element = 0.0;
// entire temp should've gotten populated
__syncthreads();
// if (gtid == 5) {
// std::printf("global : %f===============\n", g_idata[gtid]);
// std::printf("temp : %f===============\n", temp[tid]);
// }
for (int offset = 1; offset < block_size; offset *= 2) {
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (tid >= offset)
temp[pout * block_size + tid] =
temp[pin * block_size + tid] + temp[pin * block_size + tid - offset];
else
temp[pout * block_size + tid] = temp[pin * block_size + tid];
__syncthreads(); // I need this here before I start next iteration
}
if (gtid < n) g_odata[gtid] = temp[pout * block_size + tid];
if (tid == block_size - 1)
block_sums[blockIdx.x] = last_element + temp[pout * block_size + tid];
}
__host__ void scan(const float* in, float* out, unsigned int n,
unsigned int threads_per_block) {
int num_blocks = (n + threads_per_block - 1) / threads_per_block;
// printf("num blocks : %d\n", num_blocks);
float *din, *dout, *block_sums, *block_incrs, *dummy;
cudaMalloc((void**)&din, n * sizeof(float));
cudaMalloc((void**)&dout, n * sizeof(float));
cudaMallocManaged((void**)&block_sums, num_blocks * sizeof(float));
cudaMallocManaged((void**)&block_incrs, num_blocks * sizeof(float));
cudaMallocManaged((void**)&dummy, sizeof(float));
cudaMemcpy(din, in, n * sizeof(float), cudaMemcpyHostToHost);
// // Only applicable if threads_per_block is a power of 2
// reduce_kernel<<<num_blocks, threads_per_block, sizeof(float) *
// threads_per_block>>>(din, block_sums, n);
hillis_steele<<<num_blocks, threads_per_block,
2 * threads_per_block * sizeof(float)>>>(din, dout, n,
block_sums);
cudaDeviceSynchronize();
// get the block increments (scan it once because of assumption)
int new_num_blocks = (num_blocks + threads_per_block - 1) /
threads_per_block; // will always be 1
hillis_steele<<<new_num_blocks, threads_per_block,
2 * threads_per_block * sizeof(float)>>>(
block_sums, block_incrs, num_blocks, dummy);
cudaDeviceSynchronize();
// for (int i = 0; i < num_blocks; i++)
// printf("%f ", block_incrs[i]);
// printf("\n");
// add each block increment to each block
adder<<<num_blocks, threads_per_block, sizeof(float)>>>(dout, block_incrs, n);
cudaDeviceSynchronize();
cudaMemcpy(out, dout, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(din);
cudaFree(dout);
cudaFree(block_sums);
cudaFree(block_incrs);
cudaFree(dummy);
} | code for sm_80
Function : _Z13hillis_steelePfS_iS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ BSSY B0, 0x100 ; /* 0x000000b000007945 */
/* 0x000fe20003800000 */
/*0050*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R0, R9, c[0x0][0x0], R10 ; /* 0x0000000009007a24 */
/* 0x001fca00078e020a */
/*0070*/ IMAD.WIDE R2, R0.reuse, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x040fe200078e0203 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc80003f06270 */
/*0090*/ ISETP.EQ.OR P1, PT, R10, RZ, P0 ; /* 0x000000ff0a00720c */
/* 0x000fda0000722670 */
/*00a0*/ @P1 BRA 0xe0 ; /* 0x0000003000001947 */
/* 0x000fea0003800000 */
/*00b0*/ LDG.E R4, [R2.64+-0x4] ; /* 0xfffffc0402047981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ STS [R10.X4], R4 ; /* 0x000000040a007388 */
/* 0x0041e20000004800 */
/*00d0*/ BRA 0xf0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*00e0*/ STS [R10.X4], RZ ; /* 0x000000ff0a007388 */
/* 0x0001e40000004800 */
/*00f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0100*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff057624 */
/* 0x000fe200078e00ff */
/*0110*/ BSSY B0, 0x1b0 ; /* 0x0000009000007945 */
/* 0x000fe80003800000 */
/*0120*/ IADD3 R4, R5, -0x1, RZ ; /* 0xffffffff05047810 */
/* 0x001fc40007ffe0ff */
/*0130*/ ISETP.GE.AND P2, PT, R5, 0x2, PT ; /* 0x000000020500780c */
/* 0x000fe20003f46270 */
/*0140*/ HFMA2.MMA R5, -RZ, RZ, 0, 0 ; /* 0x00000000ff057435 */
/* 0x000fe200000001ff */
/*0150*/ ISETP.NE.AND P1, PT, R10, R4, PT ; /* 0x000000040a00720c */
/* 0x000fda0003f25270 */
/*0160*/ @P1 BRA 0x1a0 ; /* 0x0000003000001947 */
/* 0x000fea0003800000 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fe200078e00ff */
/*0180*/ @P0 BRA 0x1a0 ; /* 0x0000001000000947 */
/* 0x000fea0003800000 */
/*0190*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000164000c1e1900 */
/*01a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01c0*/ @!P2 BRA 0x2e0 ; /* 0x000001100000a947 */
/* 0x000fea0003800000 */
/*01d0*/ MOV R5, RZ ; /* 0x000000ff00057202 */
/* 0x000fe20000000f00 */
/*01e0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x1 ; /* 0x00000001ff027424 */
/* 0x001fca00078e00ff */
/*01f0*/ ISETP.GE.AND P2, PT, R10, R2, PT ; /* 0x000000020a00720c */
/* 0x000fe20003f46270 */
/*0200*/ IMAD R3, R5.reuse, c[0x0][0x0], R10 ; /* 0x0000000005037a24 */
/* 0x040fe200078e020a */
/*0210*/ IADD3 R5, -R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fca0007ffe1ff */
/*0220*/ IMAD R6, R5, c[0x0][0x0], R10 ; /* 0x0000000005067a24 */
/* 0x000fc800078e020a */
/*0230*/ IMAD.SHL.U32 R6, R6, 0x4, RZ ; /* 0x0000000406067824 */
/* 0x000fe400078e00ff */
/*0240*/ @P2 IADD3 R7, R3, -R2, RZ ; /* 0x8000000203072210 */
/* 0x000fe40007ffe0ff */
/*0250*/ LDS R3, [R3.X4] ; /* 0x0000000003037984 */
/* 0x000fe20000004800 */
/*0260*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */
/* 0x000fc600000006ff */
/*0270*/ @P2 LDS R8, [R7.X4] ; /* 0x0000000007082984 */
/* 0x000e240000004800 */
/*0280*/ @P2 FADD R8, R3, R8 ; /* 0x0000000803082221 */
/* 0x001fca0000000000 */
/*0290*/ @P2 STS [R6], R8 ; /* 0x0000000806002388 */
/* 0x0001e80000000800 */
/*02a0*/ @!P2 STS [R6], R3 ; /* 0x000000030600a388 */
/* 0x0001e80000000800 */
/*02b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*02c0*/ ISETP.GE.AND P2, PT, R2, c[0x0][0x0], PT ; /* 0x0000000002007a0c */
/* 0x000fda0003f46270 */
/*02d0*/ @!P2 BRA 0x1f0 ; /* 0xffffff100000a947 */
/* 0x001fea000383ffff */
/*02e0*/ IMAD R6, R5, c[0x0][0x0], R10 ; /* 0x0000000005067a24 */
/* 0x000fe200078e020a */
/*02f0*/ @!P0 MOV R3, 0x4 ; /* 0x0000000400038802 */
/* 0x001fc60000000f00 */
/*0300*/ IMAD.SHL.U32 R6, R6, 0x4, RZ ; /* 0x0000000406067824 */
/* 0x000fe400078e00ff */
/*0310*/ @!P0 IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000028625 */
/* 0x000fc600078e0203 */
/*0320*/ @!P0 LDS R5, [R6] ; /* 0x0000000006058984 */
/* 0x000e280000000800 */
/*0330*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */
/* 0x0011e2000c101904 */
/*0340*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0350*/ LDS R3, [R6] ; /* 0x0000000006037984 */
/* 0x001e220000000800 */
/*0360*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fe400078e00ff */
/*0370*/ FADD R5, R3, R4 ; /* 0x0000000403057221 */
/* 0x021fe40000000000 */
/*0380*/ IMAD.WIDE.U32 R2, R9, R2, c[0x0][0x178] ; /* 0x00005e0009027625 */
/* 0x000fca00078e0002 */
/*0390*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*03a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03b0*/ BRA 0x3b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5adderPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0xe0 ; /* 0x000000a000007945 */
/* 0x000fe40003800000 */
/*0040*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e620000002500 */
/*0050*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x001fe20003f05270 */
/*0060*/ IMAD R0, R2, c[0x0][0x0], R3 ; /* 0x0000000002007a24 */
/* 0x002fca00078e0203 */
/*0070*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fce0003f26270 */
/*0080*/ @P0 BRA 0xd0 ; /* 0x0000004000000947 */
/* 0x000fea0003800000 */
/*0090*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*00a0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fcc00078e0003 */
/*00b0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ STS [RZ], R2 ; /* 0x00000002ff007388 */
/* 0x0041e40000000800 */
/*00d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00f0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0100*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fca0000000f00 */
/*0110*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x001fe400078e0203 */
/*0120*/ LDS R0, [RZ] ; /* 0x00000000ff007984 */
/* 0x000e280000000800 */
/*0130*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000e24000c1e1900 */
/*0140*/ FADD R5, R0, R5 ; /* 0x0000000500057221 */
/* 0x001fca0000000000 */
/*0150*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cstdio>
using namespace std;
__global__ void adder(float* arr, float* block_incrs, int n) {
int tid = threadIdx.x;
extern __shared__ float sum[];
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
sum[0] = block_incrs[blockIdx.x];
__syncthreads();
if (gtid < n) arr[gtid] += sum[0];
}
__global__ void hillis_steele(float* g_idata, float* g_odata, int n,
float* block_sums) {
// all memory writes to be serviced immediately
extern volatile __shared__ float temp[];
int tid = threadIdx.x;
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
int block_size = blockDim.x;
int pout = 0, pin = 1;
float last_element;
// load input into shared memory
if (tid == 0 || gtid >= n)
temp[tid] = 0;
else
temp[tid] = g_idata[gtid - 1];
if (tid == block_size - 1)
if (gtid < n)
last_element = g_idata[gtid];
else
last_element = 0.0;
// entire temp should've gotten populated
__syncthreads();
// if (gtid == 5) {
// std::printf("global : %f===============\n", g_idata[gtid]);
// std::printf("temp : %f===============\n", temp[tid]);
// }
for (int offset = 1; offset < block_size; offset *= 2) {
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (tid >= offset)
temp[pout * block_size + tid] =
temp[pin * block_size + tid] + temp[pin * block_size + tid - offset];
else
temp[pout * block_size + tid] = temp[pin * block_size + tid];
__syncthreads(); // I need this here before I start next iteration
}
if (gtid < n) g_odata[gtid] = temp[pout * block_size + tid];
if (tid == block_size - 1)
block_sums[blockIdx.x] = last_element + temp[pout * block_size + tid];
}
__host__ void scan(const float* in, float* out, unsigned int n,
unsigned int threads_per_block) {
int num_blocks = (n + threads_per_block - 1) / threads_per_block;
// printf("num blocks : %d\n", num_blocks);
float *din, *dout, *block_sums, *block_incrs, *dummy;
cudaMalloc((void**)&din, n * sizeof(float));
cudaMalloc((void**)&dout, n * sizeof(float));
cudaMallocManaged((void**)&block_sums, num_blocks * sizeof(float));
cudaMallocManaged((void**)&block_incrs, num_blocks * sizeof(float));
cudaMallocManaged((void**)&dummy, sizeof(float));
cudaMemcpy(din, in, n * sizeof(float), cudaMemcpyHostToHost);
// // Only applicable if threads_per_block is a power of 2
// reduce_kernel<<<num_blocks, threads_per_block, sizeof(float) *
// threads_per_block>>>(din, block_sums, n);
hillis_steele<<<num_blocks, threads_per_block,
2 * threads_per_block * sizeof(float)>>>(din, dout, n,
block_sums);
cudaDeviceSynchronize();
// get the block increments (scan it once because of assumption)
int new_num_blocks = (num_blocks + threads_per_block - 1) /
threads_per_block; // will always be 1
hillis_steele<<<new_num_blocks, threads_per_block,
2 * threads_per_block * sizeof(float)>>>(
block_sums, block_incrs, num_blocks, dummy);
cudaDeviceSynchronize();
// for (int i = 0; i < num_blocks; i++)
// printf("%f ", block_incrs[i]);
// printf("\n");
// add each block increment to each block
adder<<<num_blocks, threads_per_block, sizeof(float)>>>(dout, block_incrs, n);
cudaDeviceSynchronize();
cudaMemcpy(out, dout, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(din);
cudaFree(dout);
cudaFree(block_sums);
cudaFree(block_incrs);
cudaFree(dummy);
} | .file "tmpxft_0009604a_00000000-6_scan.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z5adderPfS_iPfS_i
.type _Z27__device_stub__Z5adderPfS_iPfS_i, @function
_Z27__device_stub__Z5adderPfS_iPfS_i:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5adderPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z5adderPfS_iPfS_i, .-_Z27__device_stub__Z5adderPfS_iPfS_i
.globl _Z5adderPfS_i
.type _Z5adderPfS_i, @function
_Z5adderPfS_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z5adderPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z5adderPfS_i, .-_Z5adderPfS_i
.globl _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
.type _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_, @function
_Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_:
.LFB2084:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13hillis_steelePfS_iS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_, .-_Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
.globl _Z13hillis_steelePfS_iS_
.type _Z13hillis_steelePfS_iS_, @function
_Z13hillis_steelePfS_iS_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z13hillis_steelePfS_iS_, .-_Z13hillis_steelePfS_iS_
.globl _Z4scanPKfPfjj
.type _Z4scanPKfPfjj, @function
_Z4scanPKfPfjj:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movl %edx, %r14d
movl %ecx, %ebx
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leal -1(%rcx), %r12d
leal (%rdx,%r12), %eax
movl $0, %edx
divl %ecx
movl %eax, %ebp
movl %r14d, %r13d
salq $2, %r13
leaq 24(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movslq %ebp, %r15
salq $2, %r15
leaq 40(%rsp), %rdi
movl $1, %edx
movq %r15, %rsi
call cudaMallocManaged@PLT
leaq 48(%rsp), %rdi
movl $1, %edx
movq %r15, %rsi
call cudaMallocManaged@PLT
leaq 56(%rsp), %rdi
movl $1, %edx
movl $4, %esi
call cudaMallocManaged@PLT
movl $0, %ecx
movq %r13, %rdx
movq (%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leal (%rbx,%rbx), %r15d
salq $2, %r15
movl %ebx, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl %ebp, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movq %r15, %r8
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L20:
call cudaDeviceSynchronize@PLT
movl %ebx, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leal 0(%rbp,%r12), %eax
movl $0, %edx
divl %ebx
movl %eax, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movq %r15, %r8
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L21:
call cudaDeviceSynchronize@PLT
movl %ebx, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl %ebp, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $4, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L27
.L22:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %r13, %rdx
movq 32(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L28
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 40(%rsp), %rcx
movl %r14d, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
jmp .L20
.L26:
movq 56(%rsp), %rcx
movl %ebp, %edx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
jmp .L21
.L27:
movl %r14d, %edx
movq 48(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z27__device_stub__Z5adderPfS_iPfS_i
jmp .L22
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z4scanPKfPfjj, .-_Z4scanPKfPfjj
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13hillis_steelePfS_iS_"
.LC1:
.string "_Z5adderPfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13hillis_steelePfS_iS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z5adderPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cstdio>
using namespace std;
__global__ void adder(float* arr, float* block_incrs, int n) {
int tid = threadIdx.x;
extern __shared__ float sum[];
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
sum[0] = block_incrs[blockIdx.x];
__syncthreads();
if (gtid < n) arr[gtid] += sum[0];
}
__global__ void hillis_steele(float* g_idata, float* g_odata, int n,
float* block_sums) {
// all memory writes to be serviced immediately
extern volatile __shared__ float temp[];
int tid = threadIdx.x;
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
int block_size = blockDim.x;
int pout = 0, pin = 1;
float last_element;
// load input into shared memory
if (tid == 0 || gtid >= n)
temp[tid] = 0;
else
temp[tid] = g_idata[gtid - 1];
if (tid == block_size - 1)
if (gtid < n)
last_element = g_idata[gtid];
else
last_element = 0.0;
// entire temp should've gotten populated
__syncthreads();
// if (gtid == 5) {
// std::printf("global : %f===============\n", g_idata[gtid]);
// std::printf("temp : %f===============\n", temp[tid]);
// }
for (int offset = 1; offset < block_size; offset *= 2) {
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (tid >= offset)
temp[pout * block_size + tid] =
temp[pin * block_size + tid] + temp[pin * block_size + tid - offset];
else
temp[pout * block_size + tid] = temp[pin * block_size + tid];
__syncthreads(); // I need this here before I start next iteration
}
if (gtid < n) g_odata[gtid] = temp[pout * block_size + tid];
if (tid == block_size - 1)
block_sums[blockIdx.x] = last_element + temp[pout * block_size + tid];
}
__host__ void scan(const float* in, float* out, unsigned int n,
unsigned int threads_per_block) {
int num_blocks = (n + threads_per_block - 1) / threads_per_block;
// printf("num blocks : %d\n", num_blocks);
float *din, *dout, *block_sums, *block_incrs, *dummy;
cudaMalloc((void**)&din, n * sizeof(float));
cudaMalloc((void**)&dout, n * sizeof(float));
cudaMallocManaged((void**)&block_sums, num_blocks * sizeof(float));
cudaMallocManaged((void**)&block_incrs, num_blocks * sizeof(float));
cudaMallocManaged((void**)&dummy, sizeof(float));
cudaMemcpy(din, in, n * sizeof(float), cudaMemcpyHostToHost);
// // Only applicable if threads_per_block is a power of 2
// reduce_kernel<<<num_blocks, threads_per_block, sizeof(float) *
// threads_per_block>>>(din, block_sums, n);
hillis_steele<<<num_blocks, threads_per_block,
2 * threads_per_block * sizeof(float)>>>(din, dout, n,
block_sums);
cudaDeviceSynchronize();
// get the block increments (scan it once because of assumption)
int new_num_blocks = (num_blocks + threads_per_block - 1) /
threads_per_block; // will always be 1
hillis_steele<<<new_num_blocks, threads_per_block,
2 * threads_per_block * sizeof(float)>>>(
block_sums, block_incrs, num_blocks, dummy);
cudaDeviceSynchronize();
// for (int i = 0; i < num_blocks; i++)
// printf("%f ", block_incrs[i]);
// printf("\n");
// add each block increment to each block
adder<<<num_blocks, threads_per_block, sizeof(float)>>>(dout, block_incrs, n);
cudaDeviceSynchronize();
cudaMemcpy(out, dout, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(din);
cudaFree(dout);
cudaFree(block_sums);
cudaFree(block_incrs);
cudaFree(dummy);
} | #include <hip/hip_runtime.h>
#include <cstdio>
using namespace std;
__global__ void adder(float* arr, float* block_incrs, int n) {
int tid = threadIdx.x;
extern __shared__ float sum[];
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
sum[0] = block_incrs[blockIdx.x];
__syncthreads();
if (gtid < n) arr[gtid] += sum[0];
}
__global__ void hillis_steele(float* g_idata, float* g_odata, int n,
float* block_sums) {
// all memory writes to be serviced immediately
extern volatile __shared__ float temp[];
int tid = threadIdx.x;
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
int block_size = blockDim.x;
int pout = 0, pin = 1;
float last_element;
// load input into shared memory
if (tid == 0 || gtid >= n)
temp[tid] = 0;
else
temp[tid] = g_idata[gtid - 1];
if (tid == block_size - 1)
if (gtid < n)
last_element = g_idata[gtid];
else
last_element = 0.0;
// entire temp should've gotten populated
__syncthreads();
// if (gtid == 5) {
// std::printf("global : %f===============\n", g_idata[gtid]);
// std::printf("temp : %f===============\n", temp[tid]);
// }
for (int offset = 1; offset < block_size; offset *= 2) {
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (tid >= offset)
temp[pout * block_size + tid] =
temp[pin * block_size + tid] + temp[pin * block_size + tid - offset];
else
temp[pout * block_size + tid] = temp[pin * block_size + tid];
__syncthreads(); // I need this here before I start next iteration
}
if (gtid < n) g_odata[gtid] = temp[pout * block_size + tid];
if (tid == block_size - 1)
block_sums[blockIdx.x] = last_element + temp[pout * block_size + tid];
}
__host__ void scan(const float* in, float* out, unsigned int n,
unsigned int threads_per_block) {
int num_blocks = (n + threads_per_block - 1) / threads_per_block;
// printf("num blocks : %d\n", num_blocks);
float *din, *dout, *block_sums, *block_incrs, *dummy;
hipMalloc((void**)&din, n * sizeof(float));
hipMalloc((void**)&dout, n * sizeof(float));
hipMallocManaged((void**)&block_sums, num_blocks * sizeof(float));
hipMallocManaged((void**)&block_incrs, num_blocks * sizeof(float));
hipMallocManaged((void**)&dummy, sizeof(float));
hipMemcpy(din, in, n * sizeof(float), hipMemcpyHostToHost);
// // Only applicable if threads_per_block is a power of 2
// reduce_kernel<<<num_blocks, threads_per_block, sizeof(float) *
// threads_per_block>>>(din, block_sums, n);
hillis_steele<<<num_blocks, threads_per_block,
2 * threads_per_block * sizeof(float)>>>(din, dout, n,
block_sums);
hipDeviceSynchronize();
// get the block increments (scan it once because of assumption)
int new_num_blocks = (num_blocks + threads_per_block - 1) /
threads_per_block; // will always be 1
hillis_steele<<<new_num_blocks, threads_per_block,
2 * threads_per_block * sizeof(float)>>>(
block_sums, block_incrs, num_blocks, dummy);
hipDeviceSynchronize();
// for (int i = 0; i < num_blocks; i++)
// printf("%f ", block_incrs[i]);
// printf("\n");
// add each block increment to each block
adder<<<num_blocks, threads_per_block, sizeof(float)>>>(dout, block_incrs, n);
hipDeviceSynchronize();
hipMemcpy(out, dout, n * sizeof(float), hipMemcpyDeviceToHost);
hipFree(din);
hipFree(dout);
hipFree(block_sums);
hipFree(block_incrs);
hipFree(dummy);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <cstdio>
using namespace std;
__global__ void adder(float* arr, float* block_incrs, int n) {
int tid = threadIdx.x;
extern __shared__ float sum[];
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
sum[0] = block_incrs[blockIdx.x];
__syncthreads();
if (gtid < n) arr[gtid] += sum[0];
}
__global__ void hillis_steele(float* g_idata, float* g_odata, int n,
float* block_sums) {
// all memory writes to be serviced immediately
extern volatile __shared__ float temp[];
int tid = threadIdx.x;
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
int block_size = blockDim.x;
int pout = 0, pin = 1;
float last_element;
// load input into shared memory
if (tid == 0 || gtid >= n)
temp[tid] = 0;
else
temp[tid] = g_idata[gtid - 1];
if (tid == block_size - 1)
if (gtid < n)
last_element = g_idata[gtid];
else
last_element = 0.0;
// entire temp should've gotten populated
__syncthreads();
// if (gtid == 5) {
// std::printf("global : %f===============\n", g_idata[gtid]);
// std::printf("temp : %f===============\n", temp[tid]);
// }
for (int offset = 1; offset < block_size; offset *= 2) {
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (tid >= offset)
temp[pout * block_size + tid] =
temp[pin * block_size + tid] + temp[pin * block_size + tid - offset];
else
temp[pout * block_size + tid] = temp[pin * block_size + tid];
__syncthreads(); // I need this here before I start next iteration
}
if (gtid < n) g_odata[gtid] = temp[pout * block_size + tid];
if (tid == block_size - 1)
block_sums[blockIdx.x] = last_element + temp[pout * block_size + tid];
}
// Shifted (exclusive-style) prefix scan of `in` (length n) into `out`.
// Pipeline: (1) per-block Hillis-Steele scan that also emits each block's
// total, (2) one more scan pass over the block totals (assumes num_blocks
// <= threads_per_block so a single block covers them), (3) add each
// block's scanned increment back to its tile.
// Preconditions: n > 0, threads_per_block > 0, and
// num_blocks <= threads_per_block (otherwise step 2 is incomplete).
// NOTE(review): hip* return codes are not checked anywhere here — TODO
// wrap calls in an error-checking macro.
__host__ void scan(const float* in, float* out, unsigned int n,
                   unsigned int threads_per_block) {
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  // printf("num blocks : %d\n", num_blocks);
  float *din, *dout, *block_sums, *block_incrs, *dummy;
  hipMalloc((void**)&din, n * sizeof(float));
  hipMalloc((void**)&dout, n * sizeof(float));
  hipMallocManaged((void**)&block_sums, num_blocks * sizeof(float));
  hipMallocManaged((void**)&block_incrs, num_blocks * sizeof(float));
  hipMallocManaged((void**)&dummy, sizeof(float));
  // BUGFIX: `in` is host memory and `din` is device memory, so the copy
  // kind must be hipMemcpyHostToDevice; the previous hipMemcpyHostToHost
  // asks the runtime for a host-side memcpy into a device pointer.
  hipMemcpy(din, in, n * sizeof(float), hipMemcpyHostToDevice);
  // // Only applicable if threads_per_block is a power of 2
  // reduce_kernel<<<num_blocks, threads_per_block, sizeof(float) *
  // threads_per_block>>>(din, block_sums, n);
  // Dynamic shared memory: two floats per thread (double buffer).
  hillis_steele<<<num_blocks, threads_per_block,
                  2 * threads_per_block * sizeof(float)>>>(din, dout, n,
                                                           block_sums);
  hipDeviceSynchronize();
  // get the block increments (scan it once because of assumption)
  int new_num_blocks = (num_blocks + threads_per_block - 1) /
                       threads_per_block;  // will always be 1
  hillis_steele<<<new_num_blocks, threads_per_block,
                  2 * threads_per_block * sizeof(float)>>>(
      block_sums, block_incrs, num_blocks, dummy);
  hipDeviceSynchronize();
  // add each block increment to each block
  adder<<<num_blocks, threads_per_block, sizeof(float)>>>(dout, block_incrs, n);
  hipDeviceSynchronize();
  hipMemcpy(out, dout, n * sizeof(float), hipMemcpyDeviceToHost);
  hipFree(din);
  hipFree(dout);
  hipFree(block_sums);
  hipFree(block_incrs);
  hipFree(dummy);
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5adderPfS_i
.globl _Z5adderPfS_i
.p2align 8
.type _Z5adderPfS_i,@function
_Z5adderPfS_i:
s_load_b32 s4, s[0:1], 0x24
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_mov_b32 s5, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_2
s_load_b64 s[6:7], s[0:1], 0x8
s_lshl_b64 s[8:9], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s6, s6, s8
s_addc_u32 s7, s7, s9
s_load_b32 s3, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s3
ds_store_b32 v1, v2
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s5
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, 0xffff, s4
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s2, s4, v[0:1]
s_barrier
buffer_gl0_inv
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_4
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[1:2]
ds_load_b32 v3, v3
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5adderPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5adderPfS_i, .Lfunc_end0-_Z5adderPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13hillis_steelePfS_iS_
.globl _Z13hillis_steelePfS_iS_
.p2align 8
.type _Z13hillis_steelePfS_iS_,@function
_Z13hillis_steelePfS_iS_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x10
s_load_b64 s[6:7], s[0:1], 0x0
s_mov_b32 s4, s15
v_mov_b32_e32 v5, 0
v_mov_b32_e32 v3, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
v_cmp_ne_u32_e64 s2, 0, v0
v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
v_ashrrev_i32_e32 v2, 31, v1
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v3, s2, s6, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s2, s7, v4, s2
global_load_b32 v3, v[3:4], off offset:-4
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s3
v_lshl_add_u32 v4, v0, 2, 0
s_add_i32 s2, s5, -1
s_mov_b64 s[8:9], src_shared_base
v_cmp_eq_u32_e64 s2, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s3, -1, v4
v_cndmask_b32_e64 v6, 0, v4, s3
v_cndmask_b32_e64 v7, 0, s9, s3
s_delay_alu instid0(VALU_DEP_4)
s_and_b32 s3, s2, vcc_lo
s_waitcnt vmcnt(0)
flat_store_b32 v[6:7], v3 dlc
s_waitcnt_vscnt null, 0x0
s_and_saveexec_b32 s8, s3
s_cbranch_execz .LBB1_4
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s3, s6, v3
v_add_co_ci_u32_e64 v4, s3, s7, v4, s3
global_load_b32 v5, v[3:4], off
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s8
s_cmp_lt_u32 s5, 2
s_mov_b32 s8, 0
s_waitcnt vmcnt(0) lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_9
s_mov_b32 s9, 1
s_mov_b64 s[6:7], src_shared_base
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_7
.p2align 6
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s6
s_sub_i32 s8, 1, s8
s_lshl_b32 s9, s9, 1
v_mad_u64_u32 v[6:7], null, s8, s5, v[0:1]
s_cmp_ge_u32 s9, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v3, v6, 2, 0
v_cmp_ne_u32_e64 s3, -1, v3
s_delay_alu instid0(VALU_DEP_1)
v_cndmask_b32_e64 v6, 0, v3, s3
v_cndmask_b32_e64 v7, 0, s7, s3
s_waitcnt lgkmcnt(0)
flat_store_b32 v[6:7], v4 dlc
s_waitcnt_vscnt null, 0x0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_9
.LBB1_7:
v_mad_u64_u32 v[3:4], null, s8, s5, v[0:1]
s_mov_b32 s6, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v4, v3, 2, 0
v_cmp_ne_u32_e64 s3, -1, v4
s_delay_alu instid0(VALU_DEP_1)
v_cndmask_b32_e64 v6, 0, v4, s3
v_cndmask_b32_e64 v7, 0, s7, s3
flat_load_b32 v4, v[6:7] glc dlc
s_waitcnt vmcnt(0)
v_cmpx_le_u32_e64 s9, v0
s_cbranch_execz .LBB1_6
v_subrev_nc_u32_e32 v3, s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v3, v3, 2, 0
v_cmp_ne_u32_e64 s3, -1, v3
s_delay_alu instid0(VALU_DEP_1)
v_cndmask_b32_e64 v6, 0, v3, s3
v_cndmask_b32_e64 v7, 0, s7, s3
flat_load_b32 v3, v[6:7] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_f32_e32 v4, v4, v3
s_branch .LBB1_6
.LBB1_9:
s_set_inst_prefetch_distance 0x2
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB1_11
v_mad_u64_u32 v[3:4], null, s8, s5, v[0:1]
s_mov_b64 s[6:7], src_shared_base
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v3, v3, 2, 0
v_cmp_ne_u32_e32 vcc_lo, -1, v3
v_cndmask_b32_e32 v3, 0, v3, vcc_lo
v_cndmask_b32_e64 v4, 0, s7, vcc_lo
s_load_b64 s[6:7], s[0:1], 0x8
flat_load_b32 v3, v[3:4] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_store_b32 v[1:2], v3, off
.LBB1_11:
s_or_b32 exec_lo, exec_lo, s3
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_13
v_mad_u64_u32 v[1:2], null, s8, s5, v[0:1]
s_mov_b64 s[2:3], src_shared_base
s_load_b64 s[0:1], s[0:1], 0x18
s_mov_b32 s5, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v0, v1, 2, 0
v_cmp_ne_u32_e32 vcc_lo, -1, v0
v_cndmask_b32_e32 v0, 0, v0, vcc_lo
v_cndmask_b32_e64 v1, 0, s3, vcc_lo
s_lshl_b64 s[2:3], s[4:5], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
flat_load_b32 v0, v[0:1] glc dlc
s_waitcnt vmcnt(0)
v_mov_b32_e32 v1, 0
s_addc_u32 s1, s1, s3
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v0, v5, v0
global_store_b32 v1, v0, s[0:1]
.LBB1_13:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13hillis_steelePfS_iS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13hillis_steelePfS_iS_, .Lfunc_end1-_Z13hillis_steelePfS_iS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym temp
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5adderPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5adderPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
- .offset: 152
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13hillis_steelePfS_iS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13hillis_steelePfS_iS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <cstdio>
using namespace std;
// Adds this block's scanned increment (block_incrs[blockIdx.x]) to every
// element of the block's tile of arr. Launch with sizeof(float) of dynamic
// shared memory to hold the broadcast value.
__global__ void adder(float* arr, float* block_incrs, int n) {
  extern __shared__ float sum[];
  const int tid = threadIdx.x;
  const int gtid = blockDim.x * blockIdx.x + tid;
  // One thread stages the block's increment; the barrier publishes it.
  if (tid == 0) sum[0] = block_incrs[blockIdx.x];
  __syncthreads();
  // Guard the tail block, whose tile may extend past n.
  if (gtid < n) arr[gtid] += sum[0];
}
// Per-block Hillis-Steele scan of g_idata into g_odata, shifted by one
// element (each thread loads g_idata[gtid-1], with 0 as the identity), so
// the result is an exclusive-style scan within each block. Also writes
// the block's total (last loaded element + last scan value) to
// block_sums[blockIdx.x] for a later cross-block fix-up pass.
// Requires 2 * blockDim.x * sizeof(float) of dynamic shared memory
// (double buffer: two halves of `temp`, swapped each iteration).
__global__ void hillis_steele(float* g_idata, float* g_odata, int n,
                              float* block_sums) {
  // all memory writes to be serviced immediately
  extern volatile __shared__ float temp[];
  int tid = threadIdx.x;  // index within the block
  int gtid = blockIdx.x * blockDim.x + threadIdx.x;  // global element index
  int block_size = blockDim.x;
  // pout/pin select which half of temp is written/read this iteration.
  int pout = 0, pin = 1;
  float last_element;  // only meaningful for tid == block_size - 1
  // load input into shared memory
  // Shift-by-one load: thread tid stores element gtid-1; tid 0 and
  // out-of-range threads contribute the additive identity 0.
  if (tid == 0 || gtid >= n)
    temp[tid] = 0;
  else
    temp[tid] = g_idata[gtid - 1];
  // The last thread keeps its own (unshifted) element so block_sums gets
  // the true block total below. (0.0 is a double literal; harmless here,
  // it is just converted on assignment to the float.)
  if (tid == block_size - 1)
    if (gtid < n)
      last_element = g_idata[gtid];
    else
      last_element = 0.0;
  // entire temp should've gotten populated
  __syncthreads();
  // if (gtid == 5) {
  //   std::printf("global : %f===============\n", g_idata[gtid]);
  //   std::printf("temp : %f===============\n", temp[tid]);
  // }
  // Hillis-Steele: at each step, every element adds the element `offset`
  // positions to its left; log2(block_size) iterations total. Reads come
  // from the pin half, writes go to the pout half, so no read/write race
  // within an iteration.
  for (int offset = 1; offset < block_size; offset *= 2) {
    pout = 1 - pout;  // swap double buffer indices
    pin = 1 - pout;
    if (tid >= offset)
      temp[pout * block_size + tid] =
          temp[pin * block_size + tid] + temp[pin * block_size + tid - offset];
    else
      temp[pout * block_size + tid] = temp[pin * block_size + tid];
    __syncthreads();  // I need this here before I start next iteration
  }
  // Publish this block's scan values; pout names the half holding the
  // final result.
  if (gtid < n) g_odata[gtid] = temp[pout * block_size + tid];
  // Block total = last input element + scan value at the last slot.
  if (tid == block_size - 1)
    block_sums[blockIdx.x] = last_element + temp[pout * block_size + tid];
}
// Shifted (exclusive-style) prefix scan of `in` (length n) into `out`.
// Pipeline: (1) per-block Hillis-Steele scan that also emits each block's
// total, (2) one more scan pass over the block totals (assumes num_blocks
// <= threads_per_block so a single block covers them), (3) add each
// block's scanned increment back to its tile.
// Preconditions: n > 0, threads_per_block > 0, and
// num_blocks <= threads_per_block (otherwise step 2 is incomplete).
// NOTE(review): hip* return codes are not checked anywhere here — TODO
// wrap calls in an error-checking macro.
__host__ void scan(const float* in, float* out, unsigned int n,
                   unsigned int threads_per_block) {
  int num_blocks = (n + threads_per_block - 1) / threads_per_block;
  // printf("num blocks : %d\n", num_blocks);
  float *din, *dout, *block_sums, *block_incrs, *dummy;
  hipMalloc((void**)&din, n * sizeof(float));
  hipMalloc((void**)&dout, n * sizeof(float));
  hipMallocManaged((void**)&block_sums, num_blocks * sizeof(float));
  hipMallocManaged((void**)&block_incrs, num_blocks * sizeof(float));
  hipMallocManaged((void**)&dummy, sizeof(float));
  // BUGFIX: `in` is host memory and `din` is device memory, so the copy
  // kind must be hipMemcpyHostToDevice; the previous hipMemcpyHostToHost
  // asks the runtime for a host-side memcpy into a device pointer.
  hipMemcpy(din, in, n * sizeof(float), hipMemcpyHostToDevice);
  // // Only applicable if threads_per_block is a power of 2
  // reduce_kernel<<<num_blocks, threads_per_block, sizeof(float) *
  // threads_per_block>>>(din, block_sums, n);
  // Dynamic shared memory: two floats per thread (double buffer).
  hillis_steele<<<num_blocks, threads_per_block,
                  2 * threads_per_block * sizeof(float)>>>(din, dout, n,
                                                           block_sums);
  hipDeviceSynchronize();
  // get the block increments (scan it once because of assumption)
  int new_num_blocks = (num_blocks + threads_per_block - 1) /
                       threads_per_block;  // will always be 1
  hillis_steele<<<new_num_blocks, threads_per_block,
                  2 * threads_per_block * sizeof(float)>>>(
      block_sums, block_incrs, num_blocks, dummy);
  hipDeviceSynchronize();
  // add each block increment to each block
  adder<<<num_blocks, threads_per_block, sizeof(float)>>>(dout, block_incrs, n);
  hipDeviceSynchronize();
  hipMemcpy(out, dout, n * sizeof(float), hipMemcpyDeviceToHost);
  hipFree(din);
  hipFree(dout);
  hipFree(block_sums);
  hipFree(block_incrs);
  hipFree(dummy);
}
.file "scan.hip"
.globl _Z20__device_stub__adderPfS_i # -- Begin function _Z20__device_stub__adderPfS_i
.p2align 4, 0x90
.type _Z20__device_stub__adderPfS_i,@function
_Z20__device_stub__adderPfS_i: # @_Z20__device_stub__adderPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5adderPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z20__device_stub__adderPfS_i, .Lfunc_end0-_Z20__device_stub__adderPfS_i
.cfi_endproc
# -- End function
.globl _Z28__device_stub__hillis_steelePfS_iS_ # -- Begin function _Z28__device_stub__hillis_steelePfS_iS_
.p2align 4, 0x90
.type _Z28__device_stub__hillis_steelePfS_iS_,@function
_Z28__device_stub__hillis_steelePfS_iS_: # @_Z28__device_stub__hillis_steelePfS_iS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movq %rcx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13hillis_steelePfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z28__device_stub__hillis_steelePfS_iS_, .Lfunc_end1-_Z28__device_stub__hillis_steelePfS_iS_
.cfi_endproc
# -- End function
.globl _Z4scanPKfPfjj # -- Begin function _Z4scanPKfPfjj
.p2align 4, 0x90
.type _Z4scanPKfPfjj,@function
_Z4scanPKfPfjj: # @_Z4scanPKfPfjj
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %r15d
movl %edx, %ecx
movq %rsi, 176(%rsp) # 8-byte Spill
movq %rdi, %r13
movabsq $4294967296, %rbx # imm = 0x100000000
leal (%r15,%rcx), %eax
decl %eax
xorl %edx, %edx
divl %r15d
movl %eax, %r12d
movq %rcx, 152(%rsp) # 8-byte Spill
movl %ecx, %r14d
shlq $2, %r14
leaq 104(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 80(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movslq %r12d, %rbp
shlq $2, %rbp
leaq 96(%rsp), %rdi
movq %rbp, %rsi
movl $1, %edx
callq hipMallocManaged
leaq 88(%rsp), %rdi
movq %rbp, %rsi
movl $1, %edx
callq hipMallocManaged
leaq 160(%rsp), %rdi
movl $4, %esi
movl $1, %edx
callq hipMallocManaged
movq 104(%rsp), %rdi
movq %r13, %rsi
movq %r14, %rdx
xorl %ecx, %ecx
callq hipMemcpy
leal (%r15,%r15), %ebp
shlq $2, %rbp
leaq (%r12,%rbx), %rdi
movl %r15d, %r13d
orq %rbx, %r13
movq %rdi, 168(%rsp) # 8-byte Spill
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
movq %rbp, %r8
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq 104(%rsp), %rax
movq 80(%rsp), %rcx
movq 96(%rsp), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq 152(%rsp), %rax # 8-byte Reload
movl %eax, 4(%rsp)
movq %rdx, 24(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13hillis_steelePfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
callq hipDeviceSynchronize
leal (%r15,%r12), %eax
decl %eax
xorl %edx, %edx
divl %r15d
# kill: def $eax killed $eax def $rax
orq %rax, %rbx
movq %rbx, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
movq %rbp, %r8
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq 96(%rsp), %rax
movq 88(%rsp), %rcx
movq 160(%rsp), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r12d, 4(%rsp)
movq %rdx, 24(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13hillis_steelePfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
callq hipDeviceSynchronize
movl $4, %r8d
movq 168(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 80(%rsp), %rax
movq 88(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq 152(%rsp), %rax # 8-byte Reload
movl %eax, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z5adderPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
callq hipDeviceSynchronize
movq 80(%rsp), %rsi
movq 176(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 104(%rsp), %rdi
callq hipFree
movq 80(%rsp), %rdi
callq hipFree
movq 96(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq 160(%rsp), %rdi
callq hipFree
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z4scanPKfPfjj, .Lfunc_end2-_Z4scanPKfPfjj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5adderPfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13hillis_steelePfS_iS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5adderPfS_i,@object # @_Z5adderPfS_i
.section .rodata,"a",@progbits
.globl _Z5adderPfS_i
.p2align 3, 0x0
_Z5adderPfS_i:
.quad _Z20__device_stub__adderPfS_i
.size _Z5adderPfS_i, 8
.type _Z13hillis_steelePfS_iS_,@object # @_Z13hillis_steelePfS_iS_
.globl _Z13hillis_steelePfS_iS_
.p2align 3, 0x0
_Z13hillis_steelePfS_iS_:
.quad _Z28__device_stub__hillis_steelePfS_iS_
.size _Z13hillis_steelePfS_iS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5adderPfS_i"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13hillis_steelePfS_iS_"
.size .L__unnamed_2, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__adderPfS_i
.addrsig_sym _Z28__device_stub__hillis_steelePfS_iS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5adderPfS_i
.addrsig_sym _Z13hillis_steelePfS_iS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13hillis_steelePfS_iS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ BSSY B0, 0x100 ; /* 0x000000b000007945 */
/* 0x000fe20003800000 */
/*0050*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R0, R9, c[0x0][0x0], R10 ; /* 0x0000000009007a24 */
/* 0x001fca00078e020a */
/*0070*/ IMAD.WIDE R2, R0.reuse, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x040fe200078e0203 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc80003f06270 */
/*0090*/ ISETP.EQ.OR P1, PT, R10, RZ, P0 ; /* 0x000000ff0a00720c */
/* 0x000fda0000722670 */
/*00a0*/ @P1 BRA 0xe0 ; /* 0x0000003000001947 */
/* 0x000fea0003800000 */
/*00b0*/ LDG.E R4, [R2.64+-0x4] ; /* 0xfffffc0402047981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ STS [R10.X4], R4 ; /* 0x000000040a007388 */
/* 0x0041e20000004800 */
/*00d0*/ BRA 0xf0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*00e0*/ STS [R10.X4], RZ ; /* 0x000000ff0a007388 */
/* 0x0001e40000004800 */
/*00f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0100*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff057624 */
/* 0x000fe200078e00ff */
/*0110*/ BSSY B0, 0x1b0 ; /* 0x0000009000007945 */
/* 0x000fe80003800000 */
/*0120*/ IADD3 R4, R5, -0x1, RZ ; /* 0xffffffff05047810 */
/* 0x001fc40007ffe0ff */
/*0130*/ ISETP.GE.AND P2, PT, R5, 0x2, PT ; /* 0x000000020500780c */
/* 0x000fe20003f46270 */
/*0140*/ HFMA2.MMA R5, -RZ, RZ, 0, 0 ; /* 0x00000000ff057435 */
/* 0x000fe200000001ff */
/*0150*/ ISETP.NE.AND P1, PT, R10, R4, PT ; /* 0x000000040a00720c */
/* 0x000fda0003f25270 */
/*0160*/ @P1 BRA 0x1a0 ; /* 0x0000003000001947 */
/* 0x000fea0003800000 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fe200078e00ff */
/*0180*/ @P0 BRA 0x1a0 ; /* 0x0000001000000947 */
/* 0x000fea0003800000 */
/*0190*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000164000c1e1900 */
/*01a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01c0*/ @!P2 BRA 0x2e0 ; /* 0x000001100000a947 */
/* 0x000fea0003800000 */
/*01d0*/ MOV R5, RZ ; /* 0x000000ff00057202 */
/* 0x000fe20000000f00 */
/*01e0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x1 ; /* 0x00000001ff027424 */
/* 0x001fca00078e00ff */
/*01f0*/ ISETP.GE.AND P2, PT, R10, R2, PT ; /* 0x000000020a00720c */
/* 0x000fe20003f46270 */
/*0200*/ IMAD R3, R5.reuse, c[0x0][0x0], R10 ; /* 0x0000000005037a24 */
/* 0x040fe200078e020a */
/*0210*/ IADD3 R5, -R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fca0007ffe1ff */
/*0220*/ IMAD R6, R5, c[0x0][0x0], R10 ; /* 0x0000000005067a24 */
/* 0x000fc800078e020a */
/*0230*/ IMAD.SHL.U32 R6, R6, 0x4, RZ ; /* 0x0000000406067824 */
/* 0x000fe400078e00ff */
/*0240*/ @P2 IADD3 R7, R3, -R2, RZ ; /* 0x8000000203072210 */
/* 0x000fe40007ffe0ff */
/*0250*/ LDS R3, [R3.X4] ; /* 0x0000000003037984 */
/* 0x000fe20000004800 */
/*0260*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */
/* 0x000fc600000006ff */
/*0270*/ @P2 LDS R8, [R7.X4] ; /* 0x0000000007082984 */
/* 0x000e240000004800 */
/*0280*/ @P2 FADD R8, R3, R8 ; /* 0x0000000803082221 */
/* 0x001fca0000000000 */
/*0290*/ @P2 STS [R6], R8 ; /* 0x0000000806002388 */
/* 0x0001e80000000800 */
/*02a0*/ @!P2 STS [R6], R3 ; /* 0x000000030600a388 */
/* 0x0001e80000000800 */
/*02b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*02c0*/ ISETP.GE.AND P2, PT, R2, c[0x0][0x0], PT ; /* 0x0000000002007a0c */
/* 0x000fda0003f46270 */
/*02d0*/ @!P2 BRA 0x1f0 ; /* 0xffffff100000a947 */
/* 0x001fea000383ffff */
/*02e0*/ IMAD R6, R5, c[0x0][0x0], R10 ; /* 0x0000000005067a24 */
/* 0x000fe200078e020a */
/*02f0*/ @!P0 MOV R3, 0x4 ; /* 0x0000000400038802 */
/* 0x001fc60000000f00 */
/*0300*/ IMAD.SHL.U32 R6, R6, 0x4, RZ ; /* 0x0000000406067824 */
/* 0x000fe400078e00ff */
/*0310*/ @!P0 IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000028625 */
/* 0x000fc600078e0203 */
/*0320*/ @!P0 LDS R5, [R6] ; /* 0x0000000006058984 */
/* 0x000e280000000800 */
/*0330*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */
/* 0x0011e2000c101904 */
/*0340*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0350*/ LDS R3, [R6] ; /* 0x0000000006037984 */
/* 0x001e220000000800 */
/*0360*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fe400078e00ff */
/*0370*/ FADD R5, R3, R4 ; /* 0x0000000403057221 */
/* 0x021fe40000000000 */
/*0380*/ IMAD.WIDE.U32 R2, R9, R2, c[0x0][0x178] ; /* 0x00005e0009027625 */
/* 0x000fca00078e0002 */
/*0390*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*03a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03b0*/ BRA 0x3b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5adderPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0xe0 ; /* 0x000000a000007945 */
/* 0x000fe40003800000 */
/*0040*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e620000002500 */
/*0050*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x001fe20003f05270 */
/*0060*/ IMAD R0, R2, c[0x0][0x0], R3 ; /* 0x0000000002007a24 */
/* 0x002fca00078e0203 */
/*0070*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fce0003f26270 */
/*0080*/ @P0 BRA 0xd0 ; /* 0x0000004000000947 */
/* 0x000fea0003800000 */
/*0090*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*00a0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fcc00078e0003 */
/*00b0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ STS [RZ], R2 ; /* 0x00000002ff007388 */
/* 0x0041e40000000800 */
/*00d0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00f0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0100*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fca0000000f00 */
/*0110*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x001fe400078e0203 */
/*0120*/ LDS R0, [RZ] ; /* 0x00000000ff007984 */
/* 0x000e280000000800 */
/*0130*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000e24000c1e1900 */
/*0140*/ FADD R5, R0, R5 ; /* 0x0000000500057221 */
/* 0x001fca0000000000 */
/*0150*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5adderPfS_i
.globl _Z5adderPfS_i
.p2align 8
.type _Z5adderPfS_i,@function
_Z5adderPfS_i:
s_load_b32 s4, s[0:1], 0x24
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_mov_b32 s5, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_2
s_load_b64 s[6:7], s[0:1], 0x8
s_lshl_b64 s[8:9], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s6, s6, s8
s_addc_u32 s7, s7, s9
s_load_b32 s3, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s3
ds_store_b32 v1, v2
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s5
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, 0xffff, s4
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s2, s4, v[0:1]
s_barrier
buffer_gl0_inv
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_4
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[1:2]
ds_load_b32 v3, v3
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5adderPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5adderPfS_i, .Lfunc_end0-_Z5adderPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13hillis_steelePfS_iS_
.globl _Z13hillis_steelePfS_iS_
.p2align 8
.type _Z13hillis_steelePfS_iS_,@function
_Z13hillis_steelePfS_iS_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x10
s_load_b64 s[6:7], s[0:1], 0x0
s_mov_b32 s4, s15
v_mov_b32_e32 v5, 0
v_mov_b32_e32 v3, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
v_cmp_ne_u32_e64 s2, 0, v0
v_mad_u64_u32 v[1:2], null, s4, s5, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
v_ashrrev_i32_e32 v2, 31, v1
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v3, s2, s6, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s2, s7, v4, s2
global_load_b32 v3, v[3:4], off offset:-4
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s3
v_lshl_add_u32 v4, v0, 2, 0
s_add_i32 s2, s5, -1
s_mov_b64 s[8:9], src_shared_base
v_cmp_eq_u32_e64 s2, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s3, -1, v4
v_cndmask_b32_e64 v6, 0, v4, s3
v_cndmask_b32_e64 v7, 0, s9, s3
s_delay_alu instid0(VALU_DEP_4)
s_and_b32 s3, s2, vcc_lo
s_waitcnt vmcnt(0)
flat_store_b32 v[6:7], v3 dlc
s_waitcnt_vscnt null, 0x0
s_and_saveexec_b32 s8, s3
s_cbranch_execz .LBB1_4
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s3, s6, v3
v_add_co_ci_u32_e64 v4, s3, s7, v4, s3
global_load_b32 v5, v[3:4], off
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s8
s_cmp_lt_u32 s5, 2
s_mov_b32 s8, 0
s_waitcnt vmcnt(0) lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_9
s_mov_b32 s9, 1
s_mov_b64 s[6:7], src_shared_base
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_7
.p2align 6
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s6
s_sub_i32 s8, 1, s8
s_lshl_b32 s9, s9, 1
v_mad_u64_u32 v[6:7], null, s8, s5, v[0:1]
s_cmp_ge_u32 s9, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v3, v6, 2, 0
v_cmp_ne_u32_e64 s3, -1, v3
s_delay_alu instid0(VALU_DEP_1)
v_cndmask_b32_e64 v6, 0, v3, s3
v_cndmask_b32_e64 v7, 0, s7, s3
s_waitcnt lgkmcnt(0)
flat_store_b32 v[6:7], v4 dlc
s_waitcnt_vscnt null, 0x0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_9
.LBB1_7:
v_mad_u64_u32 v[3:4], null, s8, s5, v[0:1]
s_mov_b32 s6, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v4, v3, 2, 0
v_cmp_ne_u32_e64 s3, -1, v4
s_delay_alu instid0(VALU_DEP_1)
v_cndmask_b32_e64 v6, 0, v4, s3
v_cndmask_b32_e64 v7, 0, s7, s3
flat_load_b32 v4, v[6:7] glc dlc
s_waitcnt vmcnt(0)
v_cmpx_le_u32_e64 s9, v0
s_cbranch_execz .LBB1_6
v_subrev_nc_u32_e32 v3, s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v3, v3, 2, 0
v_cmp_ne_u32_e64 s3, -1, v3
s_delay_alu instid0(VALU_DEP_1)
v_cndmask_b32_e64 v6, 0, v3, s3
v_cndmask_b32_e64 v7, 0, s7, s3
flat_load_b32 v3, v[6:7] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_f32_e32 v4, v4, v3
s_branch .LBB1_6
.LBB1_9:
s_set_inst_prefetch_distance 0x2
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB1_11
v_mad_u64_u32 v[3:4], null, s8, s5, v[0:1]
s_mov_b64 s[6:7], src_shared_base
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v3, v3, 2, 0
v_cmp_ne_u32_e32 vcc_lo, -1, v3
v_cndmask_b32_e32 v3, 0, v3, vcc_lo
v_cndmask_b32_e64 v4, 0, s7, vcc_lo
s_load_b64 s[6:7], s[0:1], 0x8
flat_load_b32 v3, v[3:4] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_store_b32 v[1:2], v3, off
.LBB1_11:
s_or_b32 exec_lo, exec_lo, s3
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_13
v_mad_u64_u32 v[1:2], null, s8, s5, v[0:1]
s_mov_b64 s[2:3], src_shared_base
s_load_b64 s[0:1], s[0:1], 0x18
s_mov_b32 s5, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v0, v1, 2, 0
v_cmp_ne_u32_e32 vcc_lo, -1, v0
v_cndmask_b32_e32 v0, 0, v0, vcc_lo
v_cndmask_b32_e64 v1, 0, s3, vcc_lo
s_lshl_b64 s[2:3], s[4:5], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
flat_load_b32 v0, v[0:1] glc dlc
s_waitcnt vmcnt(0)
v_mov_b32_e32 v1, 0
s_addc_u32 s1, s1, s3
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v0, v5, v0
global_store_b32 v1, v0, s[0:1]
.LBB1_13:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13hillis_steelePfS_iS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13hillis_steelePfS_iS_, .Lfunc_end1-_Z13hillis_steelePfS_iS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym temp
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5adderPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z5adderPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
- .offset: 152
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13hillis_steelePfS_iS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13hillis_steelePfS_iS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0009604a_00000000-6_scan.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z5adderPfS_iPfS_i
.type _Z27__device_stub__Z5adderPfS_iPfS_i, @function
_Z27__device_stub__Z5adderPfS_iPfS_i:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5adderPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z5adderPfS_iPfS_i, .-_Z27__device_stub__Z5adderPfS_iPfS_i
.globl _Z5adderPfS_i
.type _Z5adderPfS_i, @function
_Z5adderPfS_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z5adderPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z5adderPfS_i, .-_Z5adderPfS_i
.globl _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
.type _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_, @function
_Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_:
.LFB2084:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13hillis_steelePfS_iS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_, .-_Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
.globl _Z13hillis_steelePfS_iS_
.type _Z13hillis_steelePfS_iS_, @function
_Z13hillis_steelePfS_iS_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z13hillis_steelePfS_iS_, .-_Z13hillis_steelePfS_iS_
.globl _Z4scanPKfPfjj
.type _Z4scanPKfPfjj, @function
_Z4scanPKfPfjj:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movl %edx, %r14d
movl %ecx, %ebx
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leal -1(%rcx), %r12d
leal (%rdx,%r12), %eax
movl $0, %edx
divl %ecx
movl %eax, %ebp
movl %r14d, %r13d
salq $2, %r13
leaq 24(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movslq %ebp, %r15
salq $2, %r15
leaq 40(%rsp), %rdi
movl $1, %edx
movq %r15, %rsi
call cudaMallocManaged@PLT
leaq 48(%rsp), %rdi
movl $1, %edx
movq %r15, %rsi
call cudaMallocManaged@PLT
leaq 56(%rsp), %rdi
movl $1, %edx
movl $4, %esi
call cudaMallocManaged@PLT
movl $0, %ecx
movq %r13, %rdx
movq (%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leal (%rbx,%rbx), %r15d
salq $2, %r15
movl %ebx, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl %ebp, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movq %r15, %r8
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L20:
call cudaDeviceSynchronize@PLT
movl %ebx, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leal 0(%rbp,%r12), %eax
movl $0, %edx
divl %ebx
movl %eax, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movq %r15, %r8
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L21:
call cudaDeviceSynchronize@PLT
movl %ebx, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl %ebp, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $4, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L27
.L22:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %r13, %rdx
movq 32(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L28
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 40(%rsp), %rcx
movl %r14d, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
jmp .L20
.L26:
movq 56(%rsp), %rcx
movl %ebp, %edx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z38__device_stub__Z13hillis_steelePfS_iS_PfS_iS_
jmp .L21
.L27:
movl %r14d, %edx
movq 48(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z27__device_stub__Z5adderPfS_iPfS_i
jmp .L22
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z4scanPKfPfjj, .-_Z4scanPKfPfjj
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13hillis_steelePfS_iS_"
.LC1:
.string "_Z5adderPfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13hillis_steelePfS_iS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z5adderPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "scan.hip"
.globl _Z20__device_stub__adderPfS_i # -- Begin function _Z20__device_stub__adderPfS_i
.p2align 4, 0x90
.type _Z20__device_stub__adderPfS_i,@function
_Z20__device_stub__adderPfS_i: # @_Z20__device_stub__adderPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5adderPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z20__device_stub__adderPfS_i, .Lfunc_end0-_Z20__device_stub__adderPfS_i
.cfi_endproc
# -- End function
.globl _Z28__device_stub__hillis_steelePfS_iS_ # -- Begin function _Z28__device_stub__hillis_steelePfS_iS_
.p2align 4, 0x90
.type _Z28__device_stub__hillis_steelePfS_iS_,@function
_Z28__device_stub__hillis_steelePfS_iS_: # @_Z28__device_stub__hillis_steelePfS_iS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movq %rcx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13hillis_steelePfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z28__device_stub__hillis_steelePfS_iS_, .Lfunc_end1-_Z28__device_stub__hillis_steelePfS_iS_
.cfi_endproc
# -- End function
.globl _Z4scanPKfPfjj # -- Begin function _Z4scanPKfPfjj
.p2align 4, 0x90
.type _Z4scanPKfPfjj,@function
_Z4scanPKfPfjj: # @_Z4scanPKfPfjj
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %r15d
movl %edx, %ecx
movq %rsi, 176(%rsp) # 8-byte Spill
movq %rdi, %r13
movabsq $4294967296, %rbx # imm = 0x100000000
leal (%r15,%rcx), %eax
decl %eax
xorl %edx, %edx
divl %r15d
movl %eax, %r12d
movq %rcx, 152(%rsp) # 8-byte Spill
movl %ecx, %r14d
shlq $2, %r14
leaq 104(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 80(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movslq %r12d, %rbp
shlq $2, %rbp
leaq 96(%rsp), %rdi
movq %rbp, %rsi
movl $1, %edx
callq hipMallocManaged
leaq 88(%rsp), %rdi
movq %rbp, %rsi
movl $1, %edx
callq hipMallocManaged
leaq 160(%rsp), %rdi
movl $4, %esi
movl $1, %edx
callq hipMallocManaged
movq 104(%rsp), %rdi
movq %r13, %rsi
movq %r14, %rdx
xorl %ecx, %ecx
callq hipMemcpy
leal (%r15,%r15), %ebp
shlq $2, %rbp
leaq (%r12,%rbx), %rdi
movl %r15d, %r13d
orq %rbx, %r13
movq %rdi, 168(%rsp) # 8-byte Spill
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
movq %rbp, %r8
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq 104(%rsp), %rax
movq 80(%rsp), %rcx
movq 96(%rsp), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq 152(%rsp), %rax # 8-byte Reload
movl %eax, 4(%rsp)
movq %rdx, 24(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13hillis_steelePfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
callq hipDeviceSynchronize
leal (%r15,%r12), %eax
decl %eax
xorl %edx, %edx
divl %r15d
# kill: def $eax killed $eax def $rax
orq %rax, %rbx
movq %rbx, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
movq %rbp, %r8
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq 96(%rsp), %rax
movq 88(%rsp), %rcx
movq 160(%rsp), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r12d, 4(%rsp)
movq %rdx, 24(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13hillis_steelePfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
callq hipDeviceSynchronize
movl $4, %r8d
movq 168(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 80(%rsp), %rax
movq 88(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq 152(%rsp), %rax # 8-byte Reload
movl %eax, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z5adderPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
callq hipDeviceSynchronize
movq 80(%rsp), %rsi
movq 176(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 104(%rsp), %rdi
callq hipFree
movq 80(%rsp), %rdi
callq hipFree
movq 96(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq 160(%rsp), %rdi
callq hipFree
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z4scanPKfPfjj, .Lfunc_end2-_Z4scanPKfPfjj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5adderPfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13hillis_steelePfS_iS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5adderPfS_i,@object # @_Z5adderPfS_i
.section .rodata,"a",@progbits
.globl _Z5adderPfS_i
.p2align 3, 0x0
_Z5adderPfS_i:
.quad _Z20__device_stub__adderPfS_i
.size _Z5adderPfS_i, 8
.type _Z13hillis_steelePfS_iS_,@object # @_Z13hillis_steelePfS_iS_
.globl _Z13hillis_steelePfS_iS_
.p2align 3, 0x0
_Z13hillis_steelePfS_iS_:
.quad _Z28__device_stub__hillis_steelePfS_iS_
.size _Z13hillis_steelePfS_iS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5adderPfS_i"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13hillis_steelePfS_iS_"
.size .L__unnamed_2, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__adderPfS_i
.addrsig_sym _Z28__device_stub__hillis_steelePfS_iS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5adderPfS_i
.addrsig_sym _Z13hillis_steelePfS_iS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
inline __global__ void KernelTest(int * a, int * b, int * res, int size)
{
// Calcul de l'indice du tableau
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
res[x] = a[x] + b[x];
}
extern "C"
void runKernel(int * a, int * b, int * res, int size)
{
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(size / dimBlock.x, 1, 1);
KernelTest<<< dimGrid, dimBlock, 0 >>>( a, b, res, size);
} | code for sm_80
Function : _Z10KernelTestPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0007 */
/*0070*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0007 */
/*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
inline __global__ void KernelTest(int * a, int * b, int * res, int size)
{
// Calcul de l'indice du tableau
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
res[x] = a[x] + b[x];
}
extern "C"
void runKernel(int * a, int * b, int * res, int size)
{
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(size / dimBlock.x, 1, 1);
KernelTest<<< dimGrid, dimBlock, 0 >>>( a, b, res, size);
} | .file "tmpxft_000d0301_00000000-6_test_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .text._Z10KernelTestPiS_S_i,"axG",@progbits,_Z10KernelTestPiS_S_i,comdat
.weak _Z10KernelTestPiS_S_i
.type _Z10KernelTestPiS_S_i, @function
_Z10KernelTestPiS_S_i:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, 8(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 4(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq _Z10KernelTestPiS_S_i(%rip), %rax
movq %rax, _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f(%rip)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10KernelTestPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z10KernelTestPiS_S_i, .-_Z10KernelTestPiS_S_i
.text
.globl runKernel
.type runKernel, @function
runKernel:
.LFB2027:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $184, %rsp
.cfi_def_cfa_offset 224
movq %rdi, %r13
movq %rsi, %r12
movq %rdx, %rbp
movl %ecx, %ebx
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
movl $1, 60(%rsp)
movl %ecx, %eax
shrl $6, %eax
movl %eax, 68(%rsp)
movl $1, 72(%rsp)
movl $64, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 56(%rsp), %rdx
movl $1, %ecx
movq 68(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L9:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L15
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movq %r13, 16(%rsp)
movq %r12, 24(%rsp)
movq %rbp, 32(%rsp)
movl %ebx, 12(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq _Z10KernelTestPiS_S_i(%rip), %rax
movq %rax, _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f(%rip)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 48(%rsp), %rcx
leaq 40(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
jne .L9
pushq 48(%rsp)
.cfi_def_cfa_offset 232
pushq 48(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z10KernelTestPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L9
.L15:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size runKernel, .-runKernel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10KernelTestPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10KernelTestPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.weak _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f
.section .bss._ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f,"awG",@nobits,_ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f,comdat
.align 8
.type _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f, @gnu_unique_object
.size _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f, 8
_ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f:
.zero 8
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
inline __global__ void KernelTest(int * a, int * b, int * res, int size)
{
// Calcul de l'indice du tableau
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
res[x] = a[x] + b[x];
}
extern "C"
void runKernel(int * a, int * b, int * res, int size)
{
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(size / dimBlock.x, 1, 1);
KernelTest<<< dimGrid, dimBlock, 0 >>>( a, b, res, size);
} | #include <hip/hip_runtime.h>
inline __global__ void KernelTest(int * a, int * b, int * res, int size)
{
// Calcul de l'indice du tableau
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
res[x] = a[x] + b[x];
}
extern "C"
void runKernel(int * a, int * b, int * res, int size)
{
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(size / dimBlock.x, 1, 1);
KernelTest<<< dimGrid, dimBlock, 0 >>>( a, b, res, size);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
inline __global__ void KernelTest(int * a, int * b, int * res, int size)
{
// Calcul de l'indice du tableau
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
res[x] = a[x] + b[x];
}
extern "C"
void runKernel(int * a, int * b, int * res, int size)
{
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(size / dimBlock.x, 1, 1);
KernelTest<<< dimGrid, dimBlock, 0 >>>( a, b, res, size);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._Z10KernelTestPiS_S_i,"axG",@progbits,_Z10KernelTestPiS_S_i,comdat
.protected _Z10KernelTestPiS_S_i
.globl _Z10KernelTestPiS_S_i
.p2align 8
.type _Z10KernelTestPiS_S_i,@function
_Z10KernelTestPiS_S_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_mov_b32_e32 v2, 0
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10KernelTestPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z10KernelTestPiS_S_i,"axG",@progbits,_Z10KernelTestPiS_S_i,comdat
.Lfunc_end0:
.size _Z10KernelTestPiS_S_i, .Lfunc_end0-_Z10KernelTestPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10KernelTestPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10KernelTestPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
inline __global__ void KernelTest(int * a, int * b, int * res, int size)
{
// Calcul de l'indice du tableau
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
res[x] = a[x] + b[x];
}
extern "C"
void runKernel(int * a, int * b, int * res, int size)
{
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(size / dimBlock.x, 1, 1);
KernelTest<<< dimGrid, dimBlock, 0 >>>( a, b, res, size);
} | .text
.file "test_kernel.hip"
.globl runKernel # -- Begin function runKernel
.p2align 4, 0x90
.type runKernel,@function
runKernel: # @runKernel
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %ebx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movl %ecx, %edi
shrl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB0_2
# %bb.1:
movq %r12, 72(%rsp)
movq %r15, 64(%rsp)
movq %r14, 56(%rsp)
movl %ebx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10KernelTestPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_2:
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size runKernel, .Lfunc_end0-runKernel
.cfi_endproc
# -- End function
.section .text._Z25__device_stub__KernelTestPiS_S_i,"axG",@progbits,_Z25__device_stub__KernelTestPiS_S_i,comdat
.weak _Z25__device_stub__KernelTestPiS_S_i # -- Begin function _Z25__device_stub__KernelTestPiS_S_i
.p2align 4, 0x90
.type _Z25__device_stub__KernelTestPiS_S_i,@function
_Z25__device_stub__KernelTestPiS_S_i: # @_Z25__device_stub__KernelTestPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10KernelTestPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z25__device_stub__KernelTestPiS_S_i, .Lfunc_end1-_Z25__device_stub__KernelTestPiS_S_i
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10KernelTestPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10KernelTestPiS_S_i,@object # @_Z10KernelTestPiS_S_i
.section .rodata._Z10KernelTestPiS_S_i,"aG",@progbits,_Z10KernelTestPiS_S_i,comdat
.weak _Z10KernelTestPiS_S_i
.p2align 3, 0x0
_Z10KernelTestPiS_S_i:
.quad _Z25__device_stub__KernelTestPiS_S_i
.size _Z10KernelTestPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10KernelTestPiS_S_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__KernelTestPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10KernelTestPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10KernelTestPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0007 */
/*0070*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0007 */
/*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._Z10KernelTestPiS_S_i,"axG",@progbits,_Z10KernelTestPiS_S_i,comdat
.protected _Z10KernelTestPiS_S_i
.globl _Z10KernelTestPiS_S_i
.p2align 8
.type _Z10KernelTestPiS_S_i,@function
_Z10KernelTestPiS_S_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_mov_b32_e32 v2, 0
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10KernelTestPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z10KernelTestPiS_S_i,"axG",@progbits,_Z10KernelTestPiS_S_i,comdat
.Lfunc_end0:
.size _Z10KernelTestPiS_S_i, .Lfunc_end0-_Z10KernelTestPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10KernelTestPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10KernelTestPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d0301_00000000-6_test_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .text._Z10KernelTestPiS_S_i,"axG",@progbits,_Z10KernelTestPiS_S_i,comdat
.weak _Z10KernelTestPiS_S_i
.type _Z10KernelTestPiS_S_i, @function
_Z10KernelTestPiS_S_i:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, 8(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 4(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq _Z10KernelTestPiS_S_i(%rip), %rax
movq %rax, _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f(%rip)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10KernelTestPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z10KernelTestPiS_S_i, .-_Z10KernelTestPiS_S_i
.text
.globl runKernel
.type runKernel, @function
runKernel:
.LFB2027:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $184, %rsp
.cfi_def_cfa_offset 224
movq %rdi, %r13
movq %rsi, %r12
movq %rdx, %rbp
movl %ecx, %ebx
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
movl $1, 60(%rsp)
movl %ecx, %eax
shrl $6, %eax
movl %eax, 68(%rsp)
movl $1, 72(%rsp)
movl $64, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 56(%rsp), %rdx
movl $1, %ecx
movq 68(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L9:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L15
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
movq %r13, 16(%rsp)
movq %r12, 24(%rsp)
movq %rbp, 32(%rsp)
movl %ebx, 12(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq _Z10KernelTestPiS_S_i(%rip), %rax
movq %rax, _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f(%rip)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 48(%rsp), %rcx
leaq 40(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
jne .L9
pushq 48(%rsp)
.cfi_def_cfa_offset 232
pushq 48(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z10KernelTestPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L9
.L15:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size runKernel, .-runKernel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10KernelTestPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10KernelTestPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.weak _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f
.section .bss._ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f,"awG",@nobits,_ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f,comdat
.align 8
.type _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f, @gnu_unique_object
.size _ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f, 8
_ZZ35__device_stub__Z10KernelTestPiS_S_iPiS_S_iE3__f:
.zero 8
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test_kernel.hip"
.globl runKernel # -- Begin function runKernel
.p2align 4, 0x90
.type runKernel,@function
runKernel: # @runKernel
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %ecx, %ebx
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %r12
movl %ecx, %edi
shrl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB0_2
# %bb.1:
movq %r12, 72(%rsp)
movq %r15, 64(%rsp)
movq %r14, 56(%rsp)
movl %ebx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10KernelTestPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_2:
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size runKernel, .Lfunc_end0-runKernel
.cfi_endproc
# -- End function
.section .text._Z25__device_stub__KernelTestPiS_S_i,"axG",@progbits,_Z25__device_stub__KernelTestPiS_S_i,comdat
.weak _Z25__device_stub__KernelTestPiS_S_i # -- Begin function _Z25__device_stub__KernelTestPiS_S_i
.p2align 4, 0x90
.type _Z25__device_stub__KernelTestPiS_S_i,@function
_Z25__device_stub__KernelTestPiS_S_i: # @_Z25__device_stub__KernelTestPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10KernelTestPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z25__device_stub__KernelTestPiS_S_i, .Lfunc_end1-_Z25__device_stub__KernelTestPiS_S_i
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10KernelTestPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10KernelTestPiS_S_i,@object # @_Z10KernelTestPiS_S_i
.section .rodata._Z10KernelTestPiS_S_i,"aG",@progbits,_Z10KernelTestPiS_S_i,comdat
.weak _Z10KernelTestPiS_S_i
.p2align 3, 0x0
_Z10KernelTestPiS_S_i:
.quad _Z25__device_stub__KernelTestPiS_S_i
.size _Z10KernelTestPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10KernelTestPiS_S_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__KernelTestPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10KernelTestPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
typedef int* Vector;
/* Ceiling division: number of blocks of size block_size needed to cover tam
   elements (one extra block when tam is not an exact multiple). */
int techo(int tam, int block_size)
{
    int bloques = tam / block_size;
    if (tam % block_size != 0)
        bloques++;
    return bloques;
}
/*
 * C = A * B for tam x tam row-major matrices; one thread per output element.
 * Expected launch: 2D grid of 2D blocks covering at least tam x tam threads;
 * out-of-range threads return without touching memory.
 */
__global__ void multiplicarMatrices(int *C, int *A, int *B, int tam)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  /* column of C */
    int idy = blockIdx.y * blockDim.y + threadIdx.y;  /* row of C    */
    if (idy < tam && idx < tam)
    {
        int suma = 0;
        for (int k = 0; k < tam; k++)
            /* BUGFIX: the row offset into A is idy * tam; the original read
               A[idy + tam + k], which walks the wrong row and runs past the
               end of the matrix for the last rows. */
            suma += A[idy * tam + k] * B[k * tam + idx];
        C[idy * tam + idx] = suma;
    }
}
/* Element-wise sum C = A + B over tam x tam row-major matrices.
   One thread per element; threads outside the matrix do nothing. */
__global__ void sumarMatrices(int *C, int *A, int *B, int tam)
{
    int col  = blockIdx.x * blockDim.x + threadIdx.x;
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    if (fila < tam && col < tam)
    {
        int pos = fila * tam + col;
        C[pos] = A[pos] + B[pos];
    }
}
/* Print the tam x tam matrix A (stored row-major), one row per output line,
   entries separated by a single space. */
void imprimirMatriz(int *A, int tam)
{
    for (int fila = 0; fila < tam; fila++)
    {
        int *renglon = &A[fila * tam];
        for (int col = 0; col < tam; col++)
            printf("%d ", renglon[col]);
        printf("\n");
    }
}
/*
 * Allocate a tam x tam matrix (row-major, flattened) with every entry set to
 * val.  Aborts the program with a message if the allocation fails, so callers
 * never receive NULL.
 */
Vector newMatrizVectorizada(int tam, int val)
{
    /* size_t arithmetic avoids int overflow of tam * tam for large tam */
    Vector v = (Vector) malloc(sizeof(int) * (size_t) tam * (size_t) tam);
    if (v == NULL)
    {
        /* fail fast instead of dereferencing NULL in the fill loop below */
        fprintf(stderr, "newMatrizVectorizada: sin memoria (tam=%d)\n", tam);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < tam; i++)
    {
        for (int j = 0; j < tam; j++)
        {
            v[i * tam + j] = val;
        }
    }
    return v;
}
/* Abort with a message when a CUDA runtime call fails; "what" names the call. */
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess)
    {
        fprintf(stderr, "%s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/*
 * Driver: builds two tam x tam host matrices (A filled with 2, B with 3),
 * multiplies and then adds them on the GPU, printing each result.
 * tam defaults to 12 and can be overridden via argv[1].
 */
int main(int argc, char **argv)
{
    Vector A_h, B_h, C_h;
    Vector A_d, B_d, C_d;
    int tam = 12;
    if (argc > 1)
        tam = atoi(argv[1]);
    /* size_t arithmetic avoids int overflow of tam * tam for large tam */
    size_t size = sizeof(int) * (size_t) tam * (size_t) tam;
    A_h = newMatrizVectorizada(tam, 2);
    B_h = newMatrizVectorizada(tam, 3);
    C_h = newMatrizVectorizada(tam, 0);
    checkCuda(cudaMalloc((void**) &A_d, size), "cudaMalloc A_d");
    checkCuda(cudaMalloc((void**) &B_d, size), "cudaMalloc B_d");
    checkCuda(cudaMalloc((void**) &C_d, size), "cudaMalloc C_d");
    checkCuda(cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice), "cudaMemcpy A_d");
    checkCuda(cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice), "cudaMemcpy B_d");
    int BLOCK_SIZE = 4;
    dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
    dim3 n_blocks(techo(tam, block_size.x), techo(tam, block_size.y));
    multiplicarMatrices<<< n_blocks, block_size >>> (C_d, A_d, B_d, tam);
    /* launch-configuration errors only surface via cudaGetLastError() */
    checkCuda(cudaGetLastError(), "multiplicarMatrices launch");
    /* the blocking D2H copy also synchronizes with the kernel above */
    checkCuda(cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost), "cudaMemcpy C_h");
    imprimirMatriz(C_h, tam);
    sumarMatrices <<< n_blocks, block_size>>> (C_d, A_d, B_d, tam);
    checkCuda(cudaGetLastError(), "sumarMatrices launch");
    checkCuda(cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost), "cudaMemcpy C_h");
    printf("==============================================RESULTADO============================================\n");
    imprimirMatriz(C_h, tam);
    printf("===================================================================================================\n");
    free(A_h);
    free(B_h);
    free(C_h);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
Function : _Z13sumarMatricesPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fe200078e0207 */
/*0120*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z19multiplicarMatricesPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R5 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R5, R0, c[0x0][0x4], R3 ; /* 0x0000010000057a24 */
/* 0x002fca00078e0203 */
/*0080*/ ISETP.GE.OR P0, PT, R5, c[0x0][0x178], P0 ; /* 0x00005e0005007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R4, c[0x0][0x178] ; /* 0x00005e0000047a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R18, -RZ, RZ, 0, 0 ; /* 0x00000000ff127435 */
/* 0x000fe400000001ff */
/*00d0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fda0003f06270 */
/*00e0*/ @!P0 BRA 0xc10 ; /* 0x00000b2000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R6, R4.reuse, -0x1, RZ ; /* 0xffffffff04067810 */
/* 0x040fe40007ffe0ff */
/*0100*/ LOP3.LUT R7, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304077812 */
/* 0x000fe400078ec0ff */
/*0110*/ ISETP.GE.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe40003f06070 */
/*0120*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fe40000000f00 */
/*0130*/ MOV R18, RZ ; /* 0x000000ff00127202 */
/* 0x000fd20000000f00 */
/*0140*/ @!P0 BRA 0xb00 ; /* 0x000009b000008947 */
/* 0x000fea0003800000 */
/*0150*/ IADD3 R8, -R7, c[0x0][0x178], RZ ; /* 0x00005e0007087a10 */
/* 0x000fe20007ffe1ff */
/*0160*/ HFMA2.MMA R27, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff1b7435 */
/* 0x000fe200000001ff */
/*0170*/ IADD3 R9, R3, c[0x0][0x178], RZ ; /* 0x00005e0003097a10 */
/* 0x000fe20007ffe0ff */
/*0180*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe20000000a00 */
/*0190*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f04270 */
/*01a0*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fe20000000f00 */
/*01b0*/ IMAD R9, R0, c[0x0][0x4], R9 ; /* 0x0000010000097a24 */
/* 0x000fc800078e0209 */
/*01c0*/ IMAD.WIDE R26, R2, R27, c[0x0][0x170] ; /* 0x00005c00021a7625 */
/* 0x000fcc00078e021b */
/*01d0*/ @!P0 BRA 0x970 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01e0*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*01f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0200*/ @!P1 BRA 0x6b0 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0220*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0230*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x0000a2000c1e1900 */
/*0240*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0250*/ IMAD.WIDE R12, R9, 0x4, R12 ; /* 0x00000004090c7825 */
/* 0x000fca00078e020c */
/*0260*/ LDG.E R28, [R12.64] ; /* 0x000000040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0270*/ IMAD.WIDE R16, R4, 0x4, R26 ; /* 0x0000000404107825 */
/* 0x000fc600078e021a */
/*0280*/ LDG.E R23, [R12.64+0x4] ; /* 0x000004040c177981 */
/* 0x000ee6000c1e1900 */
/*0290*/ IMAD.WIDE R10, R4.reuse, 0x4, R16 ; /* 0x00000004040a7825 */
/* 0x040fe200078e0210 */
/*02a0*/ LDG.E R22, [R16.64] ; /* 0x0000000410167981 */
/* 0x0002e8000c1e1900 */
/*02b0*/ LDG.E R25, [R10.64] ; /* 0x000000040a197981 */
/* 0x000968000c1e1900 */
/*02c0*/ LDG.E R24, [R12.64+0x8] ; /* 0x000008040c187981 */
/* 0x000f68000c1e1900 */
/*02d0*/ LDG.E R14, [R12.64+0xc] ; /* 0x00000c040c0e7981 */
/* 0x000f62000c1e1900 */
/*02e0*/ IMAD.WIDE R10, R4, 0x4, R10 ; /* 0x00000004040a7825 */
/* 0x010fc600078e020a */
/*02f0*/ LDG.E R26, [R12.64+0x10] ; /* 0x000010040c1a7981 */
/* 0x001f28000c1e1900 */
/*0300*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x000122000c1e1900 */
/*0310*/ IMAD.WIDE R20, R4, 0x4, R10 ; /* 0x0000000404147825 */
/* 0x000fca00078e020a */
/*0320*/ LDG.E R27, [R20.64] ; /* 0x00000004141b7981 */
/* 0x000122000c1e1900 */
/*0330*/ IMAD.WIDE R16, R4, 0x4, R20 ; /* 0x0000000404107825 */
/* 0x002fc600078e0214 */
/*0340*/ LDG.E R10, [R12.64+0x18] ; /* 0x000018040c0a7981 */
/* 0x001f28000c1e1900 */
/*0350*/ LDG.E R21, [R12.64+0x1c] ; /* 0x00001c040c157981 */
/* 0x000f22000c1e1900 */
/*0360*/ IMAD R28, R19, R28, R18 ; /* 0x0000001c131c7224 */
/* 0x004fc600078e0212 */
/*0370*/ LDG.E R18, [R12.64+0x14] ; /* 0x000014040c127981 */
/* 0x000ea8000c1e1900 */
/*0380*/ LDG.E R19, [R16.64] ; /* 0x0000000410137981 */
/* 0x0000a2000c1e1900 */
/*0390*/ IMAD R28, R22, R23, R28 ; /* 0x00000017161c7224 */
/* 0x008fe400078e021c */
/*03a0*/ IMAD.WIDE R16, R4, 0x4, R16 ; /* 0x0000000404107825 */
/* 0x001fca00078e0210 */
/*03b0*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x000ae2000c1e1900 */
/*03c0*/ IMAD.WIDE R22, R4, 0x4, R16 ; /* 0x0000000404167825 */
/* 0x000fca00078e0210 */
/*03d0*/ LDG.E R20, [R22.64] ; /* 0x0000000416147981 */
/* 0x0000e2000c1e1900 */
/*03e0*/ IMAD R16, R25, R24, R28 ; /* 0x0000001819107224 */
/* 0x020fe400078e021c */
/*03f0*/ IMAD.WIDE R24, R4.reuse, 0x4, R22 ; /* 0x0000000404187825 */
/* 0x040fe200078e0216 */
/*0400*/ LDG.E R28, [R12.64+0x20] ; /* 0x000020040c1c7981 */
/* 0x000f68000c1e1900 */
/*0410*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x000362000c1e1900 */
/*0420*/ IMAD R16, R15, R14, R16 ; /* 0x0000000e0f107224 */
/* 0x010fe400078e0210 */
/*0430*/ IMAD.WIDE R14, R4, 0x4, R24 ; /* 0x00000004040e7825 */
/* 0x000fe200078e0218 */
/*0440*/ LDG.E R23, [R12.64+0x24] ; /* 0x000024040c177981 */
/* 0x001f26000c1e1900 */
/*0450*/ IMAD R26, R27, R26, R16 ; /* 0x0000001a1b1a7224 */
/* 0x000fc400078e0210 */
/*0460*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fe200078e020e */
/*0470*/ LDG.E R27, [R12.64+0x28] ; /* 0x000028040c1b7981 */
/* 0x000f28000c1e1900 */
/*0480*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000128000c1e1900 */
/*0490*/ LDG.E R22, [R16.64] ; /* 0x0000000410167981 */
/* 0x000328000c1e1900 */
/*04a0*/ LDG.E R15, [R12.64+0x30] ; /* 0x000030040c0f7981 */
/* 0x001f22000c1e1900 */
/*04b0*/ IMAD R26, R19, R18, R26 ; /* 0x00000012131a7224 */
/* 0x004fc400078e021a */
/*04c0*/ IMAD.WIDE R18, R4, 0x4, R16 ; /* 0x0000000404127825 */
/* 0x000fc800078e0210 */
/*04d0*/ IMAD R26, R11, R10, R26 ; /* 0x0000000a0b1a7224 */
/* 0x008fe400078e021a */
/*04e0*/ IMAD.WIDE R10, R4, 0x4, R18 ; /* 0x00000004040a7825 */
/* 0x000fe400078e0212 */
/*04f0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0000a4000c1e1900 */
/*0500*/ IMAD R24, R20, R21, R26 ; /* 0x0000001514187224 */
/* 0x002fe400078e021a */
/*0510*/ IMAD.WIDE R20, R4, 0x4, R10 ; /* 0x0000000404147825 */
/* 0x000fe200078e020a */
/*0520*/ LDG.E R26, [R12.64+0x2c] ; /* 0x00002c040c1a7981 */
/* 0x000ea8000c1e1900 */
/*0530*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0002e2000c1e1900 */
/*0540*/ IMAD R28, R29, R28, R24 ; /* 0x0000001c1d1c7224 */
/* 0x020fc600078e0218 */
/*0550*/ LDG.E R19, [R12.64+0x38] ; /* 0x000038040c137981 */
/* 0x001f62000c1e1900 */
/*0560*/ IMAD.WIDE R24, R4, 0x4, R20 ; /* 0x0000000404187825 */
/* 0x000fc600078e0214 */
/*0570*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000168000c1e1900 */
/*0580*/ LDG.E R11, [R12.64+0x34] ; /* 0x000034040c0b7981 */
/* 0x002f62000c1e1900 */
/*0590*/ IMAD.WIDE R16, R4, 0x4, R24 ; /* 0x0000000404107825 */
/* 0x000fc600078e0218 */
/*05a0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x000368000c1e1900 */
/*05b0*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */
/* 0x001f68000c1e1900 */
/*05c0*/ LDG.E R24, [R12.64+0x3c] ; /* 0x00003c040c187981 */
/* 0x002f62000c1e1900 */
/*05d0*/ IMAD R14, R14, R23, R28 ; /* 0x000000170e0e7224 */
/* 0x010fe200078e021c */
/*05e0*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fc60007ffe0ff */
/*05f0*/ IMAD R27, R22, R27, R14 ; /* 0x0000001b161b7224 */
/* 0x000fe200078e020e */
/*0600*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe20003f24270 */
/*0610*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0620*/ IADD3 R6, R6, 0x10, RZ ; /* 0x0000001006067810 */
/* 0x000fc60007ffe0ff */
/*0630*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0640*/ IMAD R18, R18, R26, R27 ; /* 0x0000001a12127224 */
/* 0x004fc800078e021b */
/*0650*/ IMAD R10, R10, R15, R18 ; /* 0x0000000f0a0a7224 */
/* 0x008fe400078e0212 */
/*0660*/ IMAD.WIDE R26, R4, 0x4, R16 ; /* 0x00000004041a7825 */
/* 0x000fc800078e0210 */
/*0670*/ IMAD R10, R20, R11, R10 ; /* 0x0000000b140a7224 */
/* 0x020fc800078e020a */
/*0680*/ IMAD R10, R29, R19, R10 ; /* 0x000000131d0a7224 */
/* 0x000fc800078e020a */
/*0690*/ IMAD R18, R21, R24, R10 ; /* 0x0000001815127224 */
/* 0x000fe200078e020a */
/*06a0*/ @P1 BRA 0x220 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*06b0*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*06c0*/ @!P1 BRA 0x950 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06d0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe20008000f00 */
/*06e0*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x000ea2000c1e1900 */
/*06f0*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fca0008000f00 */
/*0700*/ IMAD.WIDE R10, R9, 0x4, R10 ; /* 0x00000004090a7825 */
/* 0x000fca00078e020a */
/*0710*/ LDG.E R24, [R10.64] ; /* 0x000000040a187981 */
/* 0x000ea2000c1e1900 */
/*0720*/ IMAD.WIDE R22, R4, 0x4, R26 ; /* 0x0000000404167825 */
/* 0x000fc600078e021a */
/*0730*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*0740*/ IMAD.WIDE R14, R4.reuse, 0x4, R22 ; /* 0x00000004040e7825 */
/* 0x040fe200078e0216 */
/*0750*/ LDG.E R29, [R10.64+0x8] ; /* 0x000008040a1d7981 */
/* 0x000f28000c1e1900 */
/*0760*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x0000e2000c1e1900 */
/*0770*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fc600078e020e */
/*0780*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000326000c1e1900 */
/*0790*/ IMAD.WIDE R12, R4.reuse, 0x4, R16 ; /* 0x00000004040c7825 */
/* 0x040fe200078e0210 */
/*07a0*/ LDG.E R28, [R10.64+0xc] ; /* 0x00000c040a1c7981 */
/* 0x000f68000c1e1900 */
/*07b0*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000362000c1e1900 */
/*07c0*/ IMAD.WIDE R20, R4, 0x4, R12 ; /* 0x0000000404147825 */
/* 0x000fc600078e020c */
/*07d0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000328000c1e1900 */
/*07e0*/ LDG.E R23, [R10.64+0x14] ; /* 0x000014040a177981 */
/* 0x001f68000c1e1900 */
/*07f0*/ LDG.E R16, [R10.64+0x18] ; /* 0x000018040a107981 */
/* 0x002f68000c1e1900 */
/*0800*/ LDG.E R13, [R10.64+0x10] ; /* 0x000010040a0d7981 */
/* 0x000f62000c1e1900 */
/*0810*/ IMAD R24, R19, R24, R18 ; /* 0x0000001813187224 */
/* 0x004fc400078e0212 */
/*0820*/ IMAD.WIDE R18, R4.reuse, 0x4, R20 ; /* 0x0000000404127825 */
/* 0x040fe400078e0214 */
/*0830*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000ea8000c1e1900 */
/*0840*/ IMAD.WIDE R26, R4, 0x4, R18 ; /* 0x00000004041a7825 */
/* 0x000fe200078e0212 */
/*0850*/ LDG.E R15, [R18.64] ; /* 0x00000004120f7981 */
/* 0x0000a8000c1e1900 */
/*0860*/ LDG.E R19, [R10.64+0x1c] ; /* 0x00001c040a137981 */
/* 0x001ea8000c1e1900 */
/*0870*/ LDG.E R18, [R26.64] ; /* 0x000000041a127981 */
/* 0x0000a2000c1e1900 */
/*0880*/ IMAD R22, R22, R25, R24 ; /* 0x0000001916167224 */
/* 0x008fc800078e0218 */
/*0890*/ IMAD R14, R14, R29, R22 ; /* 0x0000001d0e0e7224 */
/* 0x010fc800078e0216 */
/*08a0*/ IMAD R14, R17, R28, R14 ; /* 0x0000001c110e7224 */
/* 0x020fc800078e020e */
/*08b0*/ IMAD R12, R12, R13, R14 ; /* 0x0000000d0c0c7224 */
/* 0x000fe200078e020e */
/*08c0*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*08d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*08e0*/ IMAD.WIDE R26, R4, 0x4, R26 ; /* 0x00000004041a7825 */
/* 0x001fe200078e021a */
/*08f0*/ IADD3 R6, R6, 0x8, RZ ; /* 0x0000000806067810 */
/* 0x000fe40007ffe0ff */
/*0900*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe20007ffe0ff */
/*0910*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0920*/ IMAD R12, R20, R23, R12 ; /* 0x00000017140c7224 */
/* 0x004fc800078e020c */
/*0930*/ IMAD R12, R15, R16, R12 ; /* 0x000000100f0c7224 */
/* 0x000fc800078e020c */
/*0940*/ IMAD R18, R18, R19, R12 ; /* 0x0000001312127224 */
/* 0x000fe400078e020c */
/*0950*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0960*/ @!P0 BRA 0xb00 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0970*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe20008000f00 */
/*0980*/ IMAD.WIDE R12, R4, 0x4, R26 ; /* 0x00000004040c7825 */
/* 0x000fe200078e021a */
/*0990*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*09a0*/ LDG.E R27, [R26.64] ; /* 0x000000041a1b7981 */
/* 0x000ea8000c1e1900 */
/*09b0*/ IMAD.WIDE R10, R9, 0x4, R10 ; /* 0x00000004090a7825 */
/* 0x000fc800078e020a */
/*09c0*/ IMAD.WIDE R14, R4.reuse, 0x4, R12 ; /* 0x00000004040e7825 */
/* 0x040fe200078e020c */
/*09d0*/ LDG.E R19, [R10.64] ; /* 0x000000040a137981 */
/* 0x000ea8000c1e1900 */
/*09e0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000ee2000c1e1900 */
/*09f0*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fc600078e020e */
/*0a00*/ LDG.E R20, [R10.64+0x4] ; /* 0x000004040a147981 */
/* 0x000ee8000c1e1900 */
/*0a10*/ LDG.E R22, [R14.64] ; /* 0x000000040e167981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R21, [R10.64+0x8] ; /* 0x000008040a157981 */
/* 0x000f28000c1e1900 */
/*0a30*/ LDG.E R23, [R10.64+0xc] ; /* 0x00000c040a177981 */
/* 0x000f68000c1e1900 */
/*0a40*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x000f62000c1e1900 */
/*0a50*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fc80007ffe0ff */
/*0a60*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f05270 */
/*0a70*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a80*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fc60007ffe0ff */
/*0a90*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0aa0*/ IMAD R19, R27, R19, R18 ; /* 0x000000131b137224 */
/* 0x004fc800078e0212 */
/*0ab0*/ IMAD R19, R12, R20, R19 ; /* 0x000000140c137224 */
/* 0x008fe400078e0213 */
/*0ac0*/ IMAD.WIDE R26, R4, 0x4, R16 ; /* 0x00000004041a7825 */
/* 0x000fc800078e0210 */
/*0ad0*/ IMAD R19, R22, R21, R19 ; /* 0x0000001516137224 */
/* 0x010fc800078e0213 */
/*0ae0*/ IMAD R18, R24, R23, R19 ; /* 0x0000001718127224 */
/* 0x020fe200078e0213 */
/*0af0*/ @P0 BRA 0x970 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0b00*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fda0003f05270 */
/*0b10*/ @!P0 BRA 0xc10 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0b20*/ HFMA2.MMA R10, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0a7435 */
/* 0x000fe200000001ff */
/*0b30*/ IADD3 R3, R3, c[0x0][0x178], R6 ; /* 0x00005e0003037a10 */
/* 0x000fe20007ffe006 */
/*0b40*/ IMAD R6, R6, c[0x0][0x178], R2 ; /* 0x00005e0006067a24 */
/* 0x000fc800078e0202 */
/*0b50*/ IMAD R3, R0, c[0x0][0x4], R3 ; /* 0x0000010000037a24 */
/* 0x000fc800078e0203 */
/*0b60*/ IMAD.WIDE R8, R3, R10, c[0x0][0x168] ; /* 0x00005a0003087625 */
/* 0x000fc800078e020a */
/*0b70*/ IMAD.WIDE R10, R6, R10, c[0x0][0x170] ; /* 0x00005c00060a7625 */
/* 0x000fca00078e020a */
/*0b80*/ LDG.E R3, [R10.64] ; /* 0x000000040a037981 */
/* 0x0000a8000c1e1900 */
/*0b90*/ LDG.E R0, [R8.64] ; /* 0x0000000408007981 */
/* 0x0002a2000c1e1900 */
/*0ba0*/ IADD3 R7, R7, -0x1, RZ ; /* 0xffffffff07077810 */
/* 0x000fc80007ffe0ff */
/*0bb0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*0bc0*/ IMAD.WIDE R10, R4, 0x4, R10 ; /* 0x00000004040a7825 */
/* 0x001fe200078e020a */
/*0bd0*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x002fc80007f3e0ff */
/*0be0*/ IADD3.X R9, RZ, R9, RZ, P1, !PT ; /* 0x00000009ff097210 */
/* 0x000fe20000ffe4ff */
/*0bf0*/ IMAD R18, R3, R0, R18 ; /* 0x0000000003127224 */
/* 0x004fcc00078e0212 */
/*0c00*/ @P0 BRA 0xb80 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0c10*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fe20000000f00 */
/*0c20*/ IMAD R2, R5, c[0x0][0x178], R2 ; /* 0x00005e0005027a24 */
/* 0x000fc800078e0202 */
/*0c30*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0c40*/ STG.E [R2.64], R18 ; /* 0x0000001202007986 */
/* 0x000fe2000c101904 */
/*0c50*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c60*/ BRA 0xc60; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
typedef int* Vector;
// Ceiling division: number of blocks of size `block_size` needed to cover `tam`.
int techo(int tam, int block_size)
{
return (tam / block_size) + (tam % block_size == 0? 0: 1);
}
// Matrix-multiply kernel: one thread per output element C[idy][idx] of the
// tam x tam product, guarded so an overshooting 2D grid is safe.
__global__ void multiplicarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; // column
int idy = blockIdx.y * blockDim.y + threadIdx.y; // row
int index = idy * tam + idx; // row-major linear index into C
if(idy < tam && idx < tam)
{
int suma = 0;
for(int k = 0; k < tam; k++)
// NOTE(review): `A[idy + tam + k]` looks like a typo for `A[idy * tam + k]`
// (row `idy` of a row-major A); as written it reads a sliding window that
// starts at A[idy + tam]. Kept byte-identical because the compiled assembly
// paired with this source elsewhere in the file was generated from exactly
// this expression -- confirm intent before fixing.
suma += A[idy + tam + k] * B [k * tam + idx];
C[index] = suma;
}
}
// Element-wise matrix addition kernel: C[i] = A[i] + B[i] over a tam x tam grid.
__global__ void sumarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; // column
int idy = blockIdx.y * blockDim.y + threadIdx.y; // row
int index = idy * tam + idx; // row-major linear index
if(idy < tam && idx < tam) // guard: grid may overshoot tam
C[index] = A[index] + B[index];
}
// Print a tam x tam row-major matrix to stdout, one row per line,
// values separated by a single space.
void imprimirMatriz(int *A, int tam)
{
for(int i = 0; i < tam; i++)
{
for(int j = 0; j < tam; j++)
{
printf("%d ", A[tam * i + j]);
}
printf("\n");
}
}
// Allocate a tam x tam matrix as a flat row-major int array, filled with `val`.
// Caller owns the returned buffer (freed with free()).
// NOTE(review): the malloc result is not checked; an allocation failure would
// crash inside the fill loop.
Vector newMatrizVectorizada(int tam, int val)
{
Vector v = (Vector) malloc(sizeof(int) * tam * tam);
for(int i = 0; i < tam; i++)
{
for(int j = 0; j < tam; j++)
{
v[i * tam + j] = val;
}
}
return v;
}
// Driver: builds A (all 2s) and B (all 3s) of size tam x tam (default 12,
// overridable via argv[1]), runs the multiply kernel then the add kernel on
// the GPU, printing each device result after a blocking copy back.
// NOTE(review): no CUDA API / kernel-launch error checking anywhere, and the
// atoi result is unvalidated (non-numeric argv[1] yields tam == 0).
int main(int argc, char **argv)
{
Vector A_h, B_h, C_h;
Vector A_d, B_d, C_d;
int tam = 12;
if(argc > 1)
tam = atoi(argv[1]);
int N = tam * tam;
size_t size = N * sizeof(int);
A_h = newMatrizVectorizada(tam, 2);
B_h = newMatrizVectorizada(tam, 3);
C_h = newMatrizVectorizada(tam, 0);
cudaMalloc((void**) &A_d, size);
cudaMalloc((void**) &B_d, size);
cudaMalloc((void**) &C_d, size);
cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
int BLOCK_SIZE = 4;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
// ceil-div grid so tam is covered even when not a multiple of BLOCK_SIZE
dim3 n_blocks(techo(tam, block_size.x), techo(tam, block_size.y));
multiplicarMatrices<<< n_blocks, block_size >>> (C_d, A_d, B_d, tam);
cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost); // blocking copy doubles as sync
imprimirMatriz(C_h, tam);
sumarMatrices <<< n_blocks, block_size>>> (C_d, A_d, B_d, tam);
cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost);
printf("==============================================RESULTADO============================================\n");
imprimirMatriz(C_h, tam);
printf("===================================================================================================\n");
free(A_h);
free(B_h);
free(C_h);
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
} | .file "tmpxft_0010c29c_00000000-6_CUDA_matrix.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z5techoii
.type _Z5techoii, @function
_Z5techoii:
.LFB2057:
.cfi_startproc
endbr64
movl %edi, %eax
cltd
idivl %esi
cmpl $1, %edx
sbbl $-1, %eax
ret
.cfi_endproc
.LFE2057:
.size _Z5techoii, .-_Z5techoii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.LC1:
.string "\n"
.text
.globl _Z14imprimirMatrizPii
.type _Z14imprimirMatrizPii, @function
_Z14imprimirMatrizPii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %esi, 12(%rsp)
testl %esi, %esi
jle .L4
movslq %esi, %r14
leaq 0(,%r14,4), %r15
leaq (%rdi,%r15), %rbp
negq %r14
salq $2, %r14
movl $0, %r13d
leaq .LC0(%rip), %r12
.L6:
leaq 0(%rbp,%r14), %rbx
.L7:
movl (%rbx), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L7
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq %r15, %rbp
cmpl %r13d, 12(%rsp)
jne .L6
.L4:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z14imprimirMatrizPii, .-_Z14imprimirMatrizPii
.globl _Z20newMatrizVectorizadaii
.type _Z20newMatrizVectorizadaii, @function
_Z20newMatrizVectorizadaii:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %edi, %ebp
movl %esi, %ebx
movslq %edi, %r12
movq %r12, %rdi
imulq %r12, %rdi
salq $2, %rdi
call malloc@PLT
testl %ebp, %ebp
jle .L10
leaq 0(,%r12,4), %r8
leaq (%rax,%r8), %rcx
negq %r12
leaq 0(,%r12,4), %rdi
movl $0, %esi
.L12:
leaq (%rcx,%rdi), %rdx
.L13:
movl %ebx, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L13
addl $1, %esi
addq %r8, %rcx
cmpl %esi, %ebp
jne .L12
.L10:
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z20newMatrizVectorizadaii, .-_Z20newMatrizVectorizadaii
.globl _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i
.type _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i, @function
_Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19multiplicarMatricesPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i, .-_Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i
.globl _Z19multiplicarMatricesPiS_S_i
.type _Z19multiplicarMatricesPiS_S_i, @function
_Z19multiplicarMatricesPiS_S_i:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z19multiplicarMatricesPiS_S_i, .-_Z19multiplicarMatricesPiS_S_i
.globl _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i
.type _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i, @function
_Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L28
.L24:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L29
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13sumarMatricesPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L24
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i, .-_Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i
.globl _Z13sumarMatricesPiS_S_i
.type _Z13sumarMatricesPiS_S_i, @function
_Z13sumarMatricesPiS_S_i:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z13sumarMatricesPiS_S_i, .-_Z13sumarMatricesPiS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "==============================================RESULTADO============================================\n"
.align 8
.LC3:
.string "===================================================================================================\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $12, %ebx
cmpl $1, %edi
jg .L39
.L33:
movl %ebx, %ebp
imull %ebx, %ebp
movslq %ebp, %rbp
salq $2, %rbp
movl $2, %esi
movl %ebx, %edi
call _Z20newMatrizVectorizadaii
movq %rax, %r14
movl $3, %esi
movl %ebx, %edi
call _Z20newMatrizVectorizadaii
movq %rax, %r13
movl $0, %esi
movl %ebx, %edi
call _Z20newMatrizVectorizadaii
movq %rax, %r12
leaq 8(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 40(%rsp)
testb $3, %bl
setne %dl
movzbl %dl, %edx
leal 3(%rbx), %eax
testl %ebx, %ebx
cmovns %ebx, %eax
sarl $2, %eax
addl %edx, %eax
movl %eax, 44(%rsp)
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $4, 32(%rsp)
movl $4, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L34:
movl $2, %ecx
movq %rbp, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl %ebx, %esi
movq %r12, %rdi
call _Z14imprimirMatrizPii
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl 52(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
.L35:
movl $2, %ecx
movq %rbp, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %esi
movq %r12, %rdi
call _Z14imprimirMatrizPii
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L42
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %ebx
jmp .L33
.L40:
movl %ebx, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i
jmp .L34
.L41:
movl %ebx, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i
jmp .L35
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z13sumarMatricesPiS_S_i"
.section .rodata.str1.8
.align 8
.LC5:
.string "_Z19multiplicarMatricesPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z13sumarMatricesPiS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z19multiplicarMatricesPiS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
typedef int* Vector;
// --- Duplicate copy of the CUDA_matrix program (dataset record input).
// Kept byte-identical to preserve the pairing with its converted output;
// only review comments are added.
// Ceiling division: blocks of `block_size` needed to cover `tam`.
int techo(int tam, int block_size)
{
return (tam / block_size) + (tam % block_size == 0? 0: 1);
}
// Per-element tam x tam matrix multiply; one thread per C element.
__global__ void multiplicarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index = idy * tam + idx;
if(idy < tam && idx < tam)
{
int suma = 0;
for(int k = 0; k < tam; k++)
// NOTE(review): likely typo -- row-major matmul would read A[idy * tam + k];
// kept as-is to stay in sync with the compiled output paired with this source.
suma += A[idy + tam + k] * B [k * tam + idx];
C[index] = suma;
}
}
// Element-wise addition: C = A + B over a tam x tam grid.
__global__ void sumarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index = idy * tam + idx;
if(idy < tam && idx < tam)
C[index] = A[index] + B[index];
}
// Print a tam x tam row-major matrix, one row per line.
void imprimirMatriz(int *A, int tam)
{
for(int i = 0; i < tam; i++)
{
for(int j = 0; j < tam; j++)
{
printf("%d ", A[tam * i + j]);
}
printf("\n");
}
}
// Allocate a flat tam x tam matrix filled with `val` (malloc unchecked).
Vector newMatrizVectorizada(int tam, int val)
{
Vector v = (Vector) malloc(sizeof(int) * tam * tam);
for(int i = 0; i < tam; i++)
{
for(int j = 0; j < tam; j++)
{
v[i * tam + j] = val;
}
}
return v;
}
// Driver: multiply then add the matrices on the GPU and print both results.
// NOTE(review): CUDA API return codes are never checked.
int main(int argc, char **argv)
{
Vector A_h, B_h, C_h;
Vector A_d, B_d, C_d;
int tam = 12;
if(argc > 1)
tam = atoi(argv[1]);
int N = tam * tam;
size_t size = N * sizeof(int);
A_h = newMatrizVectorizada(tam, 2);
B_h = newMatrizVectorizada(tam, 3);
C_h = newMatrizVectorizada(tam, 0);
cudaMalloc((void**) &A_d, size);
cudaMalloc((void**) &B_d, size);
cudaMalloc((void**) &C_d, size);
cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
int BLOCK_SIZE = 4;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
dim3 n_blocks(techo(tam, block_size.x), techo(tam, block_size.y));
multiplicarMatrices<<< n_blocks, block_size >>> (C_d, A_d, B_d, tam);
cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost);
imprimirMatriz(C_h, tam);
sumarMatrices <<< n_blocks, block_size>>> (C_d, A_d, B_d, tam);
cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost);
printf("==============================================RESULTADO============================================\n");
imprimirMatriz(C_h, tam);
printf("===================================================================================================\n");
free(A_h);
free(B_h);
free(C_h);
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
typedef int* Vector;
// Ceiling division: number of blocks of size `block_size` needed to cover `tam`.
int techo(int tam, int block_size)
{
return (tam / block_size) + (tam % block_size == 0? 0: 1);
}
// HIP matrix-multiply kernel: one thread per output element C[idy][idx] of the
// tam x tam product, guarded so an overshooting 2D grid is safe.
__global__ void multiplicarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; // column
int idy = blockIdx.y * blockDim.y + threadIdx.y; // row
int index = idy * tam + idx; // row-major linear index into C
if(idy < tam && idx < tam)
{
int suma = 0;
for(int k = 0; k < tam; k++)
// NOTE(review): `A[idy + tam + k]` looks like a typo for `A[idy * tam + k]`
// (row `idy` of a row-major A). Kept byte-identical because the AMDGCN
// assembly paired with this source in this file was compiled from exactly
// this expression -- confirm intent before fixing.
suma += A[idy + tam + k] * B [k * tam + idx];
C[index] = suma;
}
}
// Element-wise matrix addition kernel: C[i] = A[i] + B[i] over a tam x tam grid.
__global__ void sumarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; // column
int idy = blockIdx.y * blockDim.y + threadIdx.y; // row
int index = idy * tam + idx; // row-major linear index
if(idy < tam && idx < tam) // guard: grid may overshoot tam
C[index] = A[index] + B[index];
}
// Print a tam x tam row-major matrix to stdout, one row per line,
// values separated by a single space.
void imprimirMatriz(int *A, int tam)
{
for(int i = 0; i < tam; i++)
{
for(int j = 0; j < tam; j++)
{
printf("%d ", A[tam * i + j]);
}
printf("\n");
}
}
// Allocate a tam x tam matrix as a flat row-major int array, filled with `val`.
// Caller owns the returned buffer (freed with free()).
// NOTE(review): the malloc result is not checked; an allocation failure would
// crash inside the fill loop.
Vector newMatrizVectorizada(int tam, int val)
{
Vector v = (Vector) malloc(sizeof(int) * tam * tam);
for(int i = 0; i < tam; i++)
{
for(int j = 0; j < tam; j++)
{
v[i * tam + j] = val;
}
}
return v;
}
// Driver (HIP port): builds A (all 2s) and B (all 3s) of size tam x tam
// (default 12, overridable via argv[1]), runs the multiply kernel then the
// add kernel on the GPU, printing each device result after a blocking copy.
// NOTE(review): no HIP API / kernel-launch error checking anywhere, and the
// atoi result is unvalidated (non-numeric argv[1] yields tam == 0).
int main(int argc, char **argv)
{
Vector A_h, B_h, C_h;
Vector A_d, B_d, C_d;
int tam = 12;
if(argc > 1)
tam = atoi(argv[1]);
int N = tam * tam;
size_t size = N * sizeof(int);
A_h = newMatrizVectorizada(tam, 2);
B_h = newMatrizVectorizada(tam, 3);
C_h = newMatrizVectorizada(tam, 0);
hipMalloc((void**) &A_d, size);
hipMalloc((void**) &B_d, size);
hipMalloc((void**) &C_d, size);
hipMemcpy(A_d, A_h, size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, size, hipMemcpyHostToDevice);
int BLOCK_SIZE = 4;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
// ceil-div grid so tam is covered even when not a multiple of BLOCK_SIZE
dim3 n_blocks(techo(tam, block_size.x), techo(tam, block_size.y));
multiplicarMatrices<<< n_blocks, block_size >>> (C_d, A_d, B_d, tam);
hipMemcpy(C_h, C_d, size, hipMemcpyDeviceToHost); // blocking copy doubles as sync
imprimirMatriz(C_h, tam);
sumarMatrices <<< n_blocks, block_size>>> (C_d, A_d, B_d, tam);
hipMemcpy(C_h, C_d, size, hipMemcpyDeviceToHost);
printf("==============================================RESULTADO============================================\n");
imprimirMatriz(C_h, tam);
printf("===================================================================================================\n");
free(A_h);
free(B_h);
free(C_h);
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
typedef int* Vector;
// --- Duplicate copy of the HIP matrix program (dataset record input).
// Kept byte-identical to preserve the pairing with the AMDGCN assembly that
// follows it; only review comments are added.
// Ceiling division: blocks of `block_size` needed to cover `tam`.
int techo(int tam, int block_size)
{
return (tam / block_size) + (tam % block_size == 0? 0: 1);
}
// Per-element tam x tam matrix multiply; one thread per C element.
__global__ void multiplicarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index = idy * tam + idx;
if(idy < tam && idx < tam)
{
int suma = 0;
for(int k = 0; k < tam; k++)
// NOTE(review): likely typo -- row-major matmul would read A[idy * tam + k];
// kept as-is to stay in sync with the compiled output paired with this source.
suma += A[idy + tam + k] * B [k * tam + idx];
C[index] = suma;
}
}
// Element-wise addition: C = A + B over a tam x tam grid.
__global__ void sumarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index = idy * tam + idx;
if(idy < tam && idx < tam)
C[index] = A[index] + B[index];
}
// Print a tam x tam row-major matrix, one row per line.
void imprimirMatriz(int *A, int tam)
{
for(int i = 0; i < tam; i++)
{
for(int j = 0; j < tam; j++)
{
printf("%d ", A[tam * i + j]);
}
printf("\n");
}
}
// Allocate a flat tam x tam matrix filled with `val` (malloc unchecked).
Vector newMatrizVectorizada(int tam, int val)
{
Vector v = (Vector) malloc(sizeof(int) * tam * tam);
for(int i = 0; i < tam; i++)
{
for(int j = 0; j < tam; j++)
{
v[i * tam + j] = val;
}
}
return v;
}
// Driver: multiply then add the matrices on the GPU and print both results.
// NOTE(review): HIP API return codes are never checked.
int main(int argc, char **argv)
{
Vector A_h, B_h, C_h;
Vector A_d, B_d, C_d;
int tam = 12;
if(argc > 1)
tam = atoi(argv[1]);
int N = tam * tam;
size_t size = N * sizeof(int);
A_h = newMatrizVectorizada(tam, 2);
B_h = newMatrizVectorizada(tam, 3);
C_h = newMatrizVectorizada(tam, 0);
hipMalloc((void**) &A_d, size);
hipMalloc((void**) &B_d, size);
hipMalloc((void**) &C_d, size);
hipMemcpy(A_d, A_h, size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, size, hipMemcpyHostToDevice);
int BLOCK_SIZE = 4;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
dim3 n_blocks(techo(tam, block_size.x), techo(tam, block_size.y));
multiplicarMatrices<<< n_blocks, block_size >>> (C_d, A_d, B_d, tam);
hipMemcpy(C_h, C_d, size, hipMemcpyDeviceToHost);
imprimirMatriz(C_h, tam);
sumarMatrices <<< n_blocks, block_size>>> (C_d, A_d, B_d, tam);
hipMemcpy(C_h, C_d, size, hipMemcpyDeviceToHost);
printf("==============================================RESULTADO============================================\n");
imprimirMatriz(C_h, tam);
printf("===================================================================================================\n");
free(A_h);
free(B_h);
free(C_h);
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19multiplicarMatricesPiS_S_i
.globl _Z19multiplicarMatricesPiS_S_i
.p2align 8
.type _Z19multiplicarMatricesPiS_S_i,@function
_Z19multiplicarMatricesPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_6
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x8
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v5, s2, v1
v_mov_b32_e32 v3, v0
s_mov_b32 s3, 0
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v6, s3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v4, 31, v3
s_add_i32 s3, s3, 1
s_cmp_eq_u32 s2, s3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v7, 31, v6
v_lshlrev_b64 v[8:9], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v8, vcc_lo, s6, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
v_add_co_u32 v6, vcc_lo, s4, v6
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
global_load_b32 v4, v[8:9], off
global_load_b32 v8, v[6:7], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[6:7], null, v4, v8, v[2:3]
v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v3, s2, v3
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[3:4], null, v1, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19multiplicarMatricesPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19multiplicarMatricesPiS_S_i, .Lfunc_end0-_Z19multiplicarMatricesPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13sumarMatricesPiS_S_i
.globl _Z13sumarMatricesPiS_S_i
.p2align 8
.type _Z13sumarMatricesPiS_S_i,@function
_Z13sumarMatricesPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB1_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1]
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s4, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sumarMatricesPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13sumarMatricesPiS_S_i, .Lfunc_end1-_Z13sumarMatricesPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19multiplicarMatricesPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19multiplicarMatricesPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sumarMatricesPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13sumarMatricesPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
typedef int* Vector;
int techo(int tam, int block_size)
{
return (tam / block_size) + (tam % block_size == 0? 0: 1);
}
__global__ void multiplicarMatrices(int *C, int *A, int *B, int tam)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index = idy * tam + idx;
if(idy < tam && idx < tam)
{
int suma = 0;
for(int k = 0; k < tam; k++)
suma += A[idy + tam + k] * B [k * tam + idx];
C[index] = suma;
}
}
// Kernel: element-wise sum C = A + B for square tam x tam matrices stored
// flat, row-major.  One thread per element; out-of-range threads return early.
__global__ void sumarMatrices(int *C, int *A, int *B, int tam)
{
    int col  = blockIdx.x * blockDim.x + threadIdx.x;
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    if (fila >= tam || col >= tam)
        return;                        // guard clause instead of wrapping if
    int pos = fila * tam + col;
    C[pos] = A[pos] + B[pos];
}
// Print a tam x tam row-major matrix to stdout: one row per line, each value
// followed by a single space (matches the original "%d " / "\n" format).
void imprimirMatriz(int *A, int tam)
{
    for (int fila = 0; fila < tam; ++fila)
    {
        int *inicio = A + fila * tam;  // start of this row
        for (int col = 0; col < tam; ++col)
            printf("%d ", inicio[col]);
        printf("\n");
    }
}
// Allocate a tam x tam matrix as one flat, row-major array and fill every
// element with `val`.  The return type is spelled `int*`, which is identical
// to the file's `Vector` typedef, so callers are unaffected.
//
// Returns NULL when tam <= 0 or the allocation fails; the original wrote
// through the result of malloc without checking it.
int* newMatrizVectorizada(int tam, int val)
{
    if (tam <= 0)
        return NULL;
    // size_t arithmetic avoids signed-int overflow of tam * tam for large tam.
    size_t n = (size_t) tam * (size_t) tam;
    int *v = (int*) malloc(n * sizeof(int));
    if (v == NULL)
        return NULL;
    for (size_t i = 0; i < n; i++)
        v[i] = val;
    return v;
}
// Driver: builds two constant host matrices (all 2s, all 3s), multiplies and
// then adds them on the GPU, printing each result.  Matrix side defaults to
// 12 and can be overridden by argv[1].
int main(int argc, char **argv)
{
    // _h = host buffers, _d = device buffers; all flat row-major int arrays.
    Vector A_h, B_h, C_h;
    Vector A_d, B_d, C_d;
    int tam = 12;
    if(argc > 1)
        tam = atoi(argv[1]);          // NOTE(review): atoi gives 0 on bad input — verify acceptable
    int N = tam * tam;
    size_t size = N * sizeof(int);
    A_h = newMatrizVectorizada(tam, 2);
    B_h = newMatrizVectorizada(tam, 3);
    C_h = newMatrizVectorizada(tam, 0);   // host-side result buffer
    // NOTE(review): hipMalloc/hipMemcpy return codes are ignored throughout —
    // a failed allocation or copy would go unnoticed.
    hipMalloc((void**) &A_d, size);
    hipMalloc((void**) &B_d, size);
    hipMalloc((void**) &C_d, size);
    hipMemcpy(A_d, A_h, size, hipMemcpyHostToDevice);
    hipMemcpy(B_d, B_h, size, hipMemcpyHostToDevice);
    int BLOCK_SIZE = 4;
    dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
    // techo() = ceiling division, so the grid covers tam in both dimensions.
    dim3 n_blocks(techo(tam, block_size.x), techo(tam, block_size.y));
    multiplicarMatrices<<< n_blocks, block_size >>> (C_d, A_d, B_d, tam);
    // Blocking device-to-host copy also serves as synchronization after launch.
    hipMemcpy(C_h, C_d, size, hipMemcpyDeviceToHost);
    imprimirMatriz(C_h, tam);
    sumarMatrices <<< n_blocks, block_size>>> (C_d, A_d, B_d, tam);
    hipMemcpy(C_h, C_d, size, hipMemcpyDeviceToHost);
    printf("==============================================RESULTADO============================================\n");
    imprimirMatriz(C_h, tam);
    printf("===================================================================================================\n");
    free(A_h);
    free(B_h);
    free(C_h);
    hipFree(A_d);
    hipFree(B_d);
    hipFree(C_d);
    return 0;
}
.file "CUDA_matrix.hip"
# techo(int tam, int block_size): ceiling division — compiler-generated,
# branchless via the cmp/sbb idiom.
	.globl _Z5techoii # -- Begin function _Z5techoii
	.p2align 4, 0x90
	.type _Z5techoii,@function
_Z5techoii: # @_Z5techoii
	.cfi_startproc
# %bb.0:
	movl %edi, %eax
	cltd
	# eax = tam / block_size, edx = tam % block_size
	idivl %esi
	# CF is set iff the remainder is 0 (unsigned edx < 1)
	cmpl $1, %edx
	# eax = eax - (-1) - CF, i.e. +1 only when remainder != 0
	sbbl $-1, %eax
	retq
.Lfunc_end0:
	.size _Z5techoii, .Lfunc_end0-_Z5techoii
	.cfi_endproc
                                        # -- End function
# Host-side launch stub for the multiplicarMatrices kernel: spills the four
# kernel arguments to the stack, builds the argument-pointer array expected by
# hipLaunchKernel, pops the launch configuration pushed by the <<<>>> caller,
# and forwards everything to hipLaunchKernel.
	.globl _Z34__device_stub__multiplicarMatricesPiS_S_i # -- Begin function _Z34__device_stub__multiplicarMatricesPiS_S_i
	.p2align 4, 0x90
	.type _Z34__device_stub__multiplicarMatricesPiS_S_i,@function
_Z34__device_stub__multiplicarMatricesPiS_S_i: # @_Z34__device_stub__multiplicarMatricesPiS_S_i
	.cfi_startproc
# %bb.0:
	subq $120, %rsp
	.cfi_def_cfa_offset 128
	# spill kernel args (C, A, B, tam) to known stack slots
	movq %rdi, 72(%rsp)
	movq %rsi, 64(%rsp)
	movq %rdx, 56(%rsp)
	movl %ecx, 4(%rsp)
	# build void* kernel-argument array at 80(%rsp)
	leaq 72(%rsp), %rax
	movq %rax, 80(%rsp)
	leaq 64(%rsp), %rax
	movq %rax, 88(%rsp)
	leaq 56(%rsp), %rax
	movq %rax, 96(%rsp)
	leaq 4(%rsp), %rax
	movq %rax, 104(%rsp)
	# retrieve grid dim, block dim, shared-mem size, and stream
	leaq 40(%rsp), %rdi
	leaq 24(%rsp), %rsi
	leaq 16(%rsp), %rdx
	leaq 8(%rsp), %rcx
	callq __hipPopCallConfiguration
	movq 40(%rsp), %rsi
	movl 48(%rsp), %edx
	movq 24(%rsp), %rcx
	movl 32(%rsp), %r8d
	leaq 80(%rsp), %r9
	movl $_Z19multiplicarMatricesPiS_S_i, %edi
	# stream and shared-mem size go on the stack (args 7 and 8)
	pushq 8(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq 24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end1:
	.size _Z34__device_stub__multiplicarMatricesPiS_S_i, .Lfunc_end1-_Z34__device_stub__multiplicarMatricesPiS_S_i
	.cfi_endproc
                                        # -- End function
# Host-side launch stub for the sumarMatrices kernel; identical layout to the
# multiplicarMatrices stub above, differing only in the kernel handle passed
# to hipLaunchKernel.
	.globl _Z28__device_stub__sumarMatricesPiS_S_i # -- Begin function _Z28__device_stub__sumarMatricesPiS_S_i
	.p2align 4, 0x90
	.type _Z28__device_stub__sumarMatricesPiS_S_i,@function
_Z28__device_stub__sumarMatricesPiS_S_i: # @_Z28__device_stub__sumarMatricesPiS_S_i
	.cfi_startproc
# %bb.0:
	subq $120, %rsp
	.cfi_def_cfa_offset 128
	# spill kernel args (C, A, B, tam) to known stack slots
	movq %rdi, 72(%rsp)
	movq %rsi, 64(%rsp)
	movq %rdx, 56(%rsp)
	movl %ecx, 4(%rsp)
	# build void* kernel-argument array at 80(%rsp)
	leaq 72(%rsp), %rax
	movq %rax, 80(%rsp)
	leaq 64(%rsp), %rax
	movq %rax, 88(%rsp)
	leaq 56(%rsp), %rax
	movq %rax, 96(%rsp)
	leaq 4(%rsp), %rax
	movq %rax, 104(%rsp)
	# retrieve grid dim, block dim, shared-mem size, and stream
	leaq 40(%rsp), %rdi
	leaq 24(%rsp), %rsi
	leaq 16(%rsp), %rdx
	leaq 8(%rsp), %rcx
	callq __hipPopCallConfiguration
	movq 40(%rsp), %rsi
	movl 48(%rsp), %edx
	movq 24(%rsp), %rcx
	movl 32(%rsp), %r8d
	leaq 80(%rsp), %r9
	movl $_Z13sumarMatricesPiS_S_i, %edi
	# stream and shared-mem size go on the stack (args 7 and 8)
	pushq 8(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq 24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end2:
	.size _Z28__device_stub__sumarMatricesPiS_S_i, .Lfunc_end2-_Z28__device_stub__sumarMatricesPiS_S_i
	.cfi_endproc
                                        # -- End function
.globl _Z14imprimirMatrizPii # -- Begin function _Z14imprimirMatrizPii
.p2align 4, 0x90
.type _Z14imprimirMatrizPii,@function
_Z14imprimirMatrizPii: # @_Z14imprimirMatrizPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, (%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB3_5
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebx
movl %esi, %r15d
xorl %ebp, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_3 Depth 2
movl %ebp, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_3: # Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r13,%r14,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r14
cmpq %r14, %r15
jne .LBB3_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r12
addl %ebx, %ebp
cmpq %r15, %r12
jne .LBB3_2
.LBB3_5: # %._crit_edge13
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z14imprimirMatrizPii, .Lfunc_end3-_Z14imprimirMatrizPii
.cfi_endproc
# -- End function
.globl _Z20newMatrizVectorizadaii # -- Begin function _Z20newMatrizVectorizadaii
.p2align 4, 0x90
.type _Z20newMatrizVectorizadaii,@function
_Z20newMatrizVectorizadaii: # @_Z20newMatrizVectorizadaii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movl %esi, %ebx
movl %edi, %ebp
movslq %edi, %r14
movq %r14, %rdi
imulq %r14, %rdi
shlq $2, %rdi
callq malloc
testl %r14d, %r14d
jle .LBB4_5
# %bb.1: # %.preheader.lr.ph
movl %ebp, %ecx
xorl %edx, %edx
xorl %esi, %esi
.p2align 4, 0x90
.LBB4_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_3 Depth 2
movl %edx, %edi
leaq (%rax,%rdi,4), %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB4_3: # Parent Loop BB4_2 Depth=1
# => This Inner Loop Header: Depth=2
movl %ebx, (%rdi,%r8,4)
incq %r8
cmpq %r8, %rcx
jne .LBB4_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB4_2 Depth=1
incq %rsi
addl %ebp, %edx
cmpq %rcx, %rsi
jne .LBB4_2
.LBB4_5: # %._crit_edge17
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z20newMatrizVectorizadaii, .Lfunc_end4-_Z20newMatrizVectorizadaii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $12, %ebx
cmpl $2, %edi
jl .LBB5_2
# %bb.1:
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
.LBB5_2:
movl %ebx, %r15d
imull %r15d, %r15d
movslq %ebx, %r14
movq %r14, %r13
imulq %r14, %r13
shlq $2, %r13
movq %r13, %rdi
callq malloc
movl %ebx, %r12d
testl %r14d, %r14d
jle .LBB5_7
# %bb.3: # %.preheader.lr.ph.i
xorl %edi, %edi
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_4: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB5_5 Depth 2
movl %edi, %edx
leaq (%rax,%rdx,4), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_5: # Parent Loop BB5_4 Depth=1
# => This Inner Loop Header: Depth=2
movl $2, (%rdx,%rsi,4)
incq %rsi
cmpq %rsi, %r12
jne .LBB5_5
# %bb.6: # %._crit_edge.i
# in Loop: Header=BB5_4 Depth=1
incq %rcx
addl %ebx, %edi
cmpq %r12, %rcx
jne .LBB5_4
.LBB5_7: # %_Z20newMatrizVectorizadaii.exit
movq %rax, 128(%rsp) # 8-byte Spill
movq %r13, %rdi
callq malloc
testl %ebx, %ebx
jle .LBB5_12
# %bb.8: # %.preheader.lr.ph.i47
xorl %edi, %edi
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_9: # %.preheader.i49
# =>This Loop Header: Depth=1
# Child Loop BB5_10 Depth 2
movl %edi, %edx
leaq (%rax,%rdx,4), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_10: # Parent Loop BB5_9 Depth=1
# => This Inner Loop Header: Depth=2
movl $3, (%rdx,%rsi,4)
incq %rsi
cmpq %rsi, %r12
jne .LBB5_10
# %bb.11: # %._crit_edge.i55
# in Loop: Header=BB5_9 Depth=1
incq %rcx
addl %ebx, %edi
cmpq %r12, %rcx
jne .LBB5_9
.LBB5_12: # %_Z20newMatrizVectorizadaii.exit58
movq %rax, 120(%rsp) # 8-byte Spill
shlq $2, %r15
movq %r15, 112(%rsp) # 8-byte Spill
movq %r13, %rdi
callq malloc
movq %rax, %r13
testl %ebx, %ebx
jle .LBB5_15
# %bb.13: # %.preheader.lr.ph.i59
leaq (,%r12,4), %rbp
xorl %r14d, %r14d
movq %r12, %r15
.p2align 4, 0x90
.LBB5_14: # %.preheader.i61
# =>This Inner Loop Header: Depth=1
movl %r14d, %eax
leaq (,%rax,4), %rdi
addq %r13, %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq memset@PLT
addl %ebx, %r14d
decq %r15
jne .LBB5_14
.LBB5_15: # %_Z20newMatrizVectorizadaii.exit70
movabsq $17179869188, %r15 # imm = 0x400000004
leaq 32(%rsp), %rdi
movq 112(%rsp), %r14 # 8-byte Reload
movq %r14, %rsi
callq hipMalloc
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 32(%rsp), %rdi
movq 128(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movq 120(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leal 3(%rbx), %eax
testl %ebx, %ebx
cmovnsl %ebx, %eax
sarl $2, %eax
movl %ebx, %ecx
andl $3, %ecx
cmpl $1, %ecx
sbbl $-1, %eax
movq %rax, %rdi
shlq $32, %rdi
orq %rax, %rdi
movq %rdi, 176(%rsp) # 8-byte Spill
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_17
# %bb.16:
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebx, 12(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z19multiplicarMatricesPiS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_17:
movq 16(%rsp), %rsi
movq %r13, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
testl %ebx, %ebx
movq %r13, 136(%rsp) # 8-byte Spill
jle .LBB5_22
# %bb.18: # %.preheader.lr.ph.i71
xorl %ebp, %ebp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_19: # %.preheader.i72
# =>This Loop Header: Depth=1
# Child Loop BB5_20 Depth 2
movl %ebp, %eax
leaq (,%rax,4), %r15
addq %r13, %r15
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB5_20: # Parent Loop BB5_19 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r15,%r13,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r13
cmpq %r13, %r12
jne .LBB5_20
# %bb.21: # %._crit_edge.i77
# in Loop: Header=BB5_19 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addl %ebx, %ebp
cmpq %r12, %r14
movq 136(%rsp), %r13 # 8-byte Reload
jne .LBB5_19
.LBB5_22: # %_Z14imprimirMatrizPii.exit
movq 176(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movabsq $17179869188, %rdx # imm = 0x400000004
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_24
# %bb.23:
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebx, 12(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z13sumarMatricesPiS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_24:
movq 16(%rsp), %rsi
movq %r13, %rdi
movq 112(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
testl %ebx, %ebx
jle .LBB5_29
# %bb.25: # %.preheader.lr.ph.i84
xorl %ebp, %ebp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_26: # %.preheader.i86
# =>This Loop Header: Depth=1
# Child Loop BB5_27 Depth 2
movl %ebp, %eax
leaq (,%rax,4), %r15
addq %r13, %r15
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB5_27: # Parent Loop BB5_26 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r15,%r13,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r13
cmpq %r13, %r12
jne .LBB5_27
# %bb.28: # %._crit_edge.i92
# in Loop: Header=BB5_26 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addl %ebx, %ebp
cmpq %r12, %r14
movq 136(%rsp), %r13 # 8-byte Reload
jne .LBB5_26
.LBB5_29: # %_Z14imprimirMatrizPii.exit96
movl $.Lstr.1, %edi
callq puts@PLT
movq 128(%rsp), %rdi # 8-byte Reload
callq free
movq 120(%rsp), %rdi # 8-byte Reload
callq free
movq %r13, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
	.type __hip_module_ctor,@function
# Module constructor (run from .init_array): registers the fat binary once,
# registers both kernels with the HIP runtime, and installs the destructor
# via atexit.
__hip_module_ctor: # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq %rbx
	.cfi_def_cfa_offset 16
	subq $32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	# register the fat binary only on first invocation
	cmpq $0, __hip_gpubin_handle(%rip)
	jne .LBB6_2
# %bb.1:
	movl $__hip_fatbin_wrapper, %edi
	callq __hipRegisterFatBinary
	movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
	movq __hip_gpubin_handle(%rip), %rbx
	# zeroed 32-byte scratch area for the unused registration out-params
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp)
	movups %xmm0, (%rsp)
	movl $_Z19multiplicarMatricesPiS_S_i, %esi
	movl $.L__unnamed_1, %edx
	movl $.L__unnamed_1, %ecx
	movq %rbx, %rdi
	movl $-1, %r8d
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp)
	movups %xmm0, (%rsp)
	movl $_Z13sumarMatricesPiS_S_i, %esi
	movl $.L__unnamed_2, %edx
	movl $.L__unnamed_2, %ecx
	movq %rbx, %rdi
	movl $-1, %r8d
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	movl $__hip_module_dtor, %edi
	addq $32, %rsp
	.cfi_def_cfa_offset 16
	popq %rbx
	.cfi_def_cfa_offset 8
	jmp atexit # TAILCALL
.Lfunc_end6:
	.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
	.type __hip_module_dtor,@function
# Module destructor (installed via atexit by the ctor): unregisters the fat
# binary if it was registered and clears the cached handle.
__hip_module_dtor: # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq __hip_gpubin_handle(%rip), %rdi
	testq %rdi, %rdi
	je .LBB7_2
# %bb.1:
	pushq %rax
	.cfi_def_cfa_offset 16
	callq __hipUnregisterFatBinary
	movq $0, __hip_gpubin_handle(%rip)
	addq $8, %rsp
	.cfi_def_cfa_offset 8
.LBB7_2:
	retq
.Lfunc_end7:
	.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z19multiplicarMatricesPiS_S_i,@object # @_Z19multiplicarMatricesPiS_S_i
.section .rodata,"a",@progbits
.globl _Z19multiplicarMatricesPiS_S_i
.p2align 3, 0x0
_Z19multiplicarMatricesPiS_S_i:
.quad _Z34__device_stub__multiplicarMatricesPiS_S_i
.size _Z19multiplicarMatricesPiS_S_i, 8
.type _Z13sumarMatricesPiS_S_i,@object # @_Z13sumarMatricesPiS_S_i
.globl _Z13sumarMatricesPiS_S_i
.p2align 3, 0x0
_Z13sumarMatricesPiS_S_i:
.quad _Z28__device_stub__sumarMatricesPiS_S_i
.size _Z13sumarMatricesPiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z19multiplicarMatricesPiS_S_i"
.size .L__unnamed_1, 31
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13sumarMatricesPiS_S_i"
.size .L__unnamed_2, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "==============================================RESULTADO============================================"
.size .Lstr, 100
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "==================================================================================================="
.size .Lstr.1, 100
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__multiplicarMatricesPiS_S_i
.addrsig_sym _Z28__device_stub__sumarMatricesPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19multiplicarMatricesPiS_S_i
.addrsig_sym _Z13sumarMatricesPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13sumarMatricesPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fe200078e0207 */
/*0120*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z19multiplicarMatricesPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R5 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R5, R0, c[0x0][0x4], R3 ; /* 0x0000010000057a24 */
/* 0x002fca00078e0203 */
/*0080*/ ISETP.GE.OR P0, PT, R5, c[0x0][0x178], P0 ; /* 0x00005e0005007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R4, c[0x0][0x178] ; /* 0x00005e0000047a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R18, -RZ, RZ, 0, 0 ; /* 0x00000000ff127435 */
/* 0x000fe400000001ff */
/*00d0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fda0003f06270 */
/*00e0*/ @!P0 BRA 0xc10 ; /* 0x00000b2000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R6, R4.reuse, -0x1, RZ ; /* 0xffffffff04067810 */
/* 0x040fe40007ffe0ff */
/*0100*/ LOP3.LUT R7, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304077812 */
/* 0x000fe400078ec0ff */
/*0110*/ ISETP.GE.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe40003f06070 */
/*0120*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fe40000000f00 */
/*0130*/ MOV R18, RZ ; /* 0x000000ff00127202 */
/* 0x000fd20000000f00 */
/*0140*/ @!P0 BRA 0xb00 ; /* 0x000009b000008947 */
/* 0x000fea0003800000 */
/*0150*/ IADD3 R8, -R7, c[0x0][0x178], RZ ; /* 0x00005e0007087a10 */
/* 0x000fe20007ffe1ff */
/*0160*/ HFMA2.MMA R27, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff1b7435 */
/* 0x000fe200000001ff */
/*0170*/ IADD3 R9, R3, c[0x0][0x178], RZ ; /* 0x00005e0003097a10 */
/* 0x000fe20007ffe0ff */
/*0180*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe20000000a00 */
/*0190*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f04270 */
/*01a0*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fe20000000f00 */
/*01b0*/ IMAD R9, R0, c[0x0][0x4], R9 ; /* 0x0000010000097a24 */
/* 0x000fc800078e0209 */
/*01c0*/ IMAD.WIDE R26, R2, R27, c[0x0][0x170] ; /* 0x00005c00021a7625 */
/* 0x000fcc00078e021b */
/*01d0*/ @!P0 BRA 0x970 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01e0*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*01f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0200*/ @!P1 BRA 0x6b0 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0220*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0230*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x0000a2000c1e1900 */
/*0240*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0250*/ IMAD.WIDE R12, R9, 0x4, R12 ; /* 0x00000004090c7825 */
/* 0x000fca00078e020c */
/*0260*/ LDG.E R28, [R12.64] ; /* 0x000000040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0270*/ IMAD.WIDE R16, R4, 0x4, R26 ; /* 0x0000000404107825 */
/* 0x000fc600078e021a */
/*0280*/ LDG.E R23, [R12.64+0x4] ; /* 0x000004040c177981 */
/* 0x000ee6000c1e1900 */
/*0290*/ IMAD.WIDE R10, R4.reuse, 0x4, R16 ; /* 0x00000004040a7825 */
/* 0x040fe200078e0210 */
/*02a0*/ LDG.E R22, [R16.64] ; /* 0x0000000410167981 */
/* 0x0002e8000c1e1900 */
/*02b0*/ LDG.E R25, [R10.64] ; /* 0x000000040a197981 */
/* 0x000968000c1e1900 */
/*02c0*/ LDG.E R24, [R12.64+0x8] ; /* 0x000008040c187981 */
/* 0x000f68000c1e1900 */
/*02d0*/ LDG.E R14, [R12.64+0xc] ; /* 0x00000c040c0e7981 */
/* 0x000f62000c1e1900 */
/*02e0*/ IMAD.WIDE R10, R4, 0x4, R10 ; /* 0x00000004040a7825 */
/* 0x010fc600078e020a */
/*02f0*/ LDG.E R26, [R12.64+0x10] ; /* 0x000010040c1a7981 */
/* 0x001f28000c1e1900 */
/*0300*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x000122000c1e1900 */
/*0310*/ IMAD.WIDE R20, R4, 0x4, R10 ; /* 0x0000000404147825 */
/* 0x000fca00078e020a */
/*0320*/ LDG.E R27, [R20.64] ; /* 0x00000004141b7981 */
/* 0x000122000c1e1900 */
/*0330*/ IMAD.WIDE R16, R4, 0x4, R20 ; /* 0x0000000404107825 */
/* 0x002fc600078e0214 */
/*0340*/ LDG.E R10, [R12.64+0x18] ; /* 0x000018040c0a7981 */
/* 0x001f28000c1e1900 */
/*0350*/ LDG.E R21, [R12.64+0x1c] ; /* 0x00001c040c157981 */
/* 0x000f22000c1e1900 */
/*0360*/ IMAD R28, R19, R28, R18 ; /* 0x0000001c131c7224 */
/* 0x004fc600078e0212 */
/*0370*/ LDG.E R18, [R12.64+0x14] ; /* 0x000014040c127981 */
/* 0x000ea8000c1e1900 */
/*0380*/ LDG.E R19, [R16.64] ; /* 0x0000000410137981 */
/* 0x0000a2000c1e1900 */
/*0390*/ IMAD R28, R22, R23, R28 ; /* 0x00000017161c7224 */
/* 0x008fe400078e021c */
/*03a0*/ IMAD.WIDE R16, R4, 0x4, R16 ; /* 0x0000000404107825 */
/* 0x001fca00078e0210 */
/*03b0*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x000ae2000c1e1900 */
/*03c0*/ IMAD.WIDE R22, R4, 0x4, R16 ; /* 0x0000000404167825 */
/* 0x000fca00078e0210 */
/*03d0*/ LDG.E R20, [R22.64] ; /* 0x0000000416147981 */
/* 0x0000e2000c1e1900 */
/*03e0*/ IMAD R16, R25, R24, R28 ; /* 0x0000001819107224 */
/* 0x020fe400078e021c */
/*03f0*/ IMAD.WIDE R24, R4.reuse, 0x4, R22 ; /* 0x0000000404187825 */
/* 0x040fe200078e0216 */
/*0400*/ LDG.E R28, [R12.64+0x20] ; /* 0x000020040c1c7981 */
/* 0x000f68000c1e1900 */
/*0410*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x000362000c1e1900 */
/*0420*/ IMAD R16, R15, R14, R16 ; /* 0x0000000e0f107224 */
/* 0x010fe400078e0210 */
/*0430*/ IMAD.WIDE R14, R4, 0x4, R24 ; /* 0x00000004040e7825 */
/* 0x000fe200078e0218 */
/*0440*/ LDG.E R23, [R12.64+0x24] ; /* 0x000024040c177981 */
/* 0x001f26000c1e1900 */
/*0450*/ IMAD R26, R27, R26, R16 ; /* 0x0000001a1b1a7224 */
/* 0x000fc400078e0210 */
/*0460*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fe200078e020e */
/*0470*/ LDG.E R27, [R12.64+0x28] ; /* 0x000028040c1b7981 */
/* 0x000f28000c1e1900 */
/*0480*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000128000c1e1900 */
/*0490*/ LDG.E R22, [R16.64] ; /* 0x0000000410167981 */
/* 0x000328000c1e1900 */
/*04a0*/ LDG.E R15, [R12.64+0x30] ; /* 0x000030040c0f7981 */
/* 0x001f22000c1e1900 */
/*04b0*/ IMAD R26, R19, R18, R26 ; /* 0x00000012131a7224 */
/* 0x004fc400078e021a */
/*04c0*/ IMAD.WIDE R18, R4, 0x4, R16 ; /* 0x0000000404127825 */
/* 0x000fc800078e0210 */
/*04d0*/ IMAD R26, R11, R10, R26 ; /* 0x0000000a0b1a7224 */
/* 0x008fe400078e021a */
/*04e0*/ IMAD.WIDE R10, R4, 0x4, R18 ; /* 0x00000004040a7825 */
/* 0x000fe400078e0212 */
/*04f0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0000a4000c1e1900 */
/*0500*/ IMAD R24, R20, R21, R26 ; /* 0x0000001514187224 */
/* 0x002fe400078e021a */
/*0510*/ IMAD.WIDE R20, R4, 0x4, R10 ; /* 0x0000000404147825 */
/* 0x000fe200078e020a */
/*0520*/ LDG.E R26, [R12.64+0x2c] ; /* 0x00002c040c1a7981 */
/* 0x000ea8000c1e1900 */
/*0530*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0002e2000c1e1900 */
/*0540*/ IMAD R28, R29, R28, R24 ; /* 0x0000001c1d1c7224 */
/* 0x020fc600078e0218 */
/*0550*/ LDG.E R19, [R12.64+0x38] ; /* 0x000038040c137981 */
/* 0x001f62000c1e1900 */
/*0560*/ IMAD.WIDE R24, R4, 0x4, R20 ; /* 0x0000000404187825 */
/* 0x000fc600078e0214 */
/*0570*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000168000c1e1900 */
/*0580*/ LDG.E R11, [R12.64+0x34] ; /* 0x000034040c0b7981 */
/* 0x002f62000c1e1900 */
/*0590*/ IMAD.WIDE R16, R4, 0x4, R24 ; /* 0x0000000404107825 */
/* 0x000fc600078e0218 */
/*05a0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x000368000c1e1900 */
/*05b0*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */
/* 0x001f68000c1e1900 */
/*05c0*/ LDG.E R24, [R12.64+0x3c] ; /* 0x00003c040c187981 */
/* 0x002f62000c1e1900 */
/*05d0*/ IMAD R14, R14, R23, R28 ; /* 0x000000170e0e7224 */
/* 0x010fe200078e021c */
/*05e0*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fc60007ffe0ff */
/*05f0*/ IMAD R27, R22, R27, R14 ; /* 0x0000001b161b7224 */
/* 0x000fe200078e020e */
/*0600*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe20003f24270 */
/*0610*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0620*/ IADD3 R6, R6, 0x10, RZ ; /* 0x0000001006067810 */
/* 0x000fc60007ffe0ff */
/*0630*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0640*/ IMAD R18, R18, R26, R27 ; /* 0x0000001a12127224 */
/* 0x004fc800078e021b */
/*0650*/ IMAD R10, R10, R15, R18 ; /* 0x0000000f0a0a7224 */
/* 0x008fe400078e0212 */
/*0660*/ IMAD.WIDE R26, R4, 0x4, R16 ; /* 0x00000004041a7825 */
/* 0x000fc800078e0210 */
/*0670*/ IMAD R10, R20, R11, R10 ; /* 0x0000000b140a7224 */
/* 0x020fc800078e020a */
/*0680*/ IMAD R10, R29, R19, R10 ; /* 0x000000131d0a7224 */
/* 0x000fc800078e020a */
/*0690*/ IMAD R18, R21, R24, R10 ; /* 0x0000001815127224 */
/* 0x000fe200078e020a */
/*06a0*/ @P1 BRA 0x220 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*06b0*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*06c0*/ @!P1 BRA 0x950 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06d0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe20008000f00 */
/*06e0*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x000ea2000c1e1900 */
/*06f0*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fca0008000f00 */
/*0700*/ IMAD.WIDE R10, R9, 0x4, R10 ; /* 0x00000004090a7825 */
/* 0x000fca00078e020a */
/*0710*/ LDG.E R24, [R10.64] ; /* 0x000000040a187981 */
/* 0x000ea2000c1e1900 */
/*0720*/ IMAD.WIDE R22, R4, 0x4, R26 ; /* 0x0000000404167825 */
/* 0x000fc600078e021a */
/*0730*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*0740*/ IMAD.WIDE R14, R4.reuse, 0x4, R22 ; /* 0x00000004040e7825 */
/* 0x040fe200078e0216 */
/*0750*/ LDG.E R29, [R10.64+0x8] ; /* 0x000008040a1d7981 */
/* 0x000f28000c1e1900 */
/*0760*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x0000e2000c1e1900 */
/*0770*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fc600078e020e */
/*0780*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000326000c1e1900 */
/*0790*/ IMAD.WIDE R12, R4.reuse, 0x4, R16 ; /* 0x00000004040c7825 */
/* 0x040fe200078e0210 */
/*07a0*/ LDG.E R28, [R10.64+0xc] ; /* 0x00000c040a1c7981 */
/* 0x000f68000c1e1900 */
/*07b0*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000362000c1e1900 */
/*07c0*/ IMAD.WIDE R20, R4, 0x4, R12 ; /* 0x0000000404147825 */
/* 0x000fc600078e020c */
/*07d0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000328000c1e1900 */
/*07e0*/ LDG.E R23, [R10.64+0x14] ; /* 0x000014040a177981 */
/* 0x001f68000c1e1900 */
/*07f0*/ LDG.E R16, [R10.64+0x18] ; /* 0x000018040a107981 */
/* 0x002f68000c1e1900 */
/*0800*/ LDG.E R13, [R10.64+0x10] ; /* 0x000010040a0d7981 */
/* 0x000f62000c1e1900 */
/*0810*/ IMAD R24, R19, R24, R18 ; /* 0x0000001813187224 */
/* 0x004fc400078e0212 */
/*0820*/ IMAD.WIDE R18, R4.reuse, 0x4, R20 ; /* 0x0000000404127825 */
/* 0x040fe400078e0214 */
/*0830*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000ea8000c1e1900 */
/*0840*/ IMAD.WIDE R26, R4, 0x4, R18 ; /* 0x00000004041a7825 */
/* 0x000fe200078e0212 */
/*0850*/ LDG.E R15, [R18.64] ; /* 0x00000004120f7981 */
/* 0x0000a8000c1e1900 */
/*0860*/ LDG.E R19, [R10.64+0x1c] ; /* 0x00001c040a137981 */
/* 0x001ea8000c1e1900 */
/*0870*/ LDG.E R18, [R26.64] ; /* 0x000000041a127981 */
/* 0x0000a2000c1e1900 */
/*0880*/ IMAD R22, R22, R25, R24 ; /* 0x0000001916167224 */
/* 0x008fc800078e0218 */
/*0890*/ IMAD R14, R14, R29, R22 ; /* 0x0000001d0e0e7224 */
/* 0x010fc800078e0216 */
/*08a0*/ IMAD R14, R17, R28, R14 ; /* 0x0000001c110e7224 */
/* 0x020fc800078e020e */
/*08b0*/ IMAD R12, R12, R13, R14 ; /* 0x0000000d0c0c7224 */
/* 0x000fe200078e020e */
/*08c0*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*08d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*08e0*/ IMAD.WIDE R26, R4, 0x4, R26 ; /* 0x00000004041a7825 */
/* 0x001fe200078e021a */
/*08f0*/ IADD3 R6, R6, 0x8, RZ ; /* 0x0000000806067810 */
/* 0x000fe40007ffe0ff */
/*0900*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe20007ffe0ff */
/*0910*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0920*/ IMAD R12, R20, R23, R12 ; /* 0x00000017140c7224 */
/* 0x004fc800078e020c */
/*0930*/ IMAD R12, R15, R16, R12 ; /* 0x000000100f0c7224 */
/* 0x000fc800078e020c */
/*0940*/ IMAD R18, R18, R19, R12 ; /* 0x0000001312127224 */
/* 0x000fe400078e020c */
/*0950*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0960*/ @!P0 BRA 0xb00 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0970*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe20008000f00 */
/*0980*/ IMAD.WIDE R12, R4, 0x4, R26 ; /* 0x00000004040c7825 */
/* 0x000fe200078e021a */
/*0990*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*09a0*/ LDG.E R27, [R26.64] ; /* 0x000000041a1b7981 */
/* 0x000ea8000c1e1900 */
/*09b0*/ IMAD.WIDE R10, R9, 0x4, R10 ; /* 0x00000004090a7825 */
/* 0x000fc800078e020a */
/*09c0*/ IMAD.WIDE R14, R4.reuse, 0x4, R12 ; /* 0x00000004040e7825 */
/* 0x040fe200078e020c */
/*09d0*/ LDG.E R19, [R10.64] ; /* 0x000000040a137981 */
/* 0x000ea8000c1e1900 */
/*09e0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000ee2000c1e1900 */
/*09f0*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fc600078e020e */
/*0a00*/ LDG.E R20, [R10.64+0x4] ; /* 0x000004040a147981 */
/* 0x000ee8000c1e1900 */
/*0a10*/ LDG.E R22, [R14.64] ; /* 0x000000040e167981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R21, [R10.64+0x8] ; /* 0x000008040a157981 */
/* 0x000f28000c1e1900 */
/*0a30*/ LDG.E R23, [R10.64+0xc] ; /* 0x00000c040a177981 */
/* 0x000f68000c1e1900 */
/*0a40*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x000f62000c1e1900 */
/*0a50*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fc80007ffe0ff */
/*0a60*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f05270 */
/*0a70*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a80*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fc60007ffe0ff */
/*0a90*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0aa0*/ IMAD R19, R27, R19, R18 ; /* 0x000000131b137224 */
/* 0x004fc800078e0212 */
/*0ab0*/ IMAD R19, R12, R20, R19 ; /* 0x000000140c137224 */
/* 0x008fe400078e0213 */
/*0ac0*/ IMAD.WIDE R26, R4, 0x4, R16 ; /* 0x00000004041a7825 */
/* 0x000fc800078e0210 */
/*0ad0*/ IMAD R19, R22, R21, R19 ; /* 0x0000001516137224 */
/* 0x010fc800078e0213 */
/*0ae0*/ IMAD R18, R24, R23, R19 ; /* 0x0000001718127224 */
/* 0x020fe200078e0213 */
/*0af0*/ @P0 BRA 0x970 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0b00*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fda0003f05270 */
/*0b10*/ @!P0 BRA 0xc10 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0b20*/ HFMA2.MMA R10, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0a7435 */
/* 0x000fe200000001ff */
/*0b30*/ IADD3 R3, R3, c[0x0][0x178], R6 ; /* 0x00005e0003037a10 */
/* 0x000fe20007ffe006 */
/*0b40*/ IMAD R6, R6, c[0x0][0x178], R2 ; /* 0x00005e0006067a24 */
/* 0x000fc800078e0202 */
/*0b50*/ IMAD R3, R0, c[0x0][0x4], R3 ; /* 0x0000010000037a24 */
/* 0x000fc800078e0203 */
/*0b60*/ IMAD.WIDE R8, R3, R10, c[0x0][0x168] ; /* 0x00005a0003087625 */
/* 0x000fc800078e020a */
/*0b70*/ IMAD.WIDE R10, R6, R10, c[0x0][0x170] ; /* 0x00005c00060a7625 */
/* 0x000fca00078e020a */
/*0b80*/ LDG.E R3, [R10.64] ; /* 0x000000040a037981 */
/* 0x0000a8000c1e1900 */
/*0b90*/ LDG.E R0, [R8.64] ; /* 0x0000000408007981 */
/* 0x0002a2000c1e1900 */
/*0ba0*/ IADD3 R7, R7, -0x1, RZ ; /* 0xffffffff07077810 */
/* 0x000fc80007ffe0ff */
/*0bb0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*0bc0*/ IMAD.WIDE R10, R4, 0x4, R10 ; /* 0x00000004040a7825 */
/* 0x001fe200078e020a */
/*0bd0*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x002fc80007f3e0ff */
/*0be0*/ IADD3.X R9, RZ, R9, RZ, P1, !PT ; /* 0x00000009ff097210 */
/* 0x000fe20000ffe4ff */
/*0bf0*/ IMAD R18, R3, R0, R18 ; /* 0x0000000003127224 */
/* 0x004fcc00078e0212 */
/*0c00*/ @P0 BRA 0xb80 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0c10*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fe20000000f00 */
/*0c20*/ IMAD R2, R5, c[0x0][0x178], R2 ; /* 0x00005e0005027a24 */
/* 0x000fc800078e0202 */
/*0c30*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0c40*/ STG.E [R2.64], R18 ; /* 0x0000001202007986 */
/* 0x000fe2000c101904 */
/*0c50*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c60*/ BRA 0xc60; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19multiplicarMatricesPiS_S_i
.globl _Z19multiplicarMatricesPiS_S_i
.p2align 8
.type _Z19multiplicarMatricesPiS_S_i,@function
_Z19multiplicarMatricesPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_6
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x8
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v5, s2, v1
v_mov_b32_e32 v3, v0
s_mov_b32 s3, 0
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v6, s3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v4, 31, v3
s_add_i32 s3, s3, 1
s_cmp_eq_u32 s2, s3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v7, 31, v6
v_lshlrev_b64 v[8:9], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v8, vcc_lo, s6, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
v_add_co_u32 v6, vcc_lo, s4, v6
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
global_load_b32 v4, v[8:9], off
global_load_b32 v8, v[6:7], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[6:7], null, v4, v8, v[2:3]
v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v3, s2, v3
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[3:4], null, v1, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19multiplicarMatricesPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19multiplicarMatricesPiS_S_i, .Lfunc_end0-_Z19multiplicarMatricesPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13sumarMatricesPiS_S_i
.globl _Z13sumarMatricesPiS_S_i
.p2align 8
.type _Z13sumarMatricesPiS_S_i,@function
_Z13sumarMatricesPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB1_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1]
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s4, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13sumarMatricesPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13sumarMatricesPiS_S_i, .Lfunc_end1-_Z13sumarMatricesPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19multiplicarMatricesPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19multiplicarMatricesPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13sumarMatricesPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13sumarMatricesPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0010c29c_00000000-6_CUDA_matrix.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z5techoii
.type _Z5techoii, @function
_Z5techoii:
.LFB2057:
.cfi_startproc
endbr64
movl %edi, %eax
cltd
idivl %esi
cmpl $1, %edx
sbbl $-1, %eax
ret
.cfi_endproc
.LFE2057:
.size _Z5techoii, .-_Z5techoii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.LC1:
.string "\n"
.text
.globl _Z14imprimirMatrizPii
.type _Z14imprimirMatrizPii, @function
_Z14imprimirMatrizPii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %esi, 12(%rsp)
testl %esi, %esi
jle .L4
movslq %esi, %r14
leaq 0(,%r14,4), %r15
leaq (%rdi,%r15), %rbp
negq %r14
salq $2, %r14
movl $0, %r13d
leaq .LC0(%rip), %r12
.L6:
leaq 0(%rbp,%r14), %rbx
.L7:
movl (%rbx), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L7
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq %r15, %rbp
cmpl %r13d, 12(%rsp)
jne .L6
.L4:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z14imprimirMatrizPii, .-_Z14imprimirMatrizPii
.globl _Z20newMatrizVectorizadaii
.type _Z20newMatrizVectorizadaii, @function
_Z20newMatrizVectorizadaii:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %edi, %ebp
movl %esi, %ebx
movslq %edi, %r12
movq %r12, %rdi
imulq %r12, %rdi
salq $2, %rdi
call malloc@PLT
testl %ebp, %ebp
jle .L10
leaq 0(,%r12,4), %r8
leaq (%rax,%r8), %rcx
negq %r12
leaq 0(,%r12,4), %rdi
movl $0, %esi
.L12:
leaq (%rcx,%rdi), %rdx
.L13:
movl %ebx, (%rdx)
addq $4, %rdx
cmpq %rcx, %rdx
jne .L13
addl $1, %esi
addq %r8, %rcx
cmpl %esi, %ebp
jne .L12
.L10:
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z20newMatrizVectorizadaii, .-_Z20newMatrizVectorizadaii
.globl _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i
.type _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i, @function
_Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19multiplicarMatricesPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i, .-_Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i
.globl _Z19multiplicarMatricesPiS_S_i
.type _Z19multiplicarMatricesPiS_S_i, @function
_Z19multiplicarMatricesPiS_S_i:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z19multiplicarMatricesPiS_S_i, .-_Z19multiplicarMatricesPiS_S_i
.globl _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i
.type _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i, @function
_Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L28
.L24:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L29
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13sumarMatricesPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L24
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i, .-_Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i
.globl _Z13sumarMatricesPiS_S_i
.type _Z13sumarMatricesPiS_S_i, @function
_Z13sumarMatricesPiS_S_i:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z13sumarMatricesPiS_S_i, .-_Z13sumarMatricesPiS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "==============================================RESULTADO============================================\n"
.align 8
.LC3:
.string "===================================================================================================\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $12, %ebx
cmpl $1, %edi
jg .L39
.L33:
movl %ebx, %ebp
imull %ebx, %ebp
movslq %ebp, %rbp
salq $2, %rbp
movl $2, %esi
movl %ebx, %edi
call _Z20newMatrizVectorizadaii
movq %rax, %r14
movl $3, %esi
movl %ebx, %edi
call _Z20newMatrizVectorizadaii
movq %rax, %r13
movl $0, %esi
movl %ebx, %edi
call _Z20newMatrizVectorizadaii
movq %rax, %r12
leaq 8(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 40(%rsp)
testb $3, %bl
setne %dl
movzbl %dl, %edx
leal 3(%rbx), %eax
testl %ebx, %ebx
cmovns %ebx, %eax
sarl $2, %eax
addl %edx, %eax
movl %eax, 44(%rsp)
movl %eax, 48(%rsp)
movl $1, 52(%rsp)
movl $4, 32(%rsp)
movl $4, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L34:
movl $2, %ecx
movq %rbp, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl %ebx, %esi
movq %r12, %rdi
call _Z14imprimirMatrizPii
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl 52(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
.L35:
movl $2, %ecx
movq %rbp, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %esi
movq %r12, %rdi
call _Z14imprimirMatrizPii
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L42
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %ebx
jmp .L33
.L40:
movl %ebx, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z44__device_stub__Z19multiplicarMatricesPiS_S_iPiS_S_i
jmp .L34
.L41:
movl %ebx, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z38__device_stub__Z13sumarMatricesPiS_S_iPiS_S_i
jmp .L35
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z13sumarMatricesPiS_S_i"
.section .rodata.str1.8
.align 8
.LC5:
.string "_Z19multiplicarMatricesPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z13sumarMatricesPiS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z19multiplicarMatricesPiS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "CUDA_matrix.hip"
.globl _Z5techoii # -- Begin function _Z5techoii
.p2align 4, 0x90
.type _Z5techoii,@function
_Z5techoii: # @_Z5techoii
.cfi_startproc
# %bb.0:
movl %edi, %eax
cltd
idivl %esi
cmpl $1, %edx
sbbl $-1, %eax
retq
.Lfunc_end0:
.size _Z5techoii, .Lfunc_end0-_Z5techoii
.cfi_endproc
# -- End function
.globl _Z34__device_stub__multiplicarMatricesPiS_S_i # -- Begin function _Z34__device_stub__multiplicarMatricesPiS_S_i
.p2align 4, 0x90
.type _Z34__device_stub__multiplicarMatricesPiS_S_i,@function
_Z34__device_stub__multiplicarMatricesPiS_S_i: # @_Z34__device_stub__multiplicarMatricesPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19multiplicarMatricesPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z34__device_stub__multiplicarMatricesPiS_S_i, .Lfunc_end1-_Z34__device_stub__multiplicarMatricesPiS_S_i
.cfi_endproc
# -- End function
.globl _Z28__device_stub__sumarMatricesPiS_S_i # -- Begin function _Z28__device_stub__sumarMatricesPiS_S_i
.p2align 4, 0x90
.type _Z28__device_stub__sumarMatricesPiS_S_i,@function
_Z28__device_stub__sumarMatricesPiS_S_i: # @_Z28__device_stub__sumarMatricesPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13sumarMatricesPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z28__device_stub__sumarMatricesPiS_S_i, .Lfunc_end2-_Z28__device_stub__sumarMatricesPiS_S_i
.cfi_endproc
# -- End function
# imprimirMatriz(int* m, int n): prints an n x n matrix stored row-major in a
# flat int array, one row per line, each element via printf("%d ") followed by
# a newline per row (putchar('\n')). Returns nothing useful; falls through if
# n <= 0.
.globl _Z14imprimirMatrizPii # -- Begin function _Z14imprimirMatrizPii
.p2align 4, 0x90
.type _Z14imprimirMatrizPii,@function
_Z14imprimirMatrizPii: # @_Z14imprimirMatrizPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, (%rsp) # 8-byte Spill
# Guard: nothing to print for n <= 0.
testl %esi, %esi
jle .LBB3_5
# %bb.1: # %.preheader.lr.ph
# ebx = n (row stride), r15 = n (loop bound), ebp = running row offset,
# r12 = row counter.
movl %esi, %ebx
movl %esi, %r15d
xorl %ebp, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_3 Depth 2
# r13 = &m[row * n]; r14 = column index.
movl %ebp, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_3: # Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
# printf("%d ", m[row*n + col]); eax=0 marks zero vector registers (varargs).
movl (%r13,%r14,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r14
cmpq %r14, %r15
jne .LBB3_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
# End of row: emit '\n' and advance to the next row (ebp += n).
movl $10, %edi
callq putchar@PLT
incq %r12
addl %ebx, %ebp
cmpq %r15, %r12
jne .LBB3_2
.LBB3_5: # %._crit_edge13
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z14imprimirMatrizPii, .Lfunc_end3-_Z14imprimirMatrizPii
.cfi_endproc
# -- End function
# newMatrizVectorizada(int n, int value): malloc's an n*n int buffer and fills
# every element with `value`, returning the buffer pointer in %rax.
# NOTE(review): the malloc result is not checked for NULL before the fill loop.
.globl _Z20newMatrizVectorizadaii # -- Begin function _Z20newMatrizVectorizadaii
.p2align 4, 0x90
.type _Z20newMatrizVectorizadaii,@function
_Z20newMatrizVectorizadaii: # @_Z20newMatrizVectorizadaii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
# ebx = fill value, ebp/r14 = n; allocate n*n*4 bytes.
movl %esi, %ebx
movl %edi, %ebp
movslq %edi, %r14
movq %r14, %rdi
imulq %r14, %rdi
shlq $2, %rdi
callq malloc
# Guard: skip the fill for n <= 0; %rax (buffer) is the return value.
testl %r14d, %r14d
jle .LBB4_5
# %bb.1: # %.preheader.lr.ph
# edx = running row offset, rsi = row counter, rcx = n.
movl %ebp, %ecx
xorl %edx, %edx
xorl %esi, %esi
.p2align 4, 0x90
.LBB4_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_3 Depth 2
movl %edx, %edi
leaq (%rax,%rdi,4), %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB4_3: # Parent Loop BB4_2 Depth=1
# => This Inner Loop Header: Depth=2
# buf[row*n + col] = value
movl %ebx, (%rdi,%r8,4)
incq %r8
cmpq %r8, %rcx
jne .LBB4_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB4_2 Depth=1
incq %rsi
addl %ebp, %edx
cmpq %rcx, %rsi
jne .LBB4_2
.LBB4_5: # %._crit_edge17
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z20newMatrizVectorizadaii, .Lfunc_end4-_Z20newMatrizVectorizadaii
.cfi_endproc
# -- End function
# main(argc, argv):
#   n = (argc >= 2) ? strtol(argv[1]) : 12
#   A = matrix(n) filled with 2; B = matrix(n) filled with 3; C = zeroed matrix
#   device-allocate 3 buffers, copy A and B to device,
#   launch multiplicarMatrices<<<...>>>, copy result back, print it,
#   launch sumarMatrices<<<...>>>, copy result back, print it,
#   then free host and device buffers. The newMatrizVectorizada and
#   imprimirMatriz bodies are inlined here by the optimizer.
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# ebx = n; default 12, overridden by strtol(argv[1], NULL, 10) when argc >= 2.
movl $12, %ebx
cmpl $2, %edi
jl .LBB5_2
# %bb.1:
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
.LBB5_2:
# r15 = n*n (element count), r13 = n*n*4 (byte size), r14 = sign-extended n.
movl %ebx, %r15d
imull %r15d, %r15d
movslq %ebx, %r14
movq %r14, %r13
imulq %r14, %r13
shlq $2, %r13
movq %r13, %rdi
callq malloc
movl %ebx, %r12d
# Inlined newMatrizVectorizada(n, 2): fill matrix A with 2.
testl %r14d, %r14d
jle .LBB5_7
# %bb.3: # %.preheader.lr.ph.i
xorl %edi, %edi
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_4: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB5_5 Depth 2
movl %edi, %edx
leaq (%rax,%rdx,4), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_5: # Parent Loop BB5_4 Depth=1
# => This Inner Loop Header: Depth=2
movl $2, (%rdx,%rsi,4)
incq %rsi
cmpq %rsi, %r12
jne .LBB5_5
# %bb.6: # %._crit_edge.i
# in Loop: Header=BB5_4 Depth=1
incq %rcx
addl %ebx, %edi
cmpq %r12, %rcx
jne .LBB5_4
.LBB5_7: # %_Z20newMatrizVectorizadaii.exit
# 128(%rsp) = host matrix A. Inlined newMatrizVectorizada(n, 3) follows.
movq %rax, 128(%rsp) # 8-byte Spill
movq %r13, %rdi
callq malloc
testl %ebx, %ebx
jle .LBB5_12
# %bb.8: # %.preheader.lr.ph.i47
xorl %edi, %edi
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_9: # %.preheader.i49
# =>This Loop Header: Depth=1
# Child Loop BB5_10 Depth 2
movl %edi, %edx
leaq (%rax,%rdx,4), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_10: # Parent Loop BB5_9 Depth=1
# => This Inner Loop Header: Depth=2
movl $3, (%rdx,%rsi,4)
incq %rsi
cmpq %rsi, %r12
jne .LBB5_10
# %bb.11: # %._crit_edge.i55
# in Loop: Header=BB5_9 Depth=1
incq %rcx
addl %ebx, %edi
cmpq %r12, %rcx
jne .LBB5_9
.LBB5_12: # %_Z20newMatrizVectorizadaii.exit58
# 120(%rsp) = host matrix B; 112(%rsp) = n*n*4 bytes.
movq %rax, 120(%rsp) # 8-byte Spill
shlq $2, %r15
movq %r15, 112(%rsp) # 8-byte Spill
movq %r13, %rdi
callq malloc
movq %rax, %r13
# Inlined newMatrizVectorizada(n, 0): zero matrix C one row at a time via
# memset (value 0 lets the compiler use memset instead of a store loop).
testl %ebx, %ebx
jle .LBB5_15
# %bb.13: # %.preheader.lr.ph.i59
leaq (,%r12,4), %rbp
xorl %r14d, %r14d
movq %r12, %r15
.p2align 4, 0x90
.LBB5_14: # %.preheader.i61
# =>This Inner Loop Header: Depth=1
movl %r14d, %eax
leaq (,%rax,4), %rdi
addq %r13, %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq memset@PLT
addl %ebx, %r14d
decq %r15
jne .LBB5_14
.LBB5_15: # %_Z20newMatrizVectorizadaii.exit70
# Device setup: three hipMalloc's of n*n*4 bytes into 32/24/16(%rsp), then
# copy A and B host->device (hipMemcpy kind 1 = hipMemcpyHostToDevice).
movabsq $17179869188, %r15 # imm = 0x400000004
leaq 32(%rsp), %rdi
movq 112(%rsp), %r14 # 8-byte Reload
movq %r14, %rsi
callq hipMalloc
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 32(%rsp), %rdi
movq 128(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movq 120(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
# Compute ceil(n/4) (packed into both halves of a 64-bit grid-dim value,
# spilled at 176(%rsp) for reuse by the second launch), then push the launch
# configuration: grid = (ceil(n/4), ceil(n/4)), block = (4, 4).
leal 3(%rbx), %eax
testl %ebx, %ebx
cmovnsl %ebx, %eax
sarl $2, %eax
movl %ebx, %ecx
andl $3, %ecx
cmpl $1, %ecx
sbbl $-1, %eax
movq %rax, %rdi
shlq $32, %rdi
orq %rax, %rdi
movq %rdi, 176(%rsp) # 8-byte Spill
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_17
# %bb.16:
# Inlined launch stub for multiplicarMatrices(d_A, d_B, d_C, n).
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebx, 12(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z19multiplicarMatricesPiS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_17:
# Copy the product back device->host into C (hipMemcpy kind 2 =
# hipMemcpyDeviceToHost), then print it (inlined imprimirMatriz).
movq 16(%rsp), %rsi
movq %r13, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
testl %ebx, %ebx
movq %r13, 136(%rsp) # 8-byte Spill
jle .LBB5_22
# %bb.18: # %.preheader.lr.ph.i71
xorl %ebp, %ebp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_19: # %.preheader.i72
# =>This Loop Header: Depth=1
# Child Loop BB5_20 Depth 2
movl %ebp, %eax
leaq (,%rax,4), %r15
addq %r13, %r15
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB5_20: # Parent Loop BB5_19 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r15,%r13,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r13
cmpq %r13, %r12
jne .LBB5_20
# %bb.21: # %._crit_edge.i77
# in Loop: Header=BB5_19 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addl %ebx, %ebp
cmpq %r12, %r14
movq 136(%rsp), %r13 # 8-byte Reload
jne .LBB5_19
.LBB5_22: # %_Z14imprimirMatrizPii.exit
# Second launch: sumarMatrices with the same (reloaded) grid/block config.
movq 176(%rsp), %rdi # 8-byte Reload
movl $1, %esi
movabsq $17179869188, %rdx # imm = 0x400000004
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_24
# %bb.23:
movq 16(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebx, 12(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z13sumarMatricesPiS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_24:
# Copy sum result back, print the RESULTADO banner and the matrix.
movq 16(%rsp), %rsi
movq %r13, %rdi
movq 112(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
testl %ebx, %ebx
jle .LBB5_29
# %bb.25: # %.preheader.lr.ph.i84
xorl %ebp, %ebp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_26: # %.preheader.i86
# =>This Loop Header: Depth=1
# Child Loop BB5_27 Depth 2
movl %ebp, %eax
leaq (,%rax,4), %r15
addq %r13, %r15
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB5_27: # Parent Loop BB5_26 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r15,%r13,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r13
cmpq %r13, %r12
jne .LBB5_27
# %bb.28: # %._crit_edge.i92
# in Loop: Header=BB5_26 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addl %ebx, %ebp
cmpq %r12, %r14
movq 136(%rsp), %r13 # 8-byte Reload
jne .LBB5_26
.LBB5_29: # %_Z14imprimirMatrizPii.exit96
# Closing banner, then release host (free) and device (hipFree) buffers.
movl $.Lstr.1, %edi
callq puts@PLT
movq 128(%rsp), %rdi # 8-byte Reload
callq free
movq 120(%rsp), %rdi # 8-byte Reload
callq free
movq %r13, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
# Module constructor (run from .init_array): registers the embedded HIP fat
# binary once (guarded by __hip_gpubin_handle), registers the two kernels by
# mangled name, and arranges __hip_module_dtor to run at exit.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# Register the fat binary only on the first call.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
# Zero the 32 bytes of optional dim3/size arguments passed on the stack.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
# __hipRegisterFunction(handle, hostStub, name, name, -1, NULL, ...)
movl $_Z19multiplicarMatricesPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13sumarMatricesPiS_S_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Tail-call atexit(__hip_module_dtor).
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (registered via atexit): unregisters the fat binary if it
# was registered and clears the cached handle so a re-run would re-register.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
# push %rax only realigns the stack to 16 bytes for the call.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19multiplicarMatricesPiS_S_i,@object # @_Z19multiplicarMatricesPiS_S_i
.section .rodata,"a",@progbits
.globl _Z19multiplicarMatricesPiS_S_i
.p2align 3, 0x0
_Z19multiplicarMatricesPiS_S_i:
.quad _Z34__device_stub__multiplicarMatricesPiS_S_i
.size _Z19multiplicarMatricesPiS_S_i, 8
.type _Z13sumarMatricesPiS_S_i,@object # @_Z13sumarMatricesPiS_S_i
.globl _Z13sumarMatricesPiS_S_i
.p2align 3, 0x0
_Z13sumarMatricesPiS_S_i:
.quad _Z28__device_stub__sumarMatricesPiS_S_i
.size _Z13sumarMatricesPiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z19multiplicarMatricesPiS_S_i"
.size .L__unnamed_1, 31
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z13sumarMatricesPiS_S_i"
.size .L__unnamed_2, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "==============================================RESULTADO============================================"
.size .Lstr, 100
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "==================================================================================================="
.size .Lstr.1, 100
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__multiplicarMatricesPiS_S_i
.addrsig_sym _Z28__device_stub__sumarMatricesPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19multiplicarMatricesPiS_S_i
.addrsig_sym _Z13sumarMatricesPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
/*MatVecMul_Kernel*/
/*
 * MatVecMul_Kernel: computes C = A * B for a row-major n x n matrix A and an
 * n-element vector B, one output element per thread.
 *
 * A: device pointer, n*n floats (row-major)
 * B: device pointer, n floats
 * C: device pointer, n floats (output)
 * n: matrix/vector dimension
 *
 * Generalized: the index now includes blockIdx so the kernel is correct for
 * multi-block launches as well. For the existing single-block launch
 * (blockIdx.x == 0) the behavior is byte-identical to the original, which
 * used threadIdx.x alone.
 */
__global__
void MatVecMul_Kernel(float* A, float* B, float* C, int n) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n) {
		// Accumulate the dot product of row i of A with B.
		float sum = 0.0f;
		for (int j = 0; j < n; j++) {
			sum += A[i * n + j] * B[j];
		}
		C[i] = sum;
	}
}
/*MatVecMul_GPU*/
/*
 * MatVecMul_GPU: host wrapper. Copies A (n x n) and B, C (n) to the device,
 * launches MatVecMul_Kernel, and copies the result back into h_C.
 *
 * Bug fix: the kernel was launched with a hard-coded 10 threads
 * (<<<1, 10>>>), so any n != 10 computed only the first 10 (or garbage)
 * entries even though n is a parameter. It now launches n threads.
 * NOTE(review): still a single block, so n must be <= 1024 (max block size);
 * confirm callers never exceed that, or split into multiple blocks.
 */
void MatVecMul_GPU(float* h_A, float* h_B, float* h_C, int n) {
	// size_t avoids int overflow of n*n*sizeof(float) for large n.
	size_t sizeM = (size_t)n * n * sizeof(float);
	size_t sizeV = (size_t)n * sizeof(float);
	float *d_A;
	float *d_B;
	float *d_C;
	cudaMalloc(&d_A, sizeM);
	cudaMemcpy(d_A, h_A, sizeM, cudaMemcpyHostToDevice);
	cudaMalloc(&d_B, sizeV);
	cudaMemcpy(d_B, h_B, sizeV, cudaMemcpyHostToDevice);
	cudaMalloc(&d_C, sizeV);
	cudaMemcpy(d_C, h_C, sizeV, cudaMemcpyHostToDevice);
	// One thread per output element (was a hard-coded 10).
	MatVecMul_Kernel <<< 1, n >>> (d_A, d_B, d_C, n);
	// Catch launch-configuration errors (e.g. n > 1024) instead of silently
	// reading back stale data.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		fprintf(stderr, "MatVecMul_Kernel launch failed: %s\n",
		        cudaGetErrorString(err));
	}
	// Blocking copy also synchronizes with the kernel.
	cudaMemcpy(h_C, d_C, sizeV, cudaMemcpyDeviceToHost);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
}
/*
 * Driver: builds a 10x10 all-ones matrix and all-ones vectors, times the
 * GPU matrix-vector multiply with std::chrono, and prints the elapsed time
 * followed by the result vector.
 */
int main() {
	const int n = 10;
	// Host buffers: matrix A (n x n), input vector B, in/out vector C.
	float* h_A = (float*)malloc(n * n * sizeof(float));
	float* h_B = (float*)malloc(n * sizeof(float));
	float* h_C = (float*)malloc(n * sizeof(float));
	// Initialize the matrix to all ones.
	for (int idx = 0; idx < n * n; idx++) h_A[idx] = 1.0f;
	// Initialize both vectors to all ones.
	for (int idx = 0; idx < n; idx++) {
		h_B[idx] = 1.0f;
		h_C[idx] = 1.0f;
	}
	// Time the full GPU path (allocations + copies + kernel).
	auto t_begin = chrono::system_clock::now();
	MatVecMul_GPU(h_A, h_B, h_C, n);
	auto t_end = chrono::system_clock::now();
	auto elapsed_ns = chrono::duration_cast<chrono::nanoseconds>(t_end - t_begin).count();
	cout << "MatVecMul_GPU: " << elapsed_ns << "ns." << endl;
	// Print the result vector, space-separated.
	for (int idx = 0; idx < n; idx++) {
		cout << h_C[idx] << " ";
	}
	cout << endl;
	// Release host memory.
	free(h_A);
	free(h_B);
	free(h_C);
	return 0;
}
Function : _Z16MatVecMul_KernelPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */
/* 0x000fe200078e00ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0060*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */
/* 0x000fc600000001ff */
/*0070*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fda0003f06270 */
/*0080*/ @!P0 BRA 0xa50 ; /* 0x000009c000008947 */
/* 0x000fea0003800000 */
/*0090*/ IADD3 R2, R6.reuse, -0x1, RZ ; /* 0xffffffff06027810 */
/* 0x040fe20007ffe0ff */
/*00a0*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e00ff */
/*00b0*/ LOP3.LUT R6, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306067812 */
/* 0x000fe400078ec0ff */
/*00c0*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f06070 */
/*00d0*/ MOV R7, RZ ; /* 0x000000ff00077202 */
/* 0x000fd60000000f00 */
/*00e0*/ @!P0 BRA 0x910 ; /* 0x0000082000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R8, -R6, c[0x0][0x178], RZ ; /* 0x00005e0006087a10 */
/* 0x000fe20007ffe1ff */
/*0100*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0110*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */
/* 0x000fe200000001ff */
/*0120*/ IMAD R2, R0, c[0x0][0x178], RZ ; /* 0x00005e0000027a24 */
/* 0x000fe200078e02ff */
/*0130*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f04270 */
/*0140*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */
/* 0x000fe200078e00ff */
/*0150*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */
/* 0x000fe20000000f00 */
/*0160*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */
/* 0x000fe400078e00ff */
/*0170*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fd000078e0203 */
/*0180*/ @!P0 BRA 0x790 ; /* 0x0000060000008947 */
/* 0x000fea0003800000 */
/*0190*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*01a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01b0*/ @!P1 BRA 0x550 ; /* 0x0000039000009947 */
/* 0x000fea0003800000 */
/*01c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*01d0*/ LDG.E R16, [R4.64] ; /* 0x0000000404107981 */
/* 0x000ea8000c1e1900 */
/*01e0*/ LDG.E R17, [R2.64] ; /* 0x0000000402117981 */
/* 0x000ea8000c1e1900 */
/*01f0*/ LDG.E R18, [R4.64+0x4] ; /* 0x0000040404127981 */
/* 0x000ee8000c1e1900 */
/*0200*/ LDG.E R25, [R2.64+0x4] ; /* 0x0000040402197981 */
/* 0x000ee8000c1e1900 */
/*0210*/ LDG.E R19, [R4.64+0x8] ; /* 0x0000080404137981 */
/* 0x000f28000c1e1900 */
/*0220*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */
/* 0x000f28000c1e1900 */
/*0230*/ LDG.E R22, [R4.64+0xc] ; /* 0x00000c0404167981 */
/* 0x000f68000c1e1900 */
/*0240*/ LDG.E R21, [R2.64+0xc] ; /* 0x00000c0402157981 */
/* 0x000f68000c1e1900 */
/*0250*/ LDG.E R23, [R4.64+0x10] ; /* 0x0000100404177981 */
/* 0x000f68000c1e1900 */
/*0260*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */
/* 0x000f68000c1e1900 */
/*0270*/ LDG.E R9, [R4.64+0x14] ; /* 0x0000140404097981 */
/* 0x000f68000c1e1900 */
/*0280*/ LDG.E R10, [R2.64+0x14] ; /* 0x00001404020a7981 */
/* 0x000f68000c1e1900 */
/*0290*/ LDG.E R11, [R4.64+0x18] ; /* 0x00001804040b7981 */
/* 0x000f68000c1e1900 */
/*02a0*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */
/* 0x000f68000c1e1900 */
/*02b0*/ LDG.E R13, [R4.64+0x1c] ; /* 0x00001c04040d7981 */
/* 0x000f68000c1e1900 */
/*02c0*/ LDG.E R14, [R2.64+0x1c] ; /* 0x00001c04020e7981 */
/* 0x000f62000c1e1900 */
/*02d0*/ FFMA R17, R16, R17, R15 ; /* 0x0000001110117223 */
/* 0x004fc6000000000f */
/*02e0*/ LDG.E R15, [R4.64+0x20] ; /* 0x00002004040f7981 */
/* 0x0000a8000c1e1900 */
/*02f0*/ LDG.E R16, [R2.64+0x20] ; /* 0x0000200402107981 */
/* 0x0002a2000c1e1900 */
/*0300*/ FFMA R25, R18, R25, R17 ; /* 0x0000001912197223 */
/* 0x008fc60000000011 */
/*0310*/ LDG.E R17, [R4.64+0x24] ; /* 0x0000240404117981 */
/* 0x0000e8000c1e1900 */
/*0320*/ LDG.E R18, [R2.64+0x24] ; /* 0x0000240402127981 */
/* 0x0002e2000c1e1900 */
/*0330*/ FFMA R25, R19, R20, R25 ; /* 0x0000001413197223 */
/* 0x010fc60000000019 */
/*0340*/ LDG.E R19, [R4.64+0x28] ; /* 0x0000280404137981 */
/* 0x000128000c1e1900 */
/*0350*/ LDG.E R20, [R2.64+0x28] ; /* 0x0000280402147981 */
/* 0x000322000c1e1900 */
/*0360*/ FFMA R25, R22, R21, R25 ; /* 0x0000001516197223 */
/* 0x020fc60000000019 */
/*0370*/ LDG.E R21, [R4.64+0x2c] ; /* 0x00002c0404157981 */
/* 0x000168000c1e1900 */
/*0380*/ LDG.E R22, [R2.64+0x2c] ; /* 0x00002c0402167981 */
/* 0x000362000c1e1900 */
/*0390*/ FFMA R25, R23, R24, R25 ; /* 0x0000001817197223 */
/* 0x000fc60000000019 */
/*03a0*/ LDG.E R23, [R4.64+0x30] ; /* 0x0000300404177981 */
/* 0x000168000c1e1900 */
/*03b0*/ LDG.E R24, [R2.64+0x30] ; /* 0x0000300402187981 */
/* 0x000362000c1e1900 */
/*03c0*/ FFMA R25, R9, R10, R25 ; /* 0x0000000a09197223 */
/* 0x000fc60000000019 */
/*03d0*/ LDG.E R9, [R4.64+0x34] ; /* 0x0000340404097981 */
/* 0x000168000c1e1900 */
/*03e0*/ LDG.E R10, [R2.64+0x34] ; /* 0x00003404020a7981 */
/* 0x000362000c1e1900 */
/*03f0*/ FFMA R25, R11, R12, R25 ; /* 0x0000000c0b197223 */
/* 0x000fc60000000019 */
/*0400*/ LDG.E R11, [R4.64+0x38] ; /* 0x00003804040b7981 */
/* 0x000168000c1e1900 */
/*0410*/ LDG.E R12, [R2.64+0x38] ; /* 0x00003804020c7981 */
/* 0x000362000c1e1900 */
/*0420*/ FFMA R25, R13, R14, R25 ; /* 0x0000000e0d197223 */
/* 0x000fc60000000019 */
/*0430*/ LDG.E R13, [R4.64+0x3c] ; /* 0x00003c04040d7981 */
/* 0x000168000c1e1900 */
/*0440*/ LDG.E R14, [R2.64+0x3c] ; /* 0x00003c04020e7981 */
/* 0x000362000c1e1900 */
/*0450*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fe40007ffe0ff */
/*0460*/ IADD3 R7, R7, 0x10, RZ ; /* 0x0000001007077810 */
/* 0x000fe40007ffe0ff */
/*0470*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*0480*/ IADD3 R4, P3, R4, 0x40, RZ ; /* 0x0000004004047810 */
/* 0x001fc40007f7e0ff */
/*0490*/ IADD3 R2, P2, R2, 0x40, RZ ; /* 0x0000004002027810 */
/* 0x002fe40007f5e0ff */
/*04a0*/ IADD3.X R5, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff057210 */
/* 0x000fc60001ffe4ff */
/*04b0*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */
/* 0x000fe400010e0603 */
/*04c0*/ FFMA R15, R15, R16, R25 ; /* 0x000000100f0f7223 */
/* 0x004fc80000000019 */
/*04d0*/ FFMA R15, R17, R18, R15 ; /* 0x00000012110f7223 */
/* 0x008fc8000000000f */
/*04e0*/ FFMA R15, R19, R20, R15 ; /* 0x00000014130f7223 */
/* 0x010fc8000000000f */
/*04f0*/ FFMA R15, R21, R22, R15 ; /* 0x00000016150f7223 */
/* 0x020fc8000000000f */
/*0500*/ FFMA R15, R23, R24, R15 ; /* 0x00000018170f7223 */
/* 0x000fc8000000000f */
/*0510*/ FFMA R9, R9, R10, R15 ; /* 0x0000000a09097223 */
/* 0x000fc8000000000f */
/*0520*/ FFMA R9, R11, R12, R9 ; /* 0x0000000c0b097223 */
/* 0x000fc80000000009 */
/*0530*/ FFMA R15, R13, R14, R9 ; /* 0x0000000e0d0f7223 */
/* 0x000fe20000000009 */
/*0540*/ @P1 BRA 0x1d0 ; /* 0xfffffc8000001947 */
/* 0x000fea000383ffff */
/*0550*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*0560*/ @!P1 BRA 0x770 ; /* 0x0000020000009947 */
/* 0x000fea0003800000 */
/*0570*/ LDG.E R16, [R4.64] ; /* 0x0000000404107981 */
/* 0x0000a8000c1e1900 */
/*0580*/ LDG.E R17, [R2.64] ; /* 0x0000000402117981 */
/* 0x000ea8000c1e1900 */
/*0590*/ LDG.E R19, [R4.64+0x4] ; /* 0x0000040404137981 */
/* 0x0000e8000c1e1900 */
/*05a0*/ LDG.E R18, [R2.64+0x4] ; /* 0x0000040402127981 */
/* 0x000ee8000c1e1900 */
/*05b0*/ LDG.E R21, [R4.64+0x8] ; /* 0x0000080404157981 */
/* 0x000128000c1e1900 */
/*05c0*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */
/* 0x000f28000c1e1900 */
/*05d0*/ LDG.E R23, [R4.64+0xc] ; /* 0x00000c0404177981 */
/* 0x000168000c1e1900 */
/*05e0*/ LDG.E R22, [R2.64+0xc] ; /* 0x00000c0402167981 */
/* 0x000f68000c1e1900 */
/*05f0*/ LDG.E R25, [R4.64+0x10] ; /* 0x0000100404197981 */
/* 0x000168000c1e1900 */
/*0600*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */
/* 0x000f68000c1e1900 */
/*0610*/ LDG.E R13, [R4.64+0x14] ; /* 0x00001404040d7981 */
/* 0x000168000c1e1900 */
/*0620*/ LDG.E R14, [R2.64+0x14] ; /* 0x00001404020e7981 */
/* 0x000f68000c1e1900 */
/*0630*/ LDG.E R11, [R4.64+0x18] ; /* 0x00001804040b7981 */
/* 0x000168000c1e1900 */
/*0640*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */
/* 0x000f68000c1e1900 */
/*0650*/ LDG.E R9, [R4.64+0x1c] ; /* 0x00001c0404097981 */
/* 0x000168000c1e1900 */
/*0660*/ LDG.E R10, [R2.64+0x1c] ; /* 0x00001c04020a7981 */
/* 0x000362000c1e1900 */
/*0670*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0680*/ IADD3 R7, R7, 0x8, RZ ; /* 0x0000000807077810 */
/* 0x000fe40007ffe0ff */
/*0690*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe40007ffe0ff */
/*06a0*/ IADD3 R4, P2, R4, 0x20, RZ ; /* 0x0000002004047810 */
/* 0x001fca0007f5e0ff */
/*06b0*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */
/* 0x000fe400010e0605 */
/*06c0*/ FFMA R16, R16, R17, R15 ; /* 0x0000001110107223 */
/* 0x004fc8000000000f */
/*06d0*/ FFMA R16, R19, R18, R16 ; /* 0x0000001213107223 */
/* 0x008fc80000000010 */
/*06e0*/ FFMA R16, R21, R20, R16 ; /* 0x0000001415107223 */
/* 0x010fc80000000010 */
/*06f0*/ FFMA R16, R23, R22, R16 ; /* 0x0000001617107223 */
/* 0x020fc80000000010 */
/*0700*/ FFMA R16, R25, R24, R16 ; /* 0x0000001819107223 */
/* 0x000fc80000000010 */
/*0710*/ FFMA R13, R13, R14, R16 ; /* 0x0000000e0d0d7223 */
/* 0x000fc80000000010 */
/*0720*/ FFMA R11, R11, R12, R13 ; /* 0x0000000c0b0b7223 */
/* 0x000fe2000000000d */
/*0730*/ IADD3 R12, P1, R2, 0x20, RZ ; /* 0x00000020020c7810 */
/* 0x000fc80007f3e0ff */
/*0740*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */
/* 0x002fe40000ffe4ff */
/*0750*/ MOV R2, R12 ; /* 0x0000000c00027202 */
/* 0x000fe20000000f00 */
/*0760*/ FFMA R15, R9, R10, R11 ; /* 0x0000000a090f7223 */
/* 0x000fe4000000000b */
/*0770*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0780*/ @!P0 BRA 0x910 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*0790*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x000ea8000c1e1900 */
/*07a0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ea8000c1e1900 */
/*07b0*/ LDG.E R12, [R4.64+0x4] ; /* 0x00000404040c7981 */
/* 0x0000e8000c1e1900 */
/*07c0*/ LDG.E R11, [R2.64+0x4] ; /* 0x00000404020b7981 */
/* 0x000ee8000c1e1900 */
/*07d0*/ LDG.E R14, [R4.64+0x8] ; /* 0x00000804040e7981 */
/* 0x000128000c1e1900 */
/*07e0*/ LDG.E R13, [R2.64+0x8] ; /* 0x00000804020d7981 */
/* 0x000328000c1e1900 */
/*07f0*/ LDG.E R16, [R4.64+0xc] ; /* 0x00000c0404107981 */
/* 0x000168000c1e1900 */
/*0800*/ LDG.E R17, [R2.64+0xc] ; /* 0x00000c0402117981 */
/* 0x000362000c1e1900 */
/*0810*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fc40007ffe0ff */
/*0820*/ IADD3 R7, R7, 0x4, RZ ; /* 0x0000000407077810 */
/* 0x000fe40007ffe0ff */
/*0830*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f05270 */
/*0840*/ FFMA R9, R10, R9, R15 ; /* 0x000000090a097223 */
/* 0x004fe2000000000f */
/*0850*/ IADD3 R10, P2, R4, 0x10, RZ ; /* 0x00000010040a7810 */
/* 0x000fca0007f5e0ff */
/*0860*/ IMAD.MOV.U32 R4, RZ, RZ, R10 ; /* 0x000000ffff047224 */
/* 0x001fe400078e000a */
/*0870*/ FFMA R9, R12, R11, R9 ; /* 0x0000000b0c097223 */
/* 0x008fe20000000009 */
/*0880*/ IADD3 R12, P1, R2, 0x10, RZ ; /* 0x00000010020c7810 */
/* 0x000fe20007f3e0ff */
/*0890*/ IMAD.X R11, RZ, RZ, R5, P2 ; /* 0x000000ffff0b7224 */
/* 0x000fc800010e0605 */
/*08a0*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x002fe200078e000c */
/*08b0*/ MOV R5, R11 ; /* 0x0000000b00057202 */
/* 0x000fe20000000f00 */
/*08c0*/ FFMA R9, R14, R13, R9 ; /* 0x0000000d0e097223 */
/* 0x010fe20000000009 */
/*08d0*/ IADD3.X R13, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff0d7210 */
/* 0x000fc80000ffe4ff */
/*08e0*/ MOV R3, R13 ; /* 0x0000000d00037202 */
/* 0x000fe20000000f00 */
/*08f0*/ FFMA R15, R16, R17, R9 ; /* 0x00000011100f7223 */
/* 0x020fe20000000009 */
/*0900*/ @P0 BRA 0x790 ; /* 0xfffffe8000000947 */
/* 0x000fea000383ffff */
/*0910*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fda0003f05270 */
/*0920*/ @!P0 BRA 0xa50 ; /* 0x0000012000008947 */
/* 0x000fea0003800000 */
/*0930*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe400078e00ff */
/*0940*/ IMAD R4, R0, c[0x0][0x178], R7 ; /* 0x00005e0000047a24 */
/* 0x000fe400078e0207 */
/*0950*/ IMAD.WIDE R2, R7, R5, c[0x0][0x168] ; /* 0x00005a0007027625 */
/* 0x000fc800078e0205 */
/*0960*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe200078e0205 */
/*0970*/ MOV R8, R2 ; /* 0x0000000200087202 */
/* 0x000fc60000000f00 */
/*0980*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */
/* 0x000fc600078e0004 */
/*0990*/ MOV R2, R8 ; /* 0x0000000800027202 */
/* 0x000fe40000000f00 */
/*09a0*/ MOV R4, R7 ; /* 0x0000000700047202 */
/* 0x000fc80000000f00 */
/*09b0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x0000a8000c1e1900 */
/*09c0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x0002a2000c1e1900 */
/*09d0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe40007ffe0ff */
/*09e0*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x000fe40007f3e0ff */
/*09f0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f05270 */
/*0a00*/ IADD3 R7, P2, R7, 0x4, RZ ; /* 0x0000000407077810 */
/* 0x000fe20007f5e0ff */
/*0a10*/ IMAD.X R3, RZ, RZ, R3, P1 ; /* 0x000000ffff037224 */
/* 0x001fc600008e0603 */
/*0a20*/ IADD3.X R5, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff057210 */
/* 0x002fe200017fe4ff */
/*0a30*/ FFMA R15, R2, R4, R15 ; /* 0x00000004020f7223 */
/* 0x004fcc000000000f */
/*0a40*/ @P0 BRA 0x990 ; /* 0xffffff4000000947 */
/* 0x000fea000383ffff */
/*0a50*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fca0000000f00 */
/*0a60*/ IMAD.WIDE R2, R0, R3, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fca00078e0203 */
/*0a70*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe2000c101904 */
/*0a80*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a90*/ BRA 0xa90; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
/*MatVecMul_Kernel*/
__global__
/*
 * Computes C = A * B where A is a row-major n x n matrix and B an n-vector.
 * Each thread (indexed by threadIdx.x only — single-block launch assumed)
 * produces one element of C as the dot product of its row of A with B.
 */
void MatVecMul_Kernel(float* A, float* B, float* C, int n) {
	const int row = threadIdx.x;
	if (row >= n) return;          // guard: more threads than rows
	float acc = 0;
	for (int col = 0; col < n; ++col) {
		acc += A[row * n + col] * B[col];
	}
	C[row] = acc;
}
/*MatVecMul_GPU*/
/*
 * MatVecMul_GPU: host wrapper around MatVecMul_Kernel. Allocates device
 * buffers, copies h_A (n x n) and h_B/h_C (n) to the device, launches the
 * kernel, and copies the result back into h_C before freeing device memory.
 *
 * Bug fix: the launch was hard-coded to <<<1, 10>>>, ignoring the n
 * parameter — wrong results for any n != 10. It now launches n threads.
 * NOTE(review): single-block launch, so n must stay <= 1024; confirm
 * callers, or move to a multi-block grid together with a blockIdx-aware
 * kernel.
 */
void MatVecMul_GPU(float* h_A, float* h_B, float* h_C, int n) {
	// size_t keeps n*n*sizeof(float) from overflowing int for large n.
	size_t sizeM = (size_t)n * n * sizeof(float);
	size_t sizeV = (size_t)n * sizeof(float);
	float *d_A;
	float *d_B;
	float *d_C;
	cudaMalloc(&d_A, sizeM);
	cudaMemcpy(d_A, h_A, sizeM, cudaMemcpyHostToDevice);
	cudaMalloc(&d_B, sizeV);
	cudaMemcpy(d_B, h_B, sizeV, cudaMemcpyHostToDevice);
	cudaMalloc(&d_C, sizeV);
	cudaMemcpy(d_C, h_C, sizeV, cudaMemcpyHostToDevice);
	// One thread per output element (was a hard-coded 10).
	MatVecMul_Kernel <<< 1, n >>> (d_A, d_B, d_C, n);
	// Surface launch-configuration errors instead of silently copying back
	// stale data.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		fprintf(stderr, "MatVecMul_Kernel launch failed: %s\n",
		        cudaGetErrorString(err));
	}
	// Blocking D2H copy also synchronizes with the kernel.
	cudaMemcpy(h_C, d_C, sizeV, cudaMemcpyDeviceToHost);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
}
int main() {
//Host Matrix
float *h_A, *h_B, *h_C;
int n = 10;
h_A = (float*)malloc(n*n * sizeof(float));
h_B = (float*)malloc(n * sizeof(float));
h_C = (float*)malloc(n * sizeof(float));
//Create Matrix
for (int i = 0; i < n*n; i++) {
h_A[i] = 1.0;
}
//Create Vector
for (int i = 0; i < n; i++) {
h_B[i] = 1.0;
h_C[i] = 1.0;
}
//MatVecMul (Main)
chrono::time_point<chrono::system_clock> MatVecMul_GPU_Start, MatVecMul_GPU_End;
MatVecMul_GPU_Start = chrono::system_clock::now();
MatVecMul_GPU(h_A, h_B, h_C, n);
MatVecMul_GPU_End = chrono::system_clock::now();
cout << "MatVecMul_GPU: " << chrono::duration_cast<chrono::nanoseconds>(MatVecMul_GPU_End - MatVecMul_GPU_Start).count() << "ns." << endl;
//Print MatVecMul
for (int i = 0; i < n; i++) {
cout << h_C[i] << " ";
}
cout << endl;
//Free
free(h_A);
free(h_B);
free(h_C);
return 0;
} | .file "tmpxft_0018df27_00000000-6_e2.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3778:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3778:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i
.type _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i, @function
_Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i:
.LFB3800:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16MatVecMul_KernelPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3800:
.size _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i, .-_Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i
.globl _Z16MatVecMul_KernelPfS_S_i
.type _Z16MatVecMul_KernelPfS_S_i, @function
_Z16MatVecMul_KernelPfS_S_i:
.LFB3801:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3801:
.size _Z16MatVecMul_KernelPfS_S_i, .-_Z16MatVecMul_KernelPfS_S_i
.globl _Z13MatVecMul_GPUPfS_S_i
.type _Z13MatVecMul_GPUPfS_S_i, @function
_Z13MatVecMul_GPUPfS_S_i:
.LFB3768:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r14
movq %rsi, %r13
movq %rdx, %r12
movl %ecx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl %ecx, %ebx
imull %ecx, %ebx
sall $2, %ebx
movslq %ebx, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leal 0(,%rbp,4), %ebx
movslq %ebx, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $10, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl %ebp, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3768:
.size _Z13MatVecMul_GPUPfS_S_i, .-_Z13MatVecMul_GPUPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "MatVecMul_GPU: "
.LC2:
.string "ns."
.LC3:
.string " "
.text
.globl main
.type main, @function
main:
.LFB3769:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl $400, %edi
call malloc@PLT
movq %rax, %rbx
movq %rax, 8(%rsp)
movl $40, %edi
call malloc@PLT
movq %rax, %r12
movl $40, %edi
call malloc@PLT
movq %rax, %rbp
movq %rbx, %rax
leaq 400(%rbx), %rdx
movss .LC0(%rip), %xmm0
.L18:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L18
movl $0, %eax
movss .LC0(%rip), %xmm0
.L19:
movss %xmm0, (%r12,%rax)
movss %xmm0, 0(%rbp,%rax)
addq $4, %rax
cmpq $40, %rax
jne .L19
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r13
movl $10, %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call _Z13MatVecMul_GPUPfS_S_i
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %rbx
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbx, %rsi
subq %r13, %rsi
call _ZNSo9_M_insertIlEERSoT_@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rbp, %rbx
leaq 40(%rbp), %r15
leaq _ZSt4cout(%rip), %r14
leaq .LC3(%rip), %r13
.L20:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r14, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %r15, %rbx
jne .L20
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movl $0, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3769:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z16MatVecMul_KernelPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3803:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z16MatVecMul_KernelPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3803:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
/*MatVecMul_Kernel*/
__global__
void MatVecMul_Kernel(float* A, float* B, float* C, int n) {
int i = threadIdx.x;
int offset;
float sum = 0;
if (i < n) {
for (int j = 0; j < n; j++) {
offset = i*n + j;
sum += A[offset] * B[j];
}
C[i] = sum;
}
}
/*MatVecMul_GPU*/
void MatVecMul_GPU(float* h_A, float* h_B, float* h_C, int n) {
int sizeM = n*n * sizeof(float);
int sizeV = n * sizeof(float);
float *d_A;
float *d_B;
float *d_C;
cudaMalloc(&d_A, sizeM);
cudaMemcpy(d_A, h_A, sizeM, cudaMemcpyHostToDevice);
cudaMalloc(&d_B, sizeV);
cudaMemcpy(d_B, h_B, sizeV, cudaMemcpyHostToDevice);
cudaMalloc(&d_C, sizeV);
cudaMemcpy(d_C, h_C, sizeV, cudaMemcpyHostToDevice);
//dim3 dimGrid(ceil(n / 32.0), 1, 1);
//dim3 dimBlock(32.0, 1, 1);
MatVecMul_Kernel <<< 1, 10 >>> (d_A, d_B, d_C, n);
cudaMemcpy(h_C, d_C, sizeV, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
int main() {
//Host Matrix
float *h_A, *h_B, *h_C;
int n = 10;
h_A = (float*)malloc(n*n * sizeof(float));
h_B = (float*)malloc(n * sizeof(float));
h_C = (float*)malloc(n * sizeof(float));
//Create Matrix
for (int i = 0; i < n*n; i++) {
h_A[i] = 1.0;
}
//Create Vector
for (int i = 0; i < n; i++) {
h_B[i] = 1.0;
h_C[i] = 1.0;
}
//MatVecMul (Main)
chrono::time_point<chrono::system_clock> MatVecMul_GPU_Start, MatVecMul_GPU_End;
MatVecMul_GPU_Start = chrono::system_clock::now();
MatVecMul_GPU(h_A, h_B, h_C, n);
MatVecMul_GPU_End = chrono::system_clock::now();
cout << "MatVecMul_GPU: " << chrono::duration_cast<chrono::nanoseconds>(MatVecMul_GPU_End - MatVecMul_GPU_Start).count() << "ns." << endl;
//Print MatVecMul
for (int i = 0; i < n; i++) {
cout << h_C[i] << " ";
}
cout << endl;
//Free
free(h_A);
free(h_B);
free(h_C);
return 0;
} | #include <hip/hip_runtime.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
/*MatVecMul_Kernel*/
__global__
void MatVecMul_Kernel(float* A, float* B, float* C, int n) {
int i = threadIdx.x;
int offset;
float sum = 0;
if (i < n) {
for (int j = 0; j < n; j++) {
offset = i*n + j;
sum += A[offset] * B[j];
}
C[i] = sum;
}
}
/*MatVecMul_GPU*/
void MatVecMul_GPU(float* h_A, float* h_B, float* h_C, int n) {
int sizeM = n*n * sizeof(float);
int sizeV = n * sizeof(float);
float *d_A;
float *d_B;
float *d_C;
hipMalloc(&d_A, sizeM);
hipMemcpy(d_A, h_A, sizeM, hipMemcpyHostToDevice);
hipMalloc(&d_B, sizeV);
hipMemcpy(d_B, h_B, sizeV, hipMemcpyHostToDevice);
hipMalloc(&d_C, sizeV);
hipMemcpy(d_C, h_C, sizeV, hipMemcpyHostToDevice);
//dim3 dimGrid(ceil(n / 32.0), 1, 1);
//dim3 dimBlock(32.0, 1, 1);
MatVecMul_Kernel <<< 1, 10 >>> (d_A, d_B, d_C, n);
hipMemcpy(h_C, d_C, sizeV, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
int main() {
//Host Matrix
float *h_A, *h_B, *h_C;
int n = 10;
h_A = (float*)malloc(n*n * sizeof(float));
h_B = (float*)malloc(n * sizeof(float));
h_C = (float*)malloc(n * sizeof(float));
//Create Matrix
for (int i = 0; i < n*n; i++) {
h_A[i] = 1.0;
}
//Create Vector
for (int i = 0; i < n; i++) {
h_B[i] = 1.0;
h_C[i] = 1.0;
}
//MatVecMul (Main)
chrono::time_point<chrono::system_clock> MatVecMul_GPU_Start, MatVecMul_GPU_End;
MatVecMul_GPU_Start = chrono::system_clock::now();
MatVecMul_GPU(h_A, h_B, h_C, n);
MatVecMul_GPU_End = chrono::system_clock::now();
cout << "MatVecMul_GPU: " << chrono::duration_cast<chrono::nanoseconds>(MatVecMul_GPU_End - MatVecMul_GPU_Start).count() << "ns." << endl;
//Print MatVecMul
for (int i = 0; i < n; i++) {
cout << h_C[i] << " ";
}
cout << endl;
//Free
free(h_A);
free(h_B);
free(h_C);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
/*MatVecMul_Kernel*/
__global__
void MatVecMul_Kernel(float* A, float* B, float* C, int n) {
int i = threadIdx.x;
int offset;
float sum = 0;
if (i < n) {
for (int j = 0; j < n; j++) {
offset = i*n + j;
sum += A[offset] * B[j];
}
C[i] = sum;
}
}
/*MatVecMul_GPU*/
void MatVecMul_GPU(float* h_A, float* h_B, float* h_C, int n) {
int sizeM = n*n * sizeof(float);
int sizeV = n * sizeof(float);
float *d_A;
float *d_B;
float *d_C;
hipMalloc(&d_A, sizeM);
hipMemcpy(d_A, h_A, sizeM, hipMemcpyHostToDevice);
hipMalloc(&d_B, sizeV);
hipMemcpy(d_B, h_B, sizeV, hipMemcpyHostToDevice);
hipMalloc(&d_C, sizeV);
hipMemcpy(d_C, h_C, sizeV, hipMemcpyHostToDevice);
//dim3 dimGrid(ceil(n / 32.0), 1, 1);
//dim3 dimBlock(32.0, 1, 1);
MatVecMul_Kernel <<< 1, 10 >>> (d_A, d_B, d_C, n);
hipMemcpy(h_C, d_C, sizeV, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
int main() {
//Host Matrix
float *h_A, *h_B, *h_C;
int n = 10;
h_A = (float*)malloc(n*n * sizeof(float));
h_B = (float*)malloc(n * sizeof(float));
h_C = (float*)malloc(n * sizeof(float));
//Create Matrix
for (int i = 0; i < n*n; i++) {
h_A[i] = 1.0;
}
//Create Vector
for (int i = 0; i < n; i++) {
h_B[i] = 1.0;
h_C[i] = 1.0;
}
//MatVecMul (Main)
chrono::time_point<chrono::system_clock> MatVecMul_GPU_Start, MatVecMul_GPU_End;
MatVecMul_GPU_Start = chrono::system_clock::now();
MatVecMul_GPU(h_A, h_B, h_C, n);
MatVecMul_GPU_End = chrono::system_clock::now();
cout << "MatVecMul_GPU: " << chrono::duration_cast<chrono::nanoseconds>(MatVecMul_GPU_End - MatVecMul_GPU_Start).count() << "ns." << endl;
//Print MatVecMul
for (int i = 0; i < n; i++) {
cout << h_C[i] << " ";
}
cout << endl;
//Free
free(h_A);
free(h_B);
free(h_C);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16MatVecMul_KernelPfS_S_i
.globl _Z16MatVecMul_KernelPfS_S_i
.p2align 8
.type _Z16MatVecMul_KernelPfS_S_i,@function
_Z16MatVecMul_KernelPfS_S_i:
s_load_b32 s2, s[0:1], 0x18
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB0_6
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v1, v0, s2
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.LBB0_3:
global_load_b32 v1, v[3:4], off
s_load_b32 s3, s[6:7], 0x0
v_add_co_u32 v3, vcc_lo, v3, 4
s_add_i32 s2, s2, -1
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_add_u32 s6, s6, 4
s_addc_u32 s7, s7, 0
s_cmp_eq_u32 s2, 0
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v2, s3, v1
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v2, s[0:1]
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16MatVecMul_KernelPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 8
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16MatVecMul_KernelPfS_S_i, .Lfunc_end0-_Z16MatVecMul_KernelPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16MatVecMul_KernelPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z16MatVecMul_KernelPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
/*MatVecMul_Kernel*/
__global__
void MatVecMul_Kernel(float* A, float* B, float* C, int n) {
int i = threadIdx.x;
int offset;
float sum = 0;
if (i < n) {
for (int j = 0; j < n; j++) {
offset = i*n + j;
sum += A[offset] * B[j];
}
C[i] = sum;
}
}
/*MatVecMul_GPU*/
void MatVecMul_GPU(float* h_A, float* h_B, float* h_C, int n) {
int sizeM = n*n * sizeof(float);
int sizeV = n * sizeof(float);
float *d_A;
float *d_B;
float *d_C;
hipMalloc(&d_A, sizeM);
hipMemcpy(d_A, h_A, sizeM, hipMemcpyHostToDevice);
hipMalloc(&d_B, sizeV);
hipMemcpy(d_B, h_B, sizeV, hipMemcpyHostToDevice);
hipMalloc(&d_C, sizeV);
hipMemcpy(d_C, h_C, sizeV, hipMemcpyHostToDevice);
//dim3 dimGrid(ceil(n / 32.0), 1, 1);
//dim3 dimBlock(32.0, 1, 1);
MatVecMul_Kernel <<< 1, 10 >>> (d_A, d_B, d_C, n);
hipMemcpy(h_C, d_C, sizeV, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
int main() {
//Host Matrix
float *h_A, *h_B, *h_C;
int n = 10;
h_A = (float*)malloc(n*n * sizeof(float));
h_B = (float*)malloc(n * sizeof(float));
h_C = (float*)malloc(n * sizeof(float));
//Create Matrix
for (int i = 0; i < n*n; i++) {
h_A[i] = 1.0;
}
//Create Vector
for (int i = 0; i < n; i++) {
h_B[i] = 1.0;
h_C[i] = 1.0;
}
//MatVecMul (Main)
chrono::time_point<chrono::system_clock> MatVecMul_GPU_Start, MatVecMul_GPU_End;
MatVecMul_GPU_Start = chrono::system_clock::now();
MatVecMul_GPU(h_A, h_B, h_C, n);
MatVecMul_GPU_End = chrono::system_clock::now();
cout << "MatVecMul_GPU: " << chrono::duration_cast<chrono::nanoseconds>(MatVecMul_GPU_End - MatVecMul_GPU_Start).count() << "ns." << endl;
//Print MatVecMul
for (int i = 0; i < n; i++) {
cout << h_C[i] << " ";
}
cout << endl;
//Free
free(h_A);
free(h_B);
free(h_C);
return 0;
} | .text
.file "e2.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z31__device_stub__MatVecMul_KernelPfS_S_i # -- Begin function _Z31__device_stub__MatVecMul_KernelPfS_S_i
.p2align 4, 0x90
.type _Z31__device_stub__MatVecMul_KernelPfS_S_i,@function
_Z31__device_stub__MatVecMul_KernelPfS_S_i: # @_Z31__device_stub__MatVecMul_KernelPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16MatVecMul_KernelPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__MatVecMul_KernelPfS_S_i, .Lfunc_end0-_Z31__device_stub__MatVecMul_KernelPfS_S_i
.cfi_endproc
# -- End function
.globl _Z13MatVecMul_GPUPfS_S_i # -- Begin function _Z13MatVecMul_GPUPfS_S_i
.p2align 4, 0x90
.type _Z13MatVecMul_GPUPfS_S_i,@function
_Z13MatVecMul_GPUPfS_S_i: # @_Z13MatVecMul_GPUPfS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %r15d
movq %rdx, %rbx
movq %rsi, %r12
movq %rdi, %r14
leal (,%r15,4), %ebp
movl %ebp, %eax
imull %ecx, %eax
movslq %eax, %r13
leaq 24(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movslq %ebp, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 9(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %r15d, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z16MatVecMul_KernelPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z13MatVecMul_GPUPfS_S_i, .Lfunc_end1-_Z13MatVecMul_GPUPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %rbx
movl $40, %edi
callq malloc
movq %rax, %r14
movl $40, %edi
callq malloc
movq %rax, %r15
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rbx,%rax,4) # imm = 0x3F800000
incq %rax
cmpq $100, %rax
jne .LBB2_1
# %bb.2: # %.preheader.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_3: # %.preheader
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%r14,%rax,4) # imm = 0x3F800000
movl $1065353216, (%r15,%rax,4) # imm = 0x3F800000
incq %rax
cmpq $10, %rax
jne .LBB2_3
# %bb.4:
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r12
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
movl $10, %ecx
callq _Z13MatVecMul_GPUPfS_S_i
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r13
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $15, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
subq %r12, %r13
movl $_ZSt4cout, %edi
movq %r13, %rsi
callq _ZNSo9_M_insertIlEERSoT_
movq %rax, %r12
movl $.L.str.1, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %r13
testq %r13, %r13
je .LBB2_15
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r13)
je .LBB2_7
# %bb.6:
movzbl 67(%r13), %eax
jmp .LBB2_8
.LBB2_7:
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %r12, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_9: # =>This Inner Loop Header: Depth=1
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq $10, %r12
jne .LBB2_9
# %bb.10:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB2_15
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i32
cmpb $0, 56(%r12)
je .LBB2_13
# %bb.12:
movzbl 67(%r12), %eax
jmp .LBB2_14
.LBB2_13:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit35
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB2_15:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16MatVecMul_KernelPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16MatVecMul_KernelPfS_S_i,@object # @_Z16MatVecMul_KernelPfS_S_i
.section .rodata,"a",@progbits
.globl _Z16MatVecMul_KernelPfS_S_i
.p2align 3, 0x0
_Z16MatVecMul_KernelPfS_S_i:
.quad _Z31__device_stub__MatVecMul_KernelPfS_S_i
.size _Z16MatVecMul_KernelPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "MatVecMul_GPU: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "ns."
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " "
.size .L.str.2, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16MatVecMul_KernelPfS_S_i"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__MatVecMul_KernelPfS_S_i
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16MatVecMul_KernelPfS_S_i
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16MatVecMul_KernelPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */
/* 0x000fe200078e00ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0060*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */
/* 0x000fc600000001ff */
/*0070*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fda0003f06270 */
/*0080*/ @!P0 BRA 0xa50 ; /* 0x000009c000008947 */
/* 0x000fea0003800000 */
/*0090*/ IADD3 R2, R6.reuse, -0x1, RZ ; /* 0xffffffff06027810 */
/* 0x040fe20007ffe0ff */
/*00a0*/ IMAD.MOV.U32 R15, RZ, RZ, RZ ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e00ff */
/*00b0*/ LOP3.LUT R6, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306067812 */
/* 0x000fe400078ec0ff */
/*00c0*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f06070 */
/*00d0*/ MOV R7, RZ ; /* 0x000000ff00077202 */
/* 0x000fd60000000f00 */
/*00e0*/ @!P0 BRA 0x910 ; /* 0x0000082000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R8, -R6, c[0x0][0x178], RZ ; /* 0x00005e0006087a10 */
/* 0x000fe20007ffe1ff */
/*0100*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0110*/ HFMA2.MMA R15, -RZ, RZ, 0, 0 ; /* 0x00000000ff0f7435 */
/* 0x000fe200000001ff */
/*0120*/ IMAD R2, R0, c[0x0][0x178], RZ ; /* 0x00005e0000027a24 */
/* 0x000fe200078e02ff */
/*0130*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f04270 */
/*0140*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */
/* 0x000fe200078e00ff */
/*0150*/ MOV R4, c[0x0][0x168] ; /* 0x00005a0000047a02 */
/* 0x000fe20000000f00 */
/*0160*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */
/* 0x000fe400078e00ff */
/*0170*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fd000078e0203 */
/*0180*/ @!P0 BRA 0x790 ; /* 0x0000060000008947 */
/* 0x000fea0003800000 */
/*0190*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*01a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01b0*/ @!P1 BRA 0x550 ; /* 0x0000039000009947 */
/* 0x000fea0003800000 */
/*01c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*01d0*/ LDG.E R16, [R4.64] ; /* 0x0000000404107981 */
/* 0x000ea8000c1e1900 */
/*01e0*/ LDG.E R17, [R2.64] ; /* 0x0000000402117981 */
/* 0x000ea8000c1e1900 */
/*01f0*/ LDG.E R18, [R4.64+0x4] ; /* 0x0000040404127981 */
/* 0x000ee8000c1e1900 */
/*0200*/ LDG.E R25, [R2.64+0x4] ; /* 0x0000040402197981 */
/* 0x000ee8000c1e1900 */
/*0210*/ LDG.E R19, [R4.64+0x8] ; /* 0x0000080404137981 */
/* 0x000f28000c1e1900 */
/*0220*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */
/* 0x000f28000c1e1900 */
/*0230*/ LDG.E R22, [R4.64+0xc] ; /* 0x00000c0404167981 */
/* 0x000f68000c1e1900 */
/*0240*/ LDG.E R21, [R2.64+0xc] ; /* 0x00000c0402157981 */
/* 0x000f68000c1e1900 */
/*0250*/ LDG.E R23, [R4.64+0x10] ; /* 0x0000100404177981 */
/* 0x000f68000c1e1900 */
/*0260*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */
/* 0x000f68000c1e1900 */
/*0270*/ LDG.E R9, [R4.64+0x14] ; /* 0x0000140404097981 */
/* 0x000f68000c1e1900 */
/*0280*/ LDG.E R10, [R2.64+0x14] ; /* 0x00001404020a7981 */
/* 0x000f68000c1e1900 */
/*0290*/ LDG.E R11, [R4.64+0x18] ; /* 0x00001804040b7981 */
/* 0x000f68000c1e1900 */
/*02a0*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */
/* 0x000f68000c1e1900 */
/*02b0*/ LDG.E R13, [R4.64+0x1c] ; /* 0x00001c04040d7981 */
/* 0x000f68000c1e1900 */
/*02c0*/ LDG.E R14, [R2.64+0x1c] ; /* 0x00001c04020e7981 */
/* 0x000f62000c1e1900 */
/*02d0*/ FFMA R17, R16, R17, R15 ; /* 0x0000001110117223 */
/* 0x004fc6000000000f */
/*02e0*/ LDG.E R15, [R4.64+0x20] ; /* 0x00002004040f7981 */
/* 0x0000a8000c1e1900 */
/*02f0*/ LDG.E R16, [R2.64+0x20] ; /* 0x0000200402107981 */
/* 0x0002a2000c1e1900 */
/*0300*/ FFMA R25, R18, R25, R17 ; /* 0x0000001912197223 */
/* 0x008fc60000000011 */
/*0310*/ LDG.E R17, [R4.64+0x24] ; /* 0x0000240404117981 */
/* 0x0000e8000c1e1900 */
/*0320*/ LDG.E R18, [R2.64+0x24] ; /* 0x0000240402127981 */
/* 0x0002e2000c1e1900 */
/*0330*/ FFMA R25, R19, R20, R25 ; /* 0x0000001413197223 */
/* 0x010fc60000000019 */
/*0340*/ LDG.E R19, [R4.64+0x28] ; /* 0x0000280404137981 */
/* 0x000128000c1e1900 */
/*0350*/ LDG.E R20, [R2.64+0x28] ; /* 0x0000280402147981 */
/* 0x000322000c1e1900 */
/*0360*/ FFMA R25, R22, R21, R25 ; /* 0x0000001516197223 */
/* 0x020fc60000000019 */
/*0370*/ LDG.E R21, [R4.64+0x2c] ; /* 0x00002c0404157981 */
/* 0x000168000c1e1900 */
/*0380*/ LDG.E R22, [R2.64+0x2c] ; /* 0x00002c0402167981 */
/* 0x000362000c1e1900 */
/*0390*/ FFMA R25, R23, R24, R25 ; /* 0x0000001817197223 */
/* 0x000fc60000000019 */
/*03a0*/ LDG.E R23, [R4.64+0x30] ; /* 0x0000300404177981 */
/* 0x000168000c1e1900 */
/*03b0*/ LDG.E R24, [R2.64+0x30] ; /* 0x0000300402187981 */
/* 0x000362000c1e1900 */
/*03c0*/ FFMA R25, R9, R10, R25 ; /* 0x0000000a09197223 */
/* 0x000fc60000000019 */
/*03d0*/ LDG.E R9, [R4.64+0x34] ; /* 0x0000340404097981 */
/* 0x000168000c1e1900 */
/*03e0*/ LDG.E R10, [R2.64+0x34] ; /* 0x00003404020a7981 */
/* 0x000362000c1e1900 */
/*03f0*/ FFMA R25, R11, R12, R25 ; /* 0x0000000c0b197223 */
/* 0x000fc60000000019 */
/*0400*/ LDG.E R11, [R4.64+0x38] ; /* 0x00003804040b7981 */
/* 0x000168000c1e1900 */
/*0410*/ LDG.E R12, [R2.64+0x38] ; /* 0x00003804020c7981 */
/* 0x000362000c1e1900 */
/*0420*/ FFMA R25, R13, R14, R25 ; /* 0x0000000e0d197223 */
/* 0x000fc60000000019 */
/*0430*/ LDG.E R13, [R4.64+0x3c] ; /* 0x00003c04040d7981 */
/* 0x000168000c1e1900 */
/*0440*/ LDG.E R14, [R2.64+0x3c] ; /* 0x00003c04020e7981 */
/* 0x000362000c1e1900 */
/*0450*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fe40007ffe0ff */
/*0460*/ IADD3 R7, R7, 0x10, RZ ; /* 0x0000001007077810 */
/* 0x000fe40007ffe0ff */
/*0470*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*0480*/ IADD3 R4, P3, R4, 0x40, RZ ; /* 0x0000004004047810 */
/* 0x001fc40007f7e0ff */
/*0490*/ IADD3 R2, P2, R2, 0x40, RZ ; /* 0x0000004002027810 */
/* 0x002fe40007f5e0ff */
/*04a0*/ IADD3.X R5, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff057210 */
/* 0x000fc60001ffe4ff */
/*04b0*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */
/* 0x000fe400010e0603 */
/*04c0*/ FFMA R15, R15, R16, R25 ; /* 0x000000100f0f7223 */
/* 0x004fc80000000019 */
/*04d0*/ FFMA R15, R17, R18, R15 ; /* 0x00000012110f7223 */
/* 0x008fc8000000000f */
/*04e0*/ FFMA R15, R19, R20, R15 ; /* 0x00000014130f7223 */
/* 0x010fc8000000000f */
/*04f0*/ FFMA R15, R21, R22, R15 ; /* 0x00000016150f7223 */
/* 0x020fc8000000000f */
/*0500*/ FFMA R15, R23, R24, R15 ; /* 0x00000018170f7223 */
/* 0x000fc8000000000f */
/*0510*/ FFMA R9, R9, R10, R15 ; /* 0x0000000a09097223 */
/* 0x000fc8000000000f */
/*0520*/ FFMA R9, R11, R12, R9 ; /* 0x0000000c0b097223 */
/* 0x000fc80000000009 */
/*0530*/ FFMA R15, R13, R14, R9 ; /* 0x0000000e0d0f7223 */
/* 0x000fe20000000009 */
/*0540*/ @P1 BRA 0x1d0 ; /* 0xfffffc8000001947 */
/* 0x000fea000383ffff */
/*0550*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*0560*/ @!P1 BRA 0x770 ; /* 0x0000020000009947 */
/* 0x000fea0003800000 */
/*0570*/ LDG.E R16, [R4.64] ; /* 0x0000000404107981 */
/* 0x0000a8000c1e1900 */
/*0580*/ LDG.E R17, [R2.64] ; /* 0x0000000402117981 */
/* 0x000ea8000c1e1900 */
/*0590*/ LDG.E R19, [R4.64+0x4] ; /* 0x0000040404137981 */
/* 0x0000e8000c1e1900 */
/*05a0*/ LDG.E R18, [R2.64+0x4] ; /* 0x0000040402127981 */
/* 0x000ee8000c1e1900 */
/*05b0*/ LDG.E R21, [R4.64+0x8] ; /* 0x0000080404157981 */
/* 0x000128000c1e1900 */
/*05c0*/ LDG.E R20, [R2.64+0x8] ; /* 0x0000080402147981 */
/* 0x000f28000c1e1900 */
/*05d0*/ LDG.E R23, [R4.64+0xc] ; /* 0x00000c0404177981 */
/* 0x000168000c1e1900 */
/*05e0*/ LDG.E R22, [R2.64+0xc] ; /* 0x00000c0402167981 */
/* 0x000f68000c1e1900 */
/*05f0*/ LDG.E R25, [R4.64+0x10] ; /* 0x0000100404197981 */
/* 0x000168000c1e1900 */
/*0600*/ LDG.E R24, [R2.64+0x10] ; /* 0x0000100402187981 */
/* 0x000f68000c1e1900 */
/*0610*/ LDG.E R13, [R4.64+0x14] ; /* 0x00001404040d7981 */
/* 0x000168000c1e1900 */
/*0620*/ LDG.E R14, [R2.64+0x14] ; /* 0x00001404020e7981 */
/* 0x000f68000c1e1900 */
/*0630*/ LDG.E R11, [R4.64+0x18] ; /* 0x00001804040b7981 */
/* 0x000168000c1e1900 */
/*0640*/ LDG.E R12, [R2.64+0x18] ; /* 0x00001804020c7981 */
/* 0x000f68000c1e1900 */
/*0650*/ LDG.E R9, [R4.64+0x1c] ; /* 0x00001c0404097981 */
/* 0x000168000c1e1900 */
/*0660*/ LDG.E R10, [R2.64+0x1c] ; /* 0x00001c04020a7981 */
/* 0x000362000c1e1900 */
/*0670*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0680*/ IADD3 R7, R7, 0x8, RZ ; /* 0x0000000807077810 */
/* 0x000fe40007ffe0ff */
/*0690*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe40007ffe0ff */
/*06a0*/ IADD3 R4, P2, R4, 0x20, RZ ; /* 0x0000002004047810 */
/* 0x001fca0007f5e0ff */
/*06b0*/ IMAD.X R5, RZ, RZ, R5, P2 ; /* 0x000000ffff057224 */
/* 0x000fe400010e0605 */
/*06c0*/ FFMA R16, R16, R17, R15 ; /* 0x0000001110107223 */
/* 0x004fc8000000000f */
/*06d0*/ FFMA R16, R19, R18, R16 ; /* 0x0000001213107223 */
/* 0x008fc80000000010 */
/*06e0*/ FFMA R16, R21, R20, R16 ; /* 0x0000001415107223 */
/* 0x010fc80000000010 */
/*06f0*/ FFMA R16, R23, R22, R16 ; /* 0x0000001617107223 */
/* 0x020fc80000000010 */
/*0700*/ FFMA R16, R25, R24, R16 ; /* 0x0000001819107223 */
/* 0x000fc80000000010 */
/*0710*/ FFMA R13, R13, R14, R16 ; /* 0x0000000e0d0d7223 */
/* 0x000fc80000000010 */
/*0720*/ FFMA R11, R11, R12, R13 ; /* 0x0000000c0b0b7223 */
/* 0x000fe2000000000d */
/*0730*/ IADD3 R12, P1, R2, 0x20, RZ ; /* 0x00000020020c7810 */
/* 0x000fc80007f3e0ff */
/*0740*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */
/* 0x002fe40000ffe4ff */
/*0750*/ MOV R2, R12 ; /* 0x0000000c00027202 */
/* 0x000fe20000000f00 */
/*0760*/ FFMA R15, R9, R10, R11 ; /* 0x0000000a090f7223 */
/* 0x000fe4000000000b */
/*0770*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0780*/ @!P0 BRA 0x910 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*0790*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x000ea8000c1e1900 */
/*07a0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ea8000c1e1900 */
/*07b0*/ LDG.E R12, [R4.64+0x4] ; /* 0x00000404040c7981 */
/* 0x0000e8000c1e1900 */
/*07c0*/ LDG.E R11, [R2.64+0x4] ; /* 0x00000404020b7981 */
/* 0x000ee8000c1e1900 */
/*07d0*/ LDG.E R14, [R4.64+0x8] ; /* 0x00000804040e7981 */
/* 0x000128000c1e1900 */
/*07e0*/ LDG.E R13, [R2.64+0x8] ; /* 0x00000804020d7981 */
/* 0x000328000c1e1900 */
/*07f0*/ LDG.E R16, [R4.64+0xc] ; /* 0x00000c0404107981 */
/* 0x000168000c1e1900 */
/*0800*/ LDG.E R17, [R2.64+0xc] ; /* 0x00000c0402117981 */
/* 0x000362000c1e1900 */
/*0810*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fc40007ffe0ff */
/*0820*/ IADD3 R7, R7, 0x4, RZ ; /* 0x0000000407077810 */
/* 0x000fe40007ffe0ff */
/*0830*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f05270 */
/*0840*/ FFMA R9, R10, R9, R15 ; /* 0x000000090a097223 */
/* 0x004fe2000000000f */
/*0850*/ IADD3 R10, P2, R4, 0x10, RZ ; /* 0x00000010040a7810 */
/* 0x000fca0007f5e0ff */
/*0860*/ IMAD.MOV.U32 R4, RZ, RZ, R10 ; /* 0x000000ffff047224 */
/* 0x001fe400078e000a */
/*0870*/ FFMA R9, R12, R11, R9 ; /* 0x0000000b0c097223 */
/* 0x008fe20000000009 */
/*0880*/ IADD3 R12, P1, R2, 0x10, RZ ; /* 0x00000010020c7810 */
/* 0x000fe20007f3e0ff */
/*0890*/ IMAD.X R11, RZ, RZ, R5, P2 ; /* 0x000000ffff0b7224 */
/* 0x000fc800010e0605 */
/*08a0*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x002fe200078e000c */
/*08b0*/ MOV R5, R11 ; /* 0x0000000b00057202 */
/* 0x000fe20000000f00 */
/*08c0*/ FFMA R9, R14, R13, R9 ; /* 0x0000000d0e097223 */
/* 0x010fe20000000009 */
/*08d0*/ IADD3.X R13, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff0d7210 */
/* 0x000fc80000ffe4ff */
/*08e0*/ MOV R3, R13 ; /* 0x0000000d00037202 */
/* 0x000fe20000000f00 */
/*08f0*/ FFMA R15, R16, R17, R9 ; /* 0x00000011100f7223 */
/* 0x020fe20000000009 */
/*0900*/ @P0 BRA 0x790 ; /* 0xfffffe8000000947 */
/* 0x000fea000383ffff */
/*0910*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fda0003f05270 */
/*0920*/ @!P0 BRA 0xa50 ; /* 0x0000012000008947 */
/* 0x000fea0003800000 */
/*0930*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe400078e00ff */
/*0940*/ IMAD R4, R0, c[0x0][0x178], R7 ; /* 0x00005e0000047a24 */
/* 0x000fe400078e0207 */
/*0950*/ IMAD.WIDE R2, R7, R5, c[0x0][0x168] ; /* 0x00005a0007027625 */
/* 0x000fc800078e0205 */
/*0960*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe200078e0205 */
/*0970*/ MOV R8, R2 ; /* 0x0000000200087202 */
/* 0x000fc60000000f00 */
/*0980*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */
/* 0x000fc600078e0004 */
/*0990*/ MOV R2, R8 ; /* 0x0000000800027202 */
/* 0x000fe40000000f00 */
/*09a0*/ MOV R4, R7 ; /* 0x0000000700047202 */
/* 0x000fc80000000f00 */
/*09b0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x0000a8000c1e1900 */
/*09c0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x0002a2000c1e1900 */
/*09d0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe40007ffe0ff */
/*09e0*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x000fe40007f3e0ff */
/*09f0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f05270 */
/*0a00*/ IADD3 R7, P2, R7, 0x4, RZ ; /* 0x0000000407077810 */
/* 0x000fe20007f5e0ff */
/*0a10*/ IMAD.X R3, RZ, RZ, R3, P1 ; /* 0x000000ffff037224 */
/* 0x001fc600008e0603 */
/*0a20*/ IADD3.X R5, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff057210 */
/* 0x002fe200017fe4ff */
/*0a30*/ FFMA R15, R2, R4, R15 ; /* 0x00000004020f7223 */
/* 0x004fcc000000000f */
/*0a40*/ @P0 BRA 0x990 ; /* 0xffffff4000000947 */
/* 0x000fea000383ffff */
/*0a50*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fca0000000f00 */
/*0a60*/ IMAD.WIDE R2, R0, R3, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fca00078e0203 */
/*0a70*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe2000c101904 */
/*0a80*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a90*/ BRA 0xa90; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16MatVecMul_KernelPfS_S_i
.globl _Z16MatVecMul_KernelPfS_S_i
.p2align 8
.type _Z16MatVecMul_KernelPfS_S_i,@function
_Z16MatVecMul_KernelPfS_S_i:
s_load_b32 s2, s[0:1], 0x18
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB0_6
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v1, v0, s2
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.LBB0_3:
global_load_b32 v1, v[3:4], off
s_load_b32 s3, s[6:7], 0x0
v_add_co_u32 v3, vcc_lo, v3, 4
s_add_i32 s2, s2, -1
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_add_u32 s6, s6, 4
s_addc_u32 s7, s7, 0
s_cmp_eq_u32 s2, 0
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v2, s3, v1
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v2, s[0:1]
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16MatVecMul_KernelPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 8
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16MatVecMul_KernelPfS_S_i, .Lfunc_end0-_Z16MatVecMul_KernelPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16MatVecMul_KernelPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z16MatVecMul_KernelPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0018df27_00000000-6_e2.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3778:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3778:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i
.type _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i, @function
_Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i:
.LFB3800:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16MatVecMul_KernelPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3800:
.size _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i, .-_Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i
.globl _Z16MatVecMul_KernelPfS_S_i
.type _Z16MatVecMul_KernelPfS_S_i, @function
_Z16MatVecMul_KernelPfS_S_i:
.LFB3801:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3801:
.size _Z16MatVecMul_KernelPfS_S_i, .-_Z16MatVecMul_KernelPfS_S_i
.globl _Z13MatVecMul_GPUPfS_S_i
.type _Z13MatVecMul_GPUPfS_S_i, @function
_Z13MatVecMul_GPUPfS_S_i:
.LFB3768:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r14
movq %rsi, %r13
movq %rdx, %r12
movl %ecx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl %ecx, %ebx
imull %ecx, %ebx
sall $2, %ebx
movslq %ebx, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leal 0(,%rbp,4), %ebx
movslq %ebx, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $10, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl %ebp, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z41__device_stub__Z16MatVecMul_KernelPfS_S_iPfS_S_i
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3768:
.size _Z13MatVecMul_GPUPfS_S_i, .-_Z13MatVecMul_GPUPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "MatVecMul_GPU: "
.LC2:
.string "ns."
.LC3:
.string " "
.text
.globl main
.type main, @function
main:
.LFB3769:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl $400, %edi
call malloc@PLT
movq %rax, %rbx
movq %rax, 8(%rsp)
movl $40, %edi
call malloc@PLT
movq %rax, %r12
movl $40, %edi
call malloc@PLT
movq %rax, %rbp
movq %rbx, %rax
leaq 400(%rbx), %rdx
movss .LC0(%rip), %xmm0
.L18:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L18
movl $0, %eax
movss .LC0(%rip), %xmm0
.L19:
movss %xmm0, (%r12,%rax)
movss %xmm0, 0(%rbp,%rax)
addq $4, %rax
cmpq $40, %rax
jne .L19
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r13
movl $10, %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call _Z13MatVecMul_GPUPfS_S_i
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %rbx
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbx, %rsi
subq %r13, %rsi
call _ZNSo9_M_insertIlEERSoT_@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq %rbp, %rbx
leaq 40(%rbp), %r15
leaq _ZSt4cout(%rip), %r14
leaq .LC3(%rip), %r13
.L20:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r14, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %r15, %rbx
jne .L20
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movl $0, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3769:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z16MatVecMul_KernelPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3803:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z16MatVecMul_KernelPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3803:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "e2.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z31__device_stub__MatVecMul_KernelPfS_S_i # -- Begin function _Z31__device_stub__MatVecMul_KernelPfS_S_i
.p2align 4, 0x90
.type _Z31__device_stub__MatVecMul_KernelPfS_S_i,@function
_Z31__device_stub__MatVecMul_KernelPfS_S_i: # @_Z31__device_stub__MatVecMul_KernelPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16MatVecMul_KernelPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__MatVecMul_KernelPfS_S_i, .Lfunc_end0-_Z31__device_stub__MatVecMul_KernelPfS_S_i
.cfi_endproc
# -- End function
.globl _Z13MatVecMul_GPUPfS_S_i # -- Begin function _Z13MatVecMul_GPUPfS_S_i
.p2align 4, 0x90
.type _Z13MatVecMul_GPUPfS_S_i,@function
_Z13MatVecMul_GPUPfS_S_i: # @_Z13MatVecMul_GPUPfS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %r15d
movq %rdx, %rbx
movq %rsi, %r12
movq %rdi, %r14
leal (,%r15,4), %ebp
movl %ebp, %eax
imull %ecx, %eax
movslq %eax, %r13
leaq 24(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq %r14, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movslq %ebp, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 9(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %r15d, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z16MatVecMul_KernelPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z13MatVecMul_GPUPfS_S_i, .Lfunc_end1-_Z13MatVecMul_GPUPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %rbx
movl $40, %edi
callq malloc
movq %rax, %r14
movl $40, %edi
callq malloc
movq %rax, %r15
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rbx,%rax,4) # imm = 0x3F800000
incq %rax
cmpq $100, %rax
jne .LBB2_1
# %bb.2: # %.preheader.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_3: # %.preheader
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%r14,%rax,4) # imm = 0x3F800000
movl $1065353216, (%r15,%rax,4) # imm = 0x3F800000
incq %rax
cmpq $10, %rax
jne .LBB2_3
# %bb.4:
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r12
movq %rbx, %rdi
movq %r14, %rsi
movq %r15, %rdx
movl $10, %ecx
callq _Z13MatVecMul_GPUPfS_S_i
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r13
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $15, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
subq %r12, %r13
movl $_ZSt4cout, %edi
movq %r13, %rsi
callq _ZNSo9_M_insertIlEERSoT_
movq %rax, %r12
movl $.L.str.1, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %r13
testq %r13, %r13
je .LBB2_15
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r13)
je .LBB2_7
# %bb.6:
movzbl 67(%r13), %eax
jmp .LBB2_8
.LBB2_7:
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movq %r12, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_9: # =>This Inner Loop Header: Depth=1
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq $10, %r12
jne .LBB2_9
# %bb.10:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB2_15
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i32
cmpb $0, 56(%r12)
je .LBB2_13
# %bb.12:
movzbl 67(%r12), %eax
jmp .LBB2_14
.LBB2_13:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit35
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB2_15:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (placed in .init_array below): registers the embedded
# HIP fat binary exactly once (the handle is cached in __hip_gpubin_handle),
# registers the MatVecMul kernel stub with the runtime, and queues
# __hip_module_dtor via atexit (tail call).
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# register the fat binary only on the first call
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
# zero the four trailing pointer arguments passed on the stack
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16MatVecMul_KernelPfS_S_i, %esi # host-side kernel handle
movl $.L__unnamed_1, %edx # kernel name string
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (registered via atexit by __hip_module_ctor): unregisters
# the fat binary if a handle was cached and clears the cache so a second
# invocation is a no-op.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax # re-align the stack for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16MatVecMul_KernelPfS_S_i,@object # @_Z16MatVecMul_KernelPfS_S_i
.section .rodata,"a",@progbits
.globl _Z16MatVecMul_KernelPfS_S_i
.p2align 3, 0x0
_Z16MatVecMul_KernelPfS_S_i:
.quad _Z31__device_stub__MatVecMul_KernelPfS_S_i
.size _Z16MatVecMul_KernelPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "MatVecMul_GPU: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "ns."
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " "
.size .L.str.2, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16MatVecMul_KernelPfS_S_i"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__MatVecMul_KernelPfS_S_i
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16MatVecMul_KernelPfS_S_i
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <curand.h>
#include <curand_kernel.h>
// In the following section, define the prob distribution parameters
#define N_PARAMS 3
#define PARAM1 50.0f, 3.0f, 0.5f // format: LAMBDA, A, B
#define PARAM2 1.5f, 0.8f, 5.0f
// parameters saved as constants
unsigned int N_BYTES_PRM = N_PARAMS * sizeof(float); // size of parameter
unsigned int N_SIMS, N_BLK, N_THRD, N_BYTES_I, N_BYTES_F;
const unsigned int MAX_THREADS = 512; // max threads per block
// Calculate and return mean of an array of floats
// Arithmetic mean of the first n entries of arr.
//
// Each element is divided by n *before* being accumulated (in double
// precision), matching the original contract, so the result for n == 0
// is simply 0.0f because the loop body never executes.
float calcMean(float arr[], unsigned int const n) {
    double accum = 0.0;
    for (unsigned int idx = 0u; idx < n; ++idx)
        accum += arr[idx] / n;
    return accum; // implicit double -> float narrowing, as before
}
// Create a fresh CUDA event and record it on the default stream, producing a
// timestamp the caller can later feed to cudaEventElapsedTime().
// Ownership of the event transfers to the caller (it is never destroyed here).
// NOTE(review): the cudaEventCreate/cudaEventRecord return codes are ignored;
// a failure would only surface at a later synchronizing call.
__host__ cudaEvent_t get_time(void) {
    cudaEvent_t stamp;
    cudaEventCreate(&stamp);
    cudaEventRecord(stamp);
    return stamp;
}
// Based on parameter, draw poisson random number as frequency of losses
// Kernel: one thread per simulation. Thread tid seeds its own curand state
// with its global index and draws a Poisson(lambda) variate, storing it as
// the loss frequency f_out[tid]. lambda is read from prm[0]; threads past N
// exit immediately (guard for the grid tail).
__global__ void sim_freq(unsigned int *f_out, float *prm, unsigned int N) {
    unsigned int const tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= N) return;                    // grid-tail guard
    curandState_t rng;
    curand_init(tid, 0, 0, &rng);            // seed = global thread index
    float const lambda = prm[0];             // Poisson rate parameter
    f_out[tid] = curand_poisson(&rng, lambda);
}
// Based on parameter and freq, draw and sum pareto loss amounts
// Kernel: one thread per simulation. For thread tid, draws freq[tid]
// Pareto losses via inverse-CDF sampling and writes their sum (narrowed
// to float) to loss_out[tid]. Pareto parameters come from prm[1] (A) and
// prm[2] (B); threads past N exit immediately.
__global__ void sim_severity(float *loss_out, unsigned int *freq, float *prm,
const unsigned int N) {
    unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < N) {
        double A = prm[1]; double B = prm[2]; // two pareto parameters
        curandState_t state;                  // initialize rand state
        curand_init(tid, 0, 0, &state);       // set seed to thread index
        // FIX: hoist the loop bound - freq[tid] is loop-invariant (never
        // written here), so read the global-memory value once instead of on
        // every iteration. An unsigned counter also removes the
        // signed/unsigned comparison the original `int f` incurred.
        unsigned int const nLosses = freq[tid];
        double sum = 0.0;
        for (unsigned int f = 0; f < nLosses; f++) {
            double unif = curand_uniform_double(&state); // U in (0, 1]
            sum += B / pow(1.0 - unif, 1.0 / A); // quantile function (inverse CDF)
        }
        loss_out[tid] = (float) sum; // sum of all losses
    }
}
// Intentional no-op placeholder (not referenced by main in this file).
void asynch() {
    // nothing to do
}
// Entry point: runs N_SIMS Monte-Carlo loss simulations twice - once
// asynchronously (two streams with overlapped copies/kernels) and once
// synchronously - printing the mean simulated loss per stream and the wall
// time for each variant. Usage: prog <nSimulations>.
// Returns EXIT_FAILURE when the argument count is wrong.
// NOTE(review): CUDA API return codes are still unchecked throughout.
int main(int argc, char* argv[]) {
    if (argc == 2) { // get number of simulations based on CMDLINE input
        N_SIMS = atoi(argv[1]);
    } else {
        printf("Usage: %s [nSimulations].\n", argv[0]);
        return EXIT_FAILURE;
    }
    N_BLK = N_SIMS / MAX_THREADS + 1;           // min of one block
    N_THRD = std::min(N_SIMS, MAX_THREADS);     // num of threads per block
    N_BYTES_F = N_SIMS * sizeof(float);         // size of loss array
    N_BYTES_I = N_SIMS * sizeof(unsigned int);  // size of frequency array
    printf("Running %u simulations ...\n", N_SIMS);
    cudaStream_t s1, s2; // Create and initialize streams
    cudaStreamCreate(&s1);
    cudaStreamCreate(&s2);
    // allocate and copy parameter to device
    float h_prm1 [N_PARAMS] = {PARAM1};
    float h_prm2 [N_PARAMS] = {PARAM2};
    // pin the parameter arrays so the async copies below can run truly async
    cudaHostRegister(h_prm1, N_BYTES_PRM, cudaHostRegisterDefault);
    cudaHostRegister(h_prm2, N_BYTES_PRM, cudaHostRegisterDefault);
    float *d_prm1, *d_prm2; // parameter on device memory
    cudaMalloc((void **)&d_prm1, N_BYTES_PRM); // allocate and copy
    cudaMalloc((void **)&d_prm2, N_BYTES_PRM);
    cudaMemcpyAsync(d_prm1, h_prm1, N_BYTES_PRM, cudaMemcpyHostToDevice, s1);
    cudaMemcpyAsync(d_prm2, h_prm2, N_BYTES_PRM, cudaMemcpyHostToDevice, s2);
    unsigned int *h_freq1, *d_freq1, *h_freq2, *d_freq2; // frequency arrays
    float *h_loss1, *d_loss1, *h_loss2, *d_loss2;        // loss arrays
    cudaMalloc((void **)&d_freq1, N_BYTES_I); // device mem for freq and loss
    cudaMalloc((void **)&d_loss1, N_BYTES_F);
    cudaMalloc((void **)&d_freq2, N_BYTES_I);
    cudaMalloc((void **)&d_loss2, N_BYTES_F);
    cudaMallocHost((void**)&h_freq1, N_BYTES_I); // pinned host array
    cudaMallocHost((void**)&h_loss1, N_BYTES_F);
    cudaMallocHost((void**)&h_freq2, N_BYTES_I);
    cudaMallocHost((void**)&h_loss2, N_BYTES_F);
    float dur, mean1, mean2; // to record duration and averages
    /****** asynchronous run *****
     ******************************/
    cudaEvent_t start = get_time(); // start clock
    cudaEvent_t copyEnd1, copyEnd2; // event to ensure copying loss is finished
    // FIX: restore the '&' operators that had been mangled by an HTML-entity
    // corruption ('&copy...' rendered as a copyright sign); as written the
    // two calls below did not compile.
    cudaEventCreate(&copyEnd1); cudaEventCreate(&copyEnd2);
    // first simulate frequency of losses in two streams
    sim_freq<<<N_BLK, N_THRD, 0, s1>>>(d_freq1, d_prm1, N_SIMS);
    sim_freq<<<N_BLK, N_THRD, 0, s2>>>(d_freq2, d_prm2, N_SIMS);
    // based on frequency, draw pareto loss amounts and sum
    sim_severity<<<N_BLK, N_THRD, 0, s1>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
    sim_severity<<<N_BLK, N_THRD, 0, s2>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
    cudaMemcpyAsync(h_loss1, d_loss1, N_BYTES_F, cudaMemcpyDeviceToHost, s1);
    cudaEventRecord(copyEnd1, s1); // finish copying result in stream 1
    cudaMemcpyAsync(h_loss2, d_loss2, N_BYTES_F, cudaMemcpyDeviceToHost, s2);
    cudaEventRecord(copyEnd2, s2); // finish copying result in stream 2
    cudaMemcpyAsync(h_freq1, d_freq1, N_BYTES_I, cudaMemcpyDeviceToHost, s1);
    cudaMemcpyAsync(h_freq2, d_freq2, N_BYTES_I, cudaMemcpyDeviceToHost, s2);
    cudaEventSynchronize(copyEnd1); // wait for result copy before calculation
    mean1 = calcMean(h_loss1, N_SIMS);
    cudaEventSynchronize(copyEnd2); // wait for result copy before calculation
    mean2 = calcMean(h_loss2, N_SIMS);
    cudaStreamSynchronize( s1 );
    cudaStreamSynchronize( s2 );
    cudaEvent_t stop = get_time(); // stop clock
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&dur, start, stop);
    printf("\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
    mean1, mean2, dur);
    /****** synchronous run ******
     ******************************/
    start = get_time();
    sim_freq<<<N_BLK, N_THRD>>>(d_freq1, d_prm1, N_SIMS);
    sim_severity<<<N_BLK, N_THRD>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
    cudaMemcpy(h_freq1, d_freq1, N_BYTES_I, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_loss1, d_loss1, N_BYTES_F, cudaMemcpyDeviceToHost);
    sim_freq<<<N_BLK, N_THRD>>>(d_freq2, d_prm2, N_SIMS);
    sim_severity<<<N_BLK, N_THRD>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
    cudaMemcpy(h_freq2, d_freq2, N_BYTES_I, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_loss2, d_loss2, N_BYTES_F, cudaMemcpyDeviceToHost);
    mean1 = calcMean(h_loss1, N_SIMS);
    mean2 = calcMean(h_loss2, N_SIMS);
    stop = get_time(); // stop time
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&dur, start, stop);
    printf("\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
    mean1, mean2, dur);
    // FIX: release GPU/host resources before exit (the original leaked every
    // allocation, event, registration and stream).
    cudaEventDestroy(start);    cudaEventDestroy(stop);
    cudaEventDestroy(copyEnd1); cudaEventDestroy(copyEnd2);
    cudaFree(d_prm1);  cudaFree(d_prm2);
    cudaFree(d_freq1); cudaFree(d_freq2);
    cudaFree(d_loss1); cudaFree(d_loss2);
    cudaFreeHost(h_freq1); cudaFreeHost(h_freq2);
    cudaFreeHost(h_loss1); cudaFreeHost(h_loss2);
    cudaHostUnregister(h_prm1); cudaHostUnregister(h_prm2);
    cudaStreamDestroy(s1); cudaStreamDestroy(s2);
    return EXIT_SUCCESS;
} | .file "tmpxft_0016f86c_00000000-6_stream_registers.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
# atexit hook emitted by cudafe: unregisters this module's fat binary using
# the handle cached in _ZL20__cudaFatCubinHandle.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2558:
.cfi_startproc
endbr64
subq $8, %rsp # re-align the stack to 16 bytes for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2558:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8calcMeanPfj
.type _Z8calcMeanPfj, @function
# float calcMean(float*, unsigned): accumulates arr[i]/n in a double
# (%xmm0) over a pointer-walk loop, then narrows to float on return.
# Returns 0.0f when n == 0 (the .L8 path bypasses the loop).
_Z8calcMeanPfj:
.LFB2551:
.cfi_startproc
endbr64
testl %esi, %esi
je .L8 # n == 0: return 0.0f
movq %rdi, %rax # %rax walks the array
movl %esi, %edx
leaq (%rdi,%rdx,4), %rdx # %rdx = one-past-the-end pointer
pxor %xmm0, %xmm0 # running double-precision sum
movl %esi, %esi # zero-extend n into %rsi
pxor %xmm2, %xmm2
cvtsi2ssq %rsi, %xmm2 # (float)n, the per-element divisor
.L10:
movss (%rax), %xmm1
divss %xmm2, %xmm1 # arr[i] / n in single precision
cvtss2sd %xmm1, %xmm1 # widen, then accumulate in double
addsd %xmm1, %xmm0
addq $4, %rax
cmpq %rdx, %rax
jne .L10
.L4:
cvtsd2ss %xmm0, %xmm0 # narrow the result back to float
ret
.L8:
pxor %xmm0, %xmm0
jmp .L4
.cfi_endproc
.LFE2551:
.size _Z8calcMeanPfj, .-_Z8calcMeanPfj
.globl _Z8get_timev
.type _Z8get_timev, @function
# cudaEvent_t get_time(): creates an event in the stack slot at (%rsp),
# records it on the default stream (2nd arg = 0), and returns the handle in
# %rax. Guarded by a stack-protector canary.
_Z8get_timev:
.LFB2552:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax # load the stack canary
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
call cudaEventCreate@PLT
movl $0, %esi # stream 0 (default stream)
movq (%rsp), %rdi
call cudaEventRecord@PLT
movq (%rsp), %rax # return the event handle
movq 8(%rsp), %rdx
subq %fs:40, %rdx # verify the canary before returning
jne .L14
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2552:
.size _Z8get_timev, .-_Z8get_timev
.globl _Z6asynchv
.type _Z6asynchv, @function
# void asynch(): no-op (the C++ body is just 'return;').
_Z6asynchv:
.LFB2553:
.cfi_startproc
endbr64
ret
.cfi_endproc
.LFE2553:
.size _Z6asynchv, .-_Z6asynchv
.globl _Z30__device_stub__Z8sim_freqPjPfjPjPfj
.type _Z30__device_stub__Z8sim_freqPjPfjPjPfj, @function
_Z30__device_stub__Z8sim_freqPjPfjPjPfj:
.LFB2580:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8sim_freqPjPfj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2580:
.size _Z30__device_stub__Z8sim_freqPjPfjPjPfj, .-_Z30__device_stub__Z8sim_freqPjPfjPjPfj
.globl _Z8sim_freqPjPfj
.type _Z8sim_freqPjPfj, @function
# Host-side trampoline for the sim_freq kernel: forwards its (already
# register-resident) arguments straight to the generated device stub, which
# pops the launch configuration and calls cudaLaunchKernel.
_Z8sim_freqPjPfj:
.LFB2581:
.cfi_startproc
endbr64
subq $8, %rsp # re-align the stack for the call
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2581:
.size _Z8sim_freqPjPfj, .-_Z8sim_freqPjPfj
.globl _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
.type _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j, @function
_Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j:
.LFB2582:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L28
.L24:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L29
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12sim_severityPfPjS_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L24
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2582:
.size _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j, .-_Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
.globl _Z12sim_severityPfPjS_j
.type _Z12sim_severityPfPjS_j, @function
# Host-side trampoline for the sim_severity kernel: forwards its (already
# register-resident) arguments straight to the generated device stub, which
# pops the launch configuration and calls cudaLaunchKernel.
_Z12sim_severityPfPjS_j:
.LFB2583:
.cfi_startproc
endbr64
subq $8, %rsp # re-align the stack for the call
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2583:
.size _Z12sim_severityPfPjS_j, .-_Z12sim_severityPfPjS_j
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Running %u simulations ...\n"
.LC8:
.string "Usage: %s [nSimulations].\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC9:
.string "\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n"
.align 8
.LC10:
.string "\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n"
.text
.globl main
.type main, @function
main:
.LFB2554:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $200, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jne .L33
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %ecx
movl %eax, N_SIMS(%rip)
movl %eax, %edx
shrl $9, %edx
addl $1, %edx
movl %edx, N_BLK(%rip)
movl $512, %edx
cmpl %edx, %eax
cmovbe %eax, %edx
movl %edx, N_THRD(%rip)
sall $2, %eax
movl %eax, N_BYTES_F(%rip)
movl %eax, N_BYTES_I(%rip)
movl %ecx, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 24(%rsp), %rdi
call cudaStreamCreate@PLT
leaq 32(%rsp), %rdi
call cudaStreamCreate@PLT
movl $0x42480000, 160(%rsp)
movl $0x40400000, 164(%rsp)
movl $0x3f000000, 168(%rsp)
movl $0x3fc00000, 172(%rsp)
movl $0x3f4ccccd, 176(%rsp)
movl $0x40a00000, 180(%rsp)
movl N_BYTES_PRM(%rip), %esi
leaq 160(%rsp), %rbp
movl $0, %edx
movq %rbp, %rdi
call cudaHostRegister@PLT
movl N_BYTES_PRM(%rip), %esi
leaq 172(%rsp), %rbx
movl $0, %edx
movq %rbx, %rdi
call cudaHostRegister@PLT
movl N_BYTES_PRM(%rip), %esi
leaq 40(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_PRM(%rip), %esi
leaq 48(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_PRM(%rip), %edx
movq 24(%rsp), %r8
movl $1, %ecx
movq %rbp, %rsi
movq 40(%rsp), %rdi
call cudaMemcpyAsync@PLT
movl N_BYTES_PRM(%rip), %edx
movq 32(%rsp), %r8
movl $1, %ecx
movq %rbx, %rsi
movq 48(%rsp), %rdi
call cudaMemcpyAsync@PLT
movl N_BYTES_I(%rip), %esi
leaq 64(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_F(%rip), %esi
leaq 96(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_I(%rip), %esi
leaq 80(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_F(%rip), %esi
leaq 112(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_I(%rip), %esi
leaq 56(%rsp), %rdi
call cudaMallocHost@PLT
movl N_BYTES_F(%rip), %esi
leaq 88(%rsp), %rdi
call cudaMallocHost@PLT
movl N_BYTES_I(%rip), %esi
leaq 72(%rsp), %rdi
call cudaMallocHost@PLT
movl N_BYTES_F(%rip), %esi
leaq 104(%rsp), %rdi
call cudaMallocHost@PLT
call _Z8get_timev
movq %rax, %rbp
leaq 120(%rsp), %rdi
call cudaEventCreate@PLT
leaq 128(%rsp), %rdi
call cudaEventCreate@PLT
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movq 24(%rsp), %r9
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L47
.L34:
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movq 32(%rsp), %r9
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L48
.L37:
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movq 24(%rsp), %r9
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L49
.L38:
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movq 32(%rsp), %r9
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L39:
movl N_BYTES_F(%rip), %edx
movq 24(%rsp), %r8
movl $2, %ecx
movq 96(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpyAsync@PLT
movq 24(%rsp), %rsi
movq 120(%rsp), %rdi
call cudaEventRecord@PLT
movl N_BYTES_F(%rip), %edx
movq 32(%rsp), %r8
movl $2, %ecx
movq 112(%rsp), %rsi
movq 104(%rsp), %rdi
call cudaMemcpyAsync@PLT
movq 32(%rsp), %rsi
movq 128(%rsp), %rdi
call cudaEventRecord@PLT
movl N_BYTES_I(%rip), %edx
movq 24(%rsp), %r8
movl $2, %ecx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpyAsync@PLT
movl N_BYTES_I(%rip), %edx
movq 32(%rsp), %r8
movl $2, %ecx
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpyAsync@PLT
movq 120(%rsp), %rdi
call cudaEventSynchronize@PLT
movl N_SIMS(%rip), %esi
movq 88(%rsp), %rdi
call _Z8calcMeanPfj
movss %xmm0, 8(%rsp)
movq 128(%rsp), %rdi
call cudaEventSynchronize@PLT
movl N_SIMS(%rip), %esi
movq 104(%rsp), %rdi
call _Z8calcMeanPfj
movss %xmm0, 12(%rsp)
movq 24(%rsp), %rdi
call cudaStreamSynchronize@PLT
movq 32(%rsp), %rdi
call cudaStreamSynchronize@PLT
call _Z8get_timev
movq %rax, %rbx
movq %rax, %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq %rbx, %rdx
movq %rbp, %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 8(%rsp), %xmm0
pxor %xmm2, %xmm2
cvtss2sd 20(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 12(%rsp), %xmm1
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
call _Z8get_timev
movq %rax, %rbp
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L51
.L40:
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L41:
movl N_BYTES_I(%rip), %edx
movl $2, %ecx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl N_BYTES_F(%rip), %edx
movl $2, %ecx
movq 96(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L42:
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L43:
movl N_BYTES_I(%rip), %edx
movl $2, %ecx
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl N_BYTES_F(%rip), %edx
movl $2, %ecx
movq 112(%rsp), %rsi
movq 104(%rsp), %rdi
call cudaMemcpy@PLT
movl N_SIMS(%rip), %ebx
movl %ebx, %esi
movq 88(%rsp), %rdi
call _Z8calcMeanPfj
movss %xmm0, 8(%rsp)
movl %ebx, %esi
movq 104(%rsp), %rdi
call _Z8calcMeanPfj
movss %xmm0, 12(%rsp)
call _Z8get_timev
movq %rax, %rbx
movq %rax, %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq %rbx, %rdx
movq %rbp, %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 8(%rsp), %xmm0
pxor %xmm2, %xmm2
cvtss2sd 20(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 12(%rsp), %xmm1
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
movl $0, %eax
jmp .L32
.L33:
movq (%rsi), %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
.L32:
movq 184(%rsp), %rdx
subq %fs:40, %rdx
jne .L55
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
movl N_SIMS(%rip), %edx
movq 40(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
jmp .L34
.L48:
movl N_SIMS(%rip), %edx
movq 48(%rsp), %rsi
movq 80(%rsp), %rdi
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
jmp .L37
.L49:
movl N_SIMS(%rip), %ecx
movq 40(%rsp), %rdx
movq 64(%rsp), %rsi
movq 96(%rsp), %rdi
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
jmp .L38
.L50:
movl N_SIMS(%rip), %ecx
movq 48(%rsp), %rdx
movq 80(%rsp), %rsi
movq 112(%rsp), %rdi
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
jmp .L39
.L51:
movl N_SIMS(%rip), %edx
movq 40(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
jmp .L40
.L52:
movl N_SIMS(%rip), %ecx
movq 40(%rsp), %rdx
movq 64(%rsp), %rsi
movq 96(%rsp), %rdi
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
jmp .L41
.L53:
movl N_SIMS(%rip), %edx
movq 48(%rsp), %rsi
movq 80(%rsp), %rdi
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
jmp .L42
.L54:
movl N_SIMS(%rip), %ecx
movq 48(%rsp), %rdx
movq 80(%rsp), %rsi
movq 112(%rsp), %rdi
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
jmp .L43
.L55:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2554:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z12sim_severityPfPjS_j"
.LC12:
.string "_Z8sim_freqPjPfj"
.LC13:
.string "precalc_xorwow_matrix"
.LC14:
.string "precalc_xorwow_offset_matrix"
.LC15:
.string "mrg32k3aM1"
.LC16:
.string "mrg32k3aM2"
.LC17:
.string "mrg32k3aM1SubSeq"
.LC18:
.string "mrg32k3aM2SubSeq"
.LC19:
.string "mrg32k3aM1Seq"
.LC20:
.string "mrg32k3aM2Seq"
.LC21:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2585:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z12sim_severityPfPjS_j(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z8sim_freqPjPfj(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC20(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC21(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2585:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl N_BYTES_F
.bss
.align 4
.type N_BYTES_F, @object
.size N_BYTES_F, 4
N_BYTES_F:
.zero 4
.globl N_BYTES_I
.align 4
.type N_BYTES_I, @object
.size N_BYTES_I, 4
N_BYTES_I:
.zero 4
.globl N_THRD
.align 4
.type N_THRD, @object
.size N_THRD, 4
N_THRD:
.zero 4
.globl N_BLK
.align 4
.type N_BLK, @object
.size N_BLK, 4
N_BLK:
.zero 4
.globl N_SIMS
.align 4
.type N_SIMS, @object
.size N_SIMS, 4
N_SIMS:
.zero 4
.globl N_BYTES_PRM
.data
.align 4
.type N_BYTES_PRM, @object
.size N_BYTES_PRM, 4
N_BYTES_PRM:
.long 12
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <curand.h>
#include <curand_kernel.h>
// In the following section, define the prob distribution parameters
#define N_PARAMS 3
#define PARAM1 50.0f, 3.0f, 0.5f // format: LAMBDA, A, B
#define PARAM2 1.5f, 0.8f, 5.0f
// parameters saved as constants
unsigned int N_BYTES_PRM = N_PARAMS * sizeof(float); // size of parameter
unsigned int N_SIMS, N_BLK, N_THRD, N_BYTES_I, N_BYTES_F;
const unsigned int MAX_THREADS = 512; // max threads per block
// Calculate and return mean of an array of floats
// Mean of the first n floats in arr.
//
// Keeps the original contract: each element is divided by n before being
// added into a double accumulator, and n == 0 yields 0.0f because the loop
// never runs. The double total is narrowed to float on return.
float calcMean(float arr[], unsigned int const n) {
    double total = 0.0;
    unsigned int k = 0;
    while (k < n) {
        total += arr[k] / n;
        ++k;
    }
    return total; // double -> float narrowing, as in the original
}
// Create a CUDA event and record it on the default stream, returning it
// as a timestamp. The caller owns the event (cudaEventDestroy).
// NOTE(review): CUDA return codes are deliberately unchecked here, matching
// the rest of this program.
__host__ cudaEvent_t get_time(void) {
    cudaEvent_t stamp;
    cudaEventCreate(&stamp);
    cudaEventRecord(stamp);
    return stamp;
}
// Based on parameter, draw poisson random number as frequency of losses
// Kernel: one thread per simulation draws a Poisson-distributed loss count.
// prm[0] is the Poisson rate lambda; the count is written to f_out[gid].
// Threads beyond N exit immediately (grid-tail guard).
__global__ void sim_freq(unsigned int *f_out, float *prm, unsigned int N) {
    unsigned int const gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) return;              // guard the grid tail
    curandState_t rng;
    curand_init(gid, 0, 0, &rng);      // seed RNG with the thread index
    f_out[gid] = curand_poisson(&rng, prm[0]);
}
// Based on parameter and freq, draw and sum pareto loss amounts
// Kernel: for each simulation, draw freq[tid] Pareto loss amounts via
// inverse-CDF sampling and store their sum in loss_out[tid].
// prm[1] = Pareto shape A, prm[2] = Pareto scale B.
//
// Fixes vs. the previous version:
//  - freq[tid] is hoisted into a register so the loop bound is not
//    re-read from global memory each iteration (the compiler cannot cache
//    it itself because loss_out is not __restrict__ and may alias freq),
//  - the loop counter is unsigned, removing the signed/unsigned comparison
//    (and misbehavior for counts above INT_MAX).
__global__ void sim_severity(float *loss_out, unsigned int *freq, float *prm,
                             const unsigned int N) {
    unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < N) {
        double A = prm[1]; double B = prm[2];   // two pareto parameters
        curandState_t state;                    // per-thread RNG state
        curand_init(tid, 0, 0, &state);         // seed with thread index
        double sum = 0.0;
        unsigned int const count = freq[tid];   // hoisted loop bound
        for (unsigned int f = 0; f < count; f++) {
            double unif = curand_uniform_double(&state);
            sum += B / pow(1 - unif, 1 / A);    // quantile function (inverse CDF)
        }
        loss_out[tid] = (float) sum;            // sum of all losses
    }
}
// Intentional no-op; kept only so the external interface is unchanged.
void asynch() {
}
int main(int argc, char* argv[]) {
if (argc == 2) { // get number of simulations based on CMDLINE input
N_SIMS = atoi(argv[1]);
} else {
printf("Usage: %s [nSimulations].\n", argv[0]);
return EXIT_FAILURE;
}
N_BLK = N_SIMS / MAX_THREADS + 1; // min of one block
N_THRD = std::min(N_SIMS, MAX_THREADS); // num of threads per block
N_BYTES_F = N_SIMS * sizeof(float); // size of loss array
N_BYTES_I = N_SIMS * sizeof(unsigned int); // size of frequency array
printf("Running %u simulations ...\n", N_SIMS);
cudaStream_t s1, s2; // Create and initialize streams
cudaStreamCreate(&s1);
cudaStreamCreate(&s2);
// allocate and copy parameter to device
float h_prm1 [N_PARAMS] = {PARAM1};
float h_prm2 [N_PARAMS] = {PARAM2};
cudaHostRegister(h_prm1, N_BYTES_PRM, cudaHostRegisterDefault);
cudaHostRegister(h_prm2, N_BYTES_PRM, cudaHostRegisterDefault);
float *d_prm1, *d_prm2; // parameter on device memory
cudaMalloc((void **)&d_prm1, N_BYTES_PRM); // allocate and copy
cudaMalloc((void **)&d_prm2, N_BYTES_PRM);
cudaMemcpyAsync(d_prm1, h_prm1, N_BYTES_PRM, cudaMemcpyHostToDevice, s1);
cudaMemcpyAsync(d_prm2, h_prm2, N_BYTES_PRM, cudaMemcpyHostToDevice, s2);
unsigned int *h_freq1, *d_freq1, *h_freq2, *d_freq2; // frequency arrays
float *h_loss1, *d_loss1, *h_loss2, *d_loss2; // loss arrays
cudaMalloc((void **)&d_freq1, N_BYTES_I); // device mem for freq and loss
cudaMalloc((void **)&d_loss1, N_BYTES_F);
cudaMalloc((void **)&d_freq2, N_BYTES_I);
cudaMalloc((void **)&d_loss2, N_BYTES_F);
cudaMallocHost((void**)&h_freq1, N_BYTES_I); // pinned host array
cudaMallocHost((void**)&h_loss1, N_BYTES_F);
cudaMallocHost((void**)&h_freq2, N_BYTES_I);
cudaMallocHost((void**)&h_loss2, N_BYTES_F);
float dur, mean1, mean2; // to record duration and averages
/****** asynchronus run ******
******************************/
cudaEvent_t start = get_time(); // start clock
cudaEvent_t copyEnd1, copyEnd2; // event to ensure copying loss is finished
cudaEventCreate(©End1); cudaEventCreate(©End2);
// first simulate frequency of losses in two streams
sim_freq<<<N_BLK, N_THRD, 0, s1>>>(d_freq1, d_prm1, N_SIMS);
sim_freq<<<N_BLK, N_THRD, 0, s2>>>(d_freq2, d_prm2, N_SIMS);
// based on frequency, draw pareto loss amounts and sum
sim_severity<<<N_BLK, N_THRD, 0, s1>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
sim_severity<<<N_BLK, N_THRD, 0, s2>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
cudaMemcpyAsync(h_loss1, d_loss1, N_BYTES_F, cudaMemcpyDeviceToHost, s1);
cudaEventRecord(copyEnd1, s1); // finish copying result in stream 1
cudaMemcpyAsync(h_loss2, d_loss2, N_BYTES_F, cudaMemcpyDeviceToHost, s2);
cudaEventRecord(copyEnd2, s2); // finish copying result in stream 2
cudaMemcpyAsync(h_freq1, d_freq1, N_BYTES_I, cudaMemcpyDeviceToHost, s1);
cudaMemcpyAsync(h_freq2, d_freq2, N_BYTES_I, cudaMemcpyDeviceToHost, s2);
cudaEventSynchronize(copyEnd1); // wait for result copy before calculation
mean1 = calcMean(h_loss1, N_SIMS);
cudaEventSynchronize(copyEnd2); // wait for result copy before calculation
mean2 = calcMean(h_loss2, N_SIMS);
cudaStreamSynchronize( s1 );
cudaStreamSynchronize( s2 );
cudaEvent_t stop = get_time(); // stop clock
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dur, start, stop);
printf("\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
mean1, mean2, dur);
/****** synchronus run *******
******************************/
start = get_time();
sim_freq<<<N_BLK, N_THRD>>>(d_freq1, d_prm1, N_SIMS);
sim_severity<<<N_BLK, N_THRD>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
cudaMemcpy(h_freq1, d_freq1, N_BYTES_I, cudaMemcpyDeviceToHost);
cudaMemcpy(h_loss1, d_loss1, N_BYTES_F, cudaMemcpyDeviceToHost);
sim_freq<<<N_BLK, N_THRD>>>(d_freq2, d_prm2, N_SIMS);
sim_severity<<<N_BLK, N_THRD>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
cudaMemcpy(h_freq2, d_freq2, N_BYTES_I, cudaMemcpyDeviceToHost);
cudaMemcpy(h_loss2, d_loss2, N_BYTES_F, cudaMemcpyDeviceToHost);
mean1 = calcMean(h_loss1, N_SIMS);
mean2 = calcMean(h_loss2, N_SIMS);
stop = get_time(); // stop time
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dur, start, stop);
printf("\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
mean1, mean2, dur);
return EXIT_SUCCESS;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// In the following section, define the prob distribution parameters
#define N_PARAMS 3
#define PARAM1 50.0f, 3.0f, 0.5f // format: LAMBDA, A, B
#define PARAM2 1.5f, 0.8f, 5.0f
// parameters saved as constants
unsigned int N_BYTES_PRM = N_PARAMS * sizeof(float); // size of parameter
unsigned int N_SIMS, N_BLK, N_THRD, N_BYTES_I, N_BYTES_F;
const unsigned int MAX_THREADS = 512; // max threads per block
// Calculate and return mean of an array of floats
// Compute the arithmetic mean of arr[0..n-1].
//
// Improvements over the previous version:
//  - accumulates raw values in double and divides ONCE at the end
//    (one division instead of n, and less rounding error than summing
//    pre-divided terms),
//  - returns 0.0f explicitly for n == 0, guarding the final division,
//  - makes the double -> float narrowing explicit.
float calcMean(float arr[], unsigned int const n) {
    if (n == 0) return 0.0f;          // empty input: avoid division by zero
    double sum = 0.0;
    for (unsigned int i = 0; i < n; i++) {
        sum += arr[i];
    }
    return (float)(sum / n);          // explicit narrowing conversion
}
// Create a HIP event and record it on the default stream, returning it
// as a timestamp. The caller owns the event (hipEventDestroy).
// NOTE(review): HIP return codes are deliberately unchecked here, matching
// the rest of this program.
__host__ hipEvent_t get_time(void) {
    hipEvent_t stamp;
    hipEventCreate(&stamp);
    hipEventRecord(stamp);
    return stamp;
}
// Based on parameter, draw poisson random number as frequency of losses
// Kernel: one thread per simulation draws a Poisson-distributed loss count.
// prm[0] is the Poisson rate lambda; the count is written to f_out[gid].
// Threads beyond N exit immediately (grid-tail guard).
__global__ void sim_freq(unsigned int *f_out, float *prm, unsigned int N) {
    unsigned int const gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) return;               // guard the grid tail
    hiprandState_t rng;
    hiprand_init(gid, 0, 0, &rng);      // seed RNG with the thread index
    f_out[gid] = hiprand_poisson(&rng, prm[0]);
}
// Based on parameter and freq, draw and sum pareto loss amounts
// Kernel: for each simulation, draw freq[tid] Pareto loss amounts via
// inverse-CDF sampling and store their sum in loss_out[tid].
// prm[1] = Pareto shape A, prm[2] = Pareto scale B.
//
// Fixes vs. the previous version:
//  - freq[tid] is hoisted into a register so the loop bound is not
//    re-read from global memory each iteration (the compiler cannot cache
//    it itself because loss_out is not __restrict__ and may alias freq),
//  - the loop counter is unsigned, removing the signed/unsigned comparison
//    (and misbehavior for counts above INT_MAX).
__global__ void sim_severity(float *loss_out, unsigned int *freq, float *prm,
                             const unsigned int N) {
    unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < N) {
        double A = prm[1]; double B = prm[2];    // two pareto parameters
        hiprandState_t state;                    // per-thread RNG state
        hiprand_init(tid, 0, 0, &state);         // seed with thread index
        double sum = 0.0;
        unsigned int const count = freq[tid];    // hoisted loop bound
        for (unsigned int f = 0; f < count; f++) {
            double unif = hiprand_uniform_double(&state);
            sum += B / pow(1 - unif, 1 / A);     // quantile function (inverse CDF)
        }
        loss_out[tid] = (float) sum;             // sum of all losses
    }
}
// Intentional no-op; kept only so the external interface is unchanged.
void asynch() {
}
// Monte-Carlo loss simulation driver.
// Runs the frequency/severity pipeline twice — once asynchronously on two
// HIP streams, once synchronously — and reports the mean loss and elapsed
// time for each.
//
// Fix vs. the previous version: every acquired resource is now released
// before exit (streams, events — including the first start/stop pair that
// was previously leaked when reassigned — registered host memory, device
// and pinned allocations). NOTE(review): HIP return codes are still
// unchecked, matching the original style.
int main(int argc, char* argv[]) {
    if (argc == 2) {                           // number of sims from CMDLINE
        N_SIMS = atoi(argv[1]);
    } else {
        printf("Usage: %s [nSimulations].\n", argv[0]);
        return EXIT_FAILURE;
    }
    N_BLK = N_SIMS / MAX_THREADS + 1;          // min of one block
    N_THRD = std::min(N_SIMS, MAX_THREADS);    // num of threads per block
    N_BYTES_F = N_SIMS * sizeof(float);        // size of loss array
    N_BYTES_I = N_SIMS * sizeof(unsigned int); // size of frequency array
    printf("Running %u simulations ...\n", N_SIMS);
    hipStream_t s1, s2;                        // create and init streams
    hipStreamCreate(&s1);
    hipStreamCreate(&s2);
    // allocate and copy parameters to device
    float h_prm1 [N_PARAMS] = {PARAM1};
    float h_prm2 [N_PARAMS] = {PARAM2};
    hipHostRegister(h_prm1, N_BYTES_PRM, hipHostRegisterDefault);
    hipHostRegister(h_prm2, N_BYTES_PRM, hipHostRegisterDefault);
    float *d_prm1, *d_prm2;                    // parameters in device memory
    hipMalloc((void **)&d_prm1, N_BYTES_PRM);
    hipMalloc((void **)&d_prm2, N_BYTES_PRM);
    hipMemcpyAsync(d_prm1, h_prm1, N_BYTES_PRM, hipMemcpyHostToDevice, s1);
    hipMemcpyAsync(d_prm2, h_prm2, N_BYTES_PRM, hipMemcpyHostToDevice, s2);
    unsigned int *h_freq1, *d_freq1, *h_freq2, *d_freq2;  // frequency arrays
    float *h_loss1, *d_loss1, *h_loss2, *d_loss2;         // loss arrays
    hipMalloc((void **)&d_freq1, N_BYTES_I);   // device mem for freq and loss
    hipMalloc((void **)&d_loss1, N_BYTES_F);
    hipMalloc((void **)&d_freq2, N_BYTES_I);
    hipMalloc((void **)&d_loss2, N_BYTES_F);
    hipHostMalloc((void**)&h_freq1, N_BYTES_I, hipHostMallocDefault);  // pinned host arrays
    hipHostMalloc((void**)&h_loss1, N_BYTES_F, hipHostMallocDefault);
    hipHostMalloc((void**)&h_freq2, N_BYTES_I, hipHostMallocDefault);
    hipHostMalloc((void**)&h_loss2, N_BYTES_F, hipHostMallocDefault);
    float dur, mean1, mean2;                   // duration and averages
    /****** asynchronous run ******
     ******************************/
    hipEvent_t start = get_time();             // start clock
    hipEvent_t copyEnd1, copyEnd2;             // mark completion of loss copies
    hipEventCreate(&copyEnd1); hipEventCreate(&copyEnd2);
    // first simulate frequency of losses in two streams
    sim_freq<<<N_BLK, N_THRD, 0, s1>>>(d_freq1, d_prm1, N_SIMS);
    sim_freq<<<N_BLK, N_THRD, 0, s2>>>(d_freq2, d_prm2, N_SIMS);
    // based on frequency, draw pareto loss amounts and sum
    sim_severity<<<N_BLK, N_THRD, 0, s1>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
    sim_severity<<<N_BLK, N_THRD, 0, s2>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
    hipMemcpyAsync(h_loss1, d_loss1, N_BYTES_F, hipMemcpyDeviceToHost, s1);
    hipEventRecord(copyEnd1, s1);              // loss copy done in stream 1
    hipMemcpyAsync(h_loss2, d_loss2, N_BYTES_F, hipMemcpyDeviceToHost, s2);
    hipEventRecord(copyEnd2, s2);              // loss copy done in stream 2
    hipMemcpyAsync(h_freq1, d_freq1, N_BYTES_I, hipMemcpyDeviceToHost, s1);
    hipMemcpyAsync(h_freq2, d_freq2, N_BYTES_I, hipMemcpyDeviceToHost, s2);
    hipEventSynchronize(copyEnd1);             // wait for copy before reading
    mean1 = calcMean(h_loss1, N_SIMS);
    hipEventSynchronize(copyEnd2);             // wait for copy before reading
    mean2 = calcMean(h_loss2, N_SIMS);
    hipStreamSynchronize( s1 );
    hipStreamSynchronize( s2 );
    hipEvent_t stop = get_time();              // stop clock
    hipEventSynchronize(stop);
    hipEventElapsedTime(&dur, start, stop);
    printf("\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
            mean1, mean2, dur);
    /****** synchronous run *******
     ******************************/
    hipEventDestroy(start);                    // release before reassigning
    hipEventDestroy(stop);
    start = get_time();
    sim_freq<<<N_BLK, N_THRD>>>(d_freq1, d_prm1, N_SIMS);
    sim_severity<<<N_BLK, N_THRD>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
    hipMemcpy(h_freq1, d_freq1, N_BYTES_I, hipMemcpyDeviceToHost);
    hipMemcpy(h_loss1, d_loss1, N_BYTES_F, hipMemcpyDeviceToHost);
    sim_freq<<<N_BLK, N_THRD>>>(d_freq2, d_prm2, N_SIMS);
    sim_severity<<<N_BLK, N_THRD>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
    hipMemcpy(h_freq2, d_freq2, N_BYTES_I, hipMemcpyDeviceToHost);
    hipMemcpy(h_loss2, d_loss2, N_BYTES_F, hipMemcpyDeviceToHost);
    mean1 = calcMean(h_loss1, N_SIMS);
    mean2 = calcMean(h_loss2, N_SIMS);
    stop = get_time();                         // stop time
    hipEventSynchronize(stop);
    hipEventElapsedTime(&dur, start, stop);
    printf("\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
            mean1, mean2, dur);
    // release all resources acquired above (previously leaked)
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipEventDestroy(copyEnd1);
    hipEventDestroy(copyEnd2);
    hipHostFree(h_freq1); hipHostFree(h_loss1);
    hipHostFree(h_freq2); hipHostFree(h_loss2);
    hipFree(d_freq1); hipFree(d_loss1);
    hipFree(d_freq2); hipFree(d_loss2);
    hipFree(d_prm1);  hipFree(d_prm2);
    hipHostUnregister(h_prm1);
    hipHostUnregister(h_prm2);
    hipStreamDestroy(s1);
    hipStreamDestroy(s2);
    return EXIT_SUCCESS;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// In the following section, define the prob distribution parameters
#define N_PARAMS 3
#define PARAM1 50.0f, 3.0f, 0.5f // format: LAMBDA, A, B
#define PARAM2 1.5f, 0.8f, 5.0f
// parameters saved as constants
unsigned int N_BYTES_PRM = N_PARAMS * sizeof(float); // size of parameter
unsigned int N_SIMS, N_BLK, N_THRD, N_BYTES_I, N_BYTES_F;
const unsigned int MAX_THREADS = 512; // max threads per block
// Calculate and return mean of an array of floats
// Compute the arithmetic mean of arr[0..n-1].
//
// Improvements over the previous version:
//  - accumulates raw values in double and divides ONCE at the end
//    (one division instead of n, and less rounding error than summing
//    pre-divided terms),
//  - returns 0.0f explicitly for n == 0, guarding the final division,
//  - makes the double -> float narrowing explicit.
float calcMean(float arr[], unsigned int const n) {
    if (n == 0) return 0.0f;          // empty input: avoid division by zero
    double sum = 0.0;
    for (unsigned int i = 0; i < n; i++) {
        sum += arr[i];
    }
    return (float)(sum / n);          // explicit narrowing conversion
}
// Create a HIP event and record it on the default stream, returning it
// as a timestamp. The caller owns the event (hipEventDestroy).
// NOTE(review): HIP return codes are deliberately unchecked here, matching
// the rest of this program.
__host__ hipEvent_t get_time(void) {
    hipEvent_t stamp;
    hipEventCreate(&stamp);
    hipEventRecord(stamp);
    return stamp;
}
// Based on parameter, draw poisson random number as frequency of losses
// Kernel: one thread per simulation draws a Poisson-distributed loss count.
// prm[0] is the Poisson rate lambda; the count is written to f_out[gid].
// Threads beyond N exit immediately (grid-tail guard).
__global__ void sim_freq(unsigned int *f_out, float *prm, unsigned int N) {
    unsigned int const gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) return;               // guard the grid tail
    hiprandState_t rng;
    hiprand_init(gid, 0, 0, &rng);      // seed RNG with the thread index
    f_out[gid] = hiprand_poisson(&rng, prm[0]);
}
// Based on parameter and freq, draw and sum pareto loss amounts
// Kernel: for each simulation, draw freq[tid] Pareto loss amounts via
// inverse-CDF sampling and store their sum in loss_out[tid].
// prm[1] = Pareto shape A, prm[2] = Pareto scale B.
//
// Fixes vs. the previous version:
//  - freq[tid] is hoisted into a register so the loop bound is not
//    re-read from global memory each iteration (the compiler cannot cache
//    it itself because loss_out is not __restrict__ and may alias freq),
//  - the loop counter is unsigned, removing the signed/unsigned comparison
//    (and misbehavior for counts above INT_MAX).
__global__ void sim_severity(float *loss_out, unsigned int *freq, float *prm,
                             const unsigned int N) {
    unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < N) {
        double A = prm[1]; double B = prm[2];    // two pareto parameters
        hiprandState_t state;                    // per-thread RNG state
        hiprand_init(tid, 0, 0, &state);         // seed with thread index
        double sum = 0.0;
        unsigned int const count = freq[tid];    // hoisted loop bound
        for (unsigned int f = 0; f < count; f++) {
            double unif = hiprand_uniform_double(&state);
            sum += B / pow(1 - unif, 1 / A);     // quantile function (inverse CDF)
        }
        loss_out[tid] = (float) sum;             // sum of all losses
    }
}
// Intentional no-op; kept only so the external interface is unchanged.
void asynch() {
}
int main(int argc, char* argv[]) {
if (argc == 2) { // get number of simulations based on CMDLINE input
N_SIMS = atoi(argv[1]);
} else {
printf("Usage: %s [nSimulations].\n", argv[0]);
return EXIT_FAILURE;
}
N_BLK = N_SIMS / MAX_THREADS + 1; // min of one block
N_THRD = std::min(N_SIMS, MAX_THREADS); // num of threads per block
N_BYTES_F = N_SIMS * sizeof(float); // size of loss array
N_BYTES_I = N_SIMS * sizeof(unsigned int); // size of frequency array
printf("Running %u simulations ...\n", N_SIMS);
hipStream_t s1, s2; // Create and initialize streams
hipStreamCreate(&s1);
hipStreamCreate(&s2);
// allocate and copy parameter to device
float h_prm1 [N_PARAMS] = {PARAM1};
float h_prm2 [N_PARAMS] = {PARAM2};
hipHostRegister(h_prm1, N_BYTES_PRM, hipHostRegisterDefault);
hipHostRegister(h_prm2, N_BYTES_PRM, hipHostRegisterDefault);
float *d_prm1, *d_prm2; // parameter on device memory
hipMalloc((void **)&d_prm1, N_BYTES_PRM); // allocate and copy
hipMalloc((void **)&d_prm2, N_BYTES_PRM);
hipMemcpyAsync(d_prm1, h_prm1, N_BYTES_PRM, hipMemcpyHostToDevice, s1);
hipMemcpyAsync(d_prm2, h_prm2, N_BYTES_PRM, hipMemcpyHostToDevice, s2);
unsigned int *h_freq1, *d_freq1, *h_freq2, *d_freq2; // frequency arrays
float *h_loss1, *d_loss1, *h_loss2, *d_loss2; // loss arrays
hipMalloc((void **)&d_freq1, N_BYTES_I); // device mem for freq and loss
hipMalloc((void **)&d_loss1, N_BYTES_F);
hipMalloc((void **)&d_freq2, N_BYTES_I);
hipMalloc((void **)&d_loss2, N_BYTES_F);
hipHostMalloc((void**)&h_freq1, N_BYTES_I, hipHostMallocDefault); // pinned host array
hipHostMalloc((void**)&h_loss1, N_BYTES_F, hipHostMallocDefault);
hipHostMalloc((void**)&h_freq2, N_BYTES_I, hipHostMallocDefault);
hipHostMalloc((void**)&h_loss2, N_BYTES_F, hipHostMallocDefault);
float dur, mean1, mean2; // to record duration and averages
/****** asynchronus run ******
******************************/
hipEvent_t start = get_time(); // start clock
hipEvent_t copyEnd1, copyEnd2; // event to ensure copying loss is finished
hipEventCreate(©End1); hipEventCreate(©End2);
// first simulate frequency of losses in two streams
sim_freq<<<N_BLK, N_THRD, 0, s1>>>(d_freq1, d_prm1, N_SIMS);
sim_freq<<<N_BLK, N_THRD, 0, s2>>>(d_freq2, d_prm2, N_SIMS);
// based on frequency, draw pareto loss amounts and sum
sim_severity<<<N_BLK, N_THRD, 0, s1>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
sim_severity<<<N_BLK, N_THRD, 0, s2>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
hipMemcpyAsync(h_loss1, d_loss1, N_BYTES_F, hipMemcpyDeviceToHost, s1);
hipEventRecord(copyEnd1, s1); // finish copying result in stream 1
hipMemcpyAsync(h_loss2, d_loss2, N_BYTES_F, hipMemcpyDeviceToHost, s2);
hipEventRecord(copyEnd2, s2); // finish copying result in stream 2
hipMemcpyAsync(h_freq1, d_freq1, N_BYTES_I, hipMemcpyDeviceToHost, s1);
hipMemcpyAsync(h_freq2, d_freq2, N_BYTES_I, hipMemcpyDeviceToHost, s2);
hipEventSynchronize(copyEnd1); // wait for result copy before calculation
mean1 = calcMean(h_loss1, N_SIMS);
hipEventSynchronize(copyEnd2); // wait for result copy before calculation
mean2 = calcMean(h_loss2, N_SIMS);
hipStreamSynchronize( s1 );
hipStreamSynchronize( s2 );
hipEvent_t stop = get_time(); // stop clock
hipEventSynchronize(stop);
hipEventElapsedTime(&dur, start, stop);
printf("\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
mean1, mean2, dur);
/****** synchronus run *******
******************************/
start = get_time();
sim_freq<<<N_BLK, N_THRD>>>(d_freq1, d_prm1, N_SIMS);
sim_severity<<<N_BLK, N_THRD>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
hipMemcpy(h_freq1, d_freq1, N_BYTES_I, hipMemcpyDeviceToHost);
hipMemcpy(h_loss1, d_loss1, N_BYTES_F, hipMemcpyDeviceToHost);
sim_freq<<<N_BLK, N_THRD>>>(d_freq2, d_prm2, N_SIMS);
sim_severity<<<N_BLK, N_THRD>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
hipMemcpy(h_freq2, d_freq2, N_BYTES_I, hipMemcpyDeviceToHost);
hipMemcpy(h_loss2, d_loss2, N_BYTES_F, hipMemcpyDeviceToHost);
mean1 = calcMean(h_loss1, N_SIMS);
mean2 = calcMean(h_loss2, N_SIMS);
stop = get_time(); // stop time
hipEventSynchronize(stop);
hipEventElapsedTime(&dur, start, stop);
printf("\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
mean1, mean2, dur);
return EXIT_SUCCESS;
} | .text
.file "stream_registers.hip"
.globl _Z8calcMeanPfj # -- Begin function _Z8calcMeanPfj
.p2align 4, 0x90
.type _Z8calcMeanPfj,@function
_Z8calcMeanPfj: # @_Z8calcMeanPfj
.cfi_startproc
# %bb.0:
testl %esi, %esi
je .LBB0_1
# %bb.4: # %.lr.ph
movl %esi, %eax
cvtsi2ss %rax, %xmm1
xorpd %xmm0, %xmm0
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_5: # =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm1, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm0
incq %rcx
cmpq %rcx, %rax
jne .LBB0_5
# %bb.2: # %._crit_edge.loopexit
cvtsd2ss %xmm0, %xmm0
retq
.LBB0_1:
xorps %xmm0, %xmm0
retq
.Lfunc_end0:
.size _Z8calcMeanPfj, .Lfunc_end0-_Z8calcMeanPfj
.cfi_endproc
# -- End function
.globl _Z8get_timev # -- Begin function _Z8get_timev
.p2align 4, 0x90
.type _Z8get_timev,@function
_Z8get_timev: # @_Z8get_timev
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movq %rsp, %rdi
callq hipEventCreate
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z8get_timev, .Lfunc_end1-_Z8get_timev
.cfi_endproc
# -- End function
.globl _Z23__device_stub__sim_freqPjPfj # -- Begin function _Z23__device_stub__sim_freqPjPfj
.p2align 4, 0x90
.type _Z23__device_stub__sim_freqPjPfj,@function
_Z23__device_stub__sim_freqPjPfj: # @_Z23__device_stub__sim_freqPjPfj
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z23__device_stub__sim_freqPjPfj, .Lfunc_end2-_Z23__device_stub__sim_freqPjPfj
.cfi_endproc
# -- End function
.globl _Z27__device_stub__sim_severityPfPjS_j # -- Begin function _Z27__device_stub__sim_severityPfPjS_j
.p2align 4, 0x90
.type _Z27__device_stub__sim_severityPfPjS_j,@function
_Z27__device_stub__sim_severityPfPjS_j: # @_Z27__device_stub__sim_severityPfPjS_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z27__device_stub__sim_severityPfPjS_j, .Lfunc_end3-_Z27__device_stub__sim_severityPfPjS_j
.cfi_endproc
# -- End function
.globl _Z6asynchv # -- Begin function _Z6asynchv
.p2align 4, 0x90
.type _Z6asynchv,@function
_Z6asynchv: # @_Z6asynchv
.cfi_startproc
# %bb.0:
retq
.Lfunc_end4:
.size _Z6asynchv, .Lfunc_end4-_Z6asynchv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $272, %rsp # imm = 0x110
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
cmpl $2, %edi
jne .LBB5_35
# %bb.1:
movabsq $4294967296, %r15 # imm = 0x100000000
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, N_SIMS(%rip)
movl %eax, %ecx
shrl $9, %ecx
incl %ecx
movl %ecx, N_BLK(%rip)
cmpl $512, %eax # imm = 0x200
movl $512, %ecx # imm = 0x200
cmovbl %eax, %ecx
movl %ecx, N_THRD(%rip)
leal (,%rax,4), %ecx
movl %ecx, N_BYTES_F(%rip)
movl %ecx, N_BYTES_I(%rip)
movl $.L.str.1, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
leaq 120(%rsp), %rdi
callq hipStreamCreate
leaq 112(%rsp), %rdi
callq hipStreamCreate
movabsq $4629700418048884736, %rax # imm = 0x4040000042480000
movq %rax, 260(%rsp)
movl $1056964608, 268(%rsp) # imm = 0x3F000000
movabsq $4561245704529379328, %rax # imm = 0x3F4CCCCD3FC00000
movq %rax, 248(%rsp)
movl $1084227584, 256(%rsp) # imm = 0x40A00000
movl N_BYTES_PRM(%rip), %esi
leaq 260(%rsp), %r14
movq %r14, %rdi
xorl %edx, %edx
callq hipHostRegister
movl N_BYTES_PRM(%rip), %esi
leaq 248(%rsp), %rbx
movq %rbx, %rdi
xorl %edx, %edx
callq hipHostRegister
movl N_BYTES_PRM(%rip), %esi
leaq 160(%rsp), %rdi
callq hipMalloc
movl N_BYTES_PRM(%rip), %esi
leaq 152(%rsp), %rdi
callq hipMalloc
movq 160(%rsp), %rdi
movl N_BYTES_PRM(%rip), %edx
movq 120(%rsp), %r8
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpyAsync
movq 152(%rsp), %rdi
movl N_BYTES_PRM(%rip), %edx
movq 112(%rsp), %r8
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpyAsync
movl N_BYTES_I(%rip), %esi
leaq 144(%rsp), %rdi
callq hipMalloc
movl N_BYTES_F(%rip), %esi
leaq 192(%rsp), %rdi
callq hipMalloc
movl N_BYTES_I(%rip), %esi
leaq 136(%rsp), %rdi
callq hipMalloc
movl N_BYTES_F(%rip), %esi
leaq 176(%rsp), %rdi
callq hipMalloc
movl N_BYTES_I(%rip), %esi
leaq 240(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
movl N_BYTES_F(%rip), %esi
leaq 200(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
movl N_BYTES_I(%rip), %esi
leaq 232(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
movl N_BYTES_F(%rip), %esi
leaq 184(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
leaq 80(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 80(%rsp), %rbx
leaq 224(%rsp), %rdi
callq hipEventCreate
leaq 216(%rsp), %rdi
callq hipEventCreate
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
movq 120(%rsp), %r9
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_3
# %bb.2:
movq 144(%rsp), %rax
movq 160(%rsp), %rcx
movl N_SIMS(%rip), %edx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movl %edx, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_3:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
movq 112(%rsp), %r9
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_5
# %bb.4:
movq 136(%rsp), %rax
movq 152(%rsp), %rcx
movl N_SIMS(%rip), %edx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movl %edx, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_5:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
movq 120(%rsp), %r9
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_7
# %bb.6:
movq 192(%rsp), %rax
movq 144(%rsp), %rcx
movq 160(%rsp), %rdx
movl N_SIMS(%rip), %esi
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl %esi, 76(%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 76(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_7:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
movq 112(%rsp), %r9
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_9
# %bb.8:
movq 176(%rsp), %rax
movq 136(%rsp), %rcx
movq 152(%rsp), %rdx
movl N_SIMS(%rip), %esi
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl %esi, 76(%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 76(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_9:
movq 200(%rsp), %rdi
movq 192(%rsp), %rsi
movl N_BYTES_F(%rip), %edx
movq 120(%rsp), %r8
movl $2, %ecx
callq hipMemcpyAsync
movq 224(%rsp), %rdi
movq 120(%rsp), %rsi
callq hipEventRecord
movq 184(%rsp), %rdi
movq 176(%rsp), %rsi
movl N_BYTES_F(%rip), %edx
movq 112(%rsp), %r8
movl $2, %ecx
callq hipMemcpyAsync
movq 216(%rsp), %rdi
movq 112(%rsp), %rsi
callq hipEventRecord
movq 240(%rsp), %rdi
movq 144(%rsp), %rsi
movl N_BYTES_I(%rip), %edx
movq 120(%rsp), %r8
movl $2, %ecx
callq hipMemcpyAsync
movq 232(%rsp), %rdi
movq 136(%rsp), %rsi
movl N_BYTES_I(%rip), %edx
movq 112(%rsp), %r8
movl $2, %ecx
callq hipMemcpyAsync
movq 224(%rsp), %rdi
callq hipEventSynchronize
movl N_SIMS(%rip), %eax
xorps %xmm0, %xmm0
movsd %xmm0, 168(%rsp) # 8-byte Spill
testq %rax, %rax
je .LBB5_13
# %bb.10: # %.lr.ph.i
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movq 200(%rsp), %rcx
xorpd %xmm1, %xmm1
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_11: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm1
incq %rdx
cmpq %rdx, %rax
jne .LBB5_11
# %bb.12: # %._crit_edge.loopexit.i
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
.LBB5_13: # %_Z8calcMeanPfj.exit
movsd %xmm0, 208(%rsp) # 8-byte Spill
movq 216(%rsp), %rdi
callq hipEventSynchronize
movl N_SIMS(%rip), %eax
testq %rax, %rax
je .LBB5_17
# %bb.14: # %.lr.ph.i69
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movq 184(%rsp), %rcx
xorpd %xmm1, %xmm1
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_15: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm1
incq %rdx
cmpq %rdx, %rax
jne .LBB5_15
# %bb.16: # %._crit_edge.loopexit.i75
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
movsd %xmm0, 168(%rsp) # 8-byte Spill
.LBB5_17: # %_Z8calcMeanPfj.exit77
movq 120(%rsp), %rdi
callq hipStreamSynchronize
movq 112(%rsp), %rdi
callq hipStreamSynchronize
leaq 80(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 80(%rsp), %r14
movq %r14, %rdi
callq hipEventSynchronize
leaq 132(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
callq hipEventElapsedTime
movss 132(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm2, %xmm2
cvtss2sd %xmm0, %xmm2
movl $.L.str.2, %edi
movsd 208(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 168(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $3, %al
callq printf
leaq 80(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 80(%rsp), %rbx
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_19
# %bb.18:
movq 144(%rsp), %rax
movq 160(%rsp), %rcx
movl N_SIMS(%rip), %edx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movl %edx, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_19:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_21
# %bb.20:
movq 192(%rsp), %rax
movq 144(%rsp), %rcx
movq 160(%rsp), %rdx
movl N_SIMS(%rip), %esi
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl %esi, 76(%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 76(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_21:
movq 240(%rsp), %rdi
movq 144(%rsp), %rsi
movl N_BYTES_I(%rip), %edx
movl $2, %ecx
callq hipMemcpy
movq 200(%rsp), %rdi
movq 192(%rsp), %rsi
movl N_BYTES_F(%rip), %edx
movl $2, %ecx
callq hipMemcpy
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_23
# %bb.22:
movq 136(%rsp), %rax
movq 152(%rsp), %rcx
movl N_SIMS(%rip), %edx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movl %edx, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_23:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_25
# %bb.24:
movq 176(%rsp), %rax
movq 136(%rsp), %rcx
movq 152(%rsp), %rdx
movl N_SIMS(%rip), %esi
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl %esi, 76(%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 76(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_25:
movq 232(%rsp), %rdi
movq 136(%rsp), %rsi
movl N_BYTES_I(%rip), %edx
movl $2, %ecx
callq hipMemcpy
movq 184(%rsp), %rdi
movq 176(%rsp), %rsi
movl N_BYTES_F(%rip), %edx
movl $2, %ecx
callq hipMemcpy
movl N_SIMS(%rip), %eax
xorps %xmm3, %xmm3
xorps %xmm0, %xmm0
testq %rax, %rax
je .LBB5_29
# %bb.26: # %.lr.ph.i103
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movq 200(%rsp), %rcx
xorpd %xmm1, %xmm1
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_27: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm1
incq %rdx
cmpq %rdx, %rax
jne .LBB5_27
# %bb.28: # %._crit_edge.loopexit.i109
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
.LBB5_29: # %_Z8calcMeanPfj.exit111
movsd %xmm0, 168(%rsp) # 8-byte Spill
testq %rax, %rax
je .LBB5_33
# %bb.30: # %.lr.ph.i113
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movq 184(%rsp), %rcx
xorpd %xmm1, %xmm1
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_31: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm1
incq %rdx
cmpq %rdx, %rax
jne .LBB5_31
# %bb.32: # %._crit_edge.loopexit.i119
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
xorps %xmm3, %xmm3
cvtss2sd %xmm0, %xmm3
.LBB5_33: # %_Z8calcMeanPfj.exit121
movsd %xmm3, 208(%rsp) # 8-byte Spill
leaq 80(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %r14d, %r14d
xorl %esi, %esi
callq hipEventRecord
movq 80(%rsp), %r15
movq %r15, %rdi
callq hipEventSynchronize
leaq 132(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
callq hipEventElapsedTime
movss 132(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm2, %xmm2
cvtss2sd %xmm0, %xmm2
movl $.L.str.3, %edi
movsd 168(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 208(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $3, %al
callq printf
jmp .LBB5_34
.LBB5_35:
movq (%rsi), %rsi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $1, %r14d
.LBB5_34:
movl %r14d, %eax
addq $272, %rsp # imm = 0x110
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
# __hip_module_ctor: runs at program start (referenced from .init_array below).
# Registers the embedded HIP fat binary once (guarded by __hip_gpubin_handle)
# and registers both kernel host stubs with the HIP runtime, then arranges for
# __hip_module_dtor to run via atexit (tail call).
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# double-registration guard: skip registration if handle is already non-null
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
# zero the 32 bytes of stack-passed (unused) registration arguments
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
# register _Z8sim_freqPjPfj under its mangled-name string (.L__unnamed_1)
movl $_Z8sim_freqPjPfj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
# register _Z12sim_severityPfPjS_j under its mangled-name string (.L__unnamed_2)
movl $_Z12sim_severityPfPjS_j, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# schedule the destructor at process exit; atexit gets the tail call
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
# -- End function
# __hip_module_dtor: atexit hook installed by __hip_module_ctor.
# Unregisters the HIP fat binary if it was registered and clears the
# cached handle so the teardown is idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
# nothing to do when the module was never registered (handle == NULL)
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
# -- End function
.type N_BYTES_PRM,@object # @N_BYTES_PRM
.data
.globl N_BYTES_PRM
.p2align 2, 0x0
N_BYTES_PRM:
.long 12 # 0xc
.size N_BYTES_PRM, 4
.type N_SIMS,@object # @N_SIMS
.bss
.globl N_SIMS
.p2align 2, 0x0
N_SIMS:
.long 0 # 0x0
.size N_SIMS, 4
.type N_BLK,@object # @N_BLK
.globl N_BLK
.p2align 2, 0x0
N_BLK:
.long 0 # 0x0
.size N_BLK, 4
.type N_THRD,@object # @N_THRD
.globl N_THRD
.p2align 2, 0x0
N_THRD:
.long 0 # 0x0
.size N_THRD, 4
.type N_BYTES_I,@object # @N_BYTES_I
.globl N_BYTES_I
.p2align 2, 0x0
N_BYTES_I:
.long 0 # 0x0
.size N_BYTES_I, 4
.type N_BYTES_F,@object # @N_BYTES_F
.globl N_BYTES_F
.p2align 2, 0x0
N_BYTES_F:
.long 0 # 0x0
.size N_BYTES_F, 4
.type _Z8sim_freqPjPfj,@object # @_Z8sim_freqPjPfj
.section .rodata,"a",@progbits
.globl _Z8sim_freqPjPfj
.p2align 3, 0x0
_Z8sim_freqPjPfj:
.quad _Z23__device_stub__sim_freqPjPfj
.size _Z8sim_freqPjPfj, 8
.type _Z12sim_severityPfPjS_j,@object # @_Z12sim_severityPfPjS_j
.globl _Z12sim_severityPfPjS_j
.p2align 3, 0x0
_Z12sim_severityPfPjS_j:
.quad _Z27__device_stub__sim_severityPfPjS_j
.size _Z12sim_severityPfPjS_j, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: %s [nSimulations].\n"
.size .L.str, 27
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Running %u simulations ...\n"
.size .L.str.1, 28
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n"
.size .L.str.2, 59
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n"
.size .L.str.3, 58
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8sim_freqPjPfj"
.size .L__unnamed_1, 17
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z12sim_severityPfPjS_j"
.size .L__unnamed_2, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__sim_freqPjPfj
.addrsig_sym _Z27__device_stub__sim_severityPfPjS_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8sim_freqPjPfj
.addrsig_sym _Z12sim_severityPfPjS_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016f86c_00000000-6_stream_registers.cudafe1.cpp"
.text
#APP
#NO_APP
# _ZL26__cudaUnregisterBinaryUtilv (static "__cudaUnregisterBinaryUtil()"):
# atexit hook registered by _ZL24__sti____cudaRegisterAllv below; unregisters
# the CUDA fat binary using the cached module handle.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2558:
.cfi_startproc
endbr64
# 8-byte stack adjust keeps %rsp 16-byte aligned at the call
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2558:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# _Z8calcMeanPfj — float calcMean(float* data, unsigned n).
# Returns the mean of the n floats at data: each element is divided by n
# (float divide), accumulated in double (%xmm0), and the sum is narrowed
# back to float on return.  Returns 0.0f when n == 0.
.globl _Z8calcMeanPfj
.type _Z8calcMeanPfj, @function
_Z8calcMeanPfj:
.LFB2551:
.cfi_startproc
endbr64
# n == 0 -> return 0.0f
testl %esi, %esi
je .L8
movq %rdi, %rax
movl %esi, %edx
# %rdx = one-past-the-end pointer (data + n*4)
leaq (%rdi,%rdx,4), %rdx
pxor %xmm0, %xmm0
movl %esi, %esi
# %xmm2 = (float)n, the per-element divisor
pxor %xmm2, %xmm2
cvtsi2ssq %rsi, %xmm2
.L10:
# accumulate data[i]/n into the double-precision running sum %xmm0
movss (%rax), %xmm1
divss %xmm2, %xmm1
cvtss2sd %xmm1, %xmm1
addsd %xmm1, %xmm0
addq $4, %rax
cmpq %rdx, %rax
jne .L10
.L4:
# narrow the double sum to the float return value
cvtsd2ss %xmm0, %xmm0
ret
.L8:
pxor %xmm0, %xmm0
jmp .L4
.cfi_endproc
.LFE2551:
.size _Z8calcMeanPfj, .-_Z8calcMeanPfj
# _Z8get_timev — cudaEvent_t get_time(void).
# Creates a CUDA event, records it on the default stream (stream 0), and
# returns the event handle in %rax.  Uses a stack-protector canary around
# the on-stack event slot.
.globl _Z8get_timev
.type _Z8get_timev, @function
_Z8get_timev:
.LFB2552:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
# store the stack canary at 8(%rsp)
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
# cudaEventCreate(&event) with the event stored at (%rsp)
movq %rsp, %rdi
call cudaEventCreate@PLT
# cudaEventRecord(event, /*stream=*/0)
movl $0, %esi
movq (%rsp), %rdi
call cudaEventRecord@PLT
# return the event handle
movq (%rsp), %rax
# canary check before returning
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L14
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2552:
.size _Z8get_timev, .-_Z8get_timev
# _Z6asynchv — void asynch(void).
# Empty function: the body was presumably optimized away or inlined into
# main — only the return remains.
.globl _Z6asynchv
.type _Z6asynchv, @function
_Z6asynchv:
.LFB2553:
.cfi_startproc
endbr64
ret
.cfi_endproc
.LFE2553:
.size _Z6asynchv, .-_Z6asynchv
# Host-side launch stub for kernel sim_freq(unsigned*, float*, unsigned).
# Spills the three kernel arguments to the stack, builds the argument-pointer
# array expected by cudaLaunchKernel, pops the launch configuration pushed by
# __cudaPushCallConfiguration, and launches the kernel if a configuration
# was available.  Stack-protector canary guards the spill area.
.globl _Z30__device_stub__Z8sim_freqPjPfjPjPfj
.type _Z30__device_stub__Z8sim_freqPjPfjPjPfj, @function
_Z30__device_stub__Z8sim_freqPjPfjPjPfj:
.LFB2580:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
# spill kernel args: unsigned* at 24, float* at 16, unsigned at 12
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
# kernel-argument pointer array at 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
# default 1x1x1 grid/block dims, overwritten by __cudaPopCallConfiguration
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
# zero return means a launch configuration was popped -> launch
testl %eax, %eax
je .L20
.L16:
# canary check and return
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
# push shared-mem size and stream, then cudaLaunchKernel(func, grid, block, args, ...)
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8sim_freqPjPfj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2580:
.size _Z30__device_stub__Z8sim_freqPjPfjPjPfj, .-_Z30__device_stub__Z8sim_freqPjPfjPjPfj
# Host entry for the sim_freq kernel: arguments pass through untouched
# to the device stub above (this symbol is also what cudaLaunchKernel and
# __cudaRegisterFunction use to identify the kernel).
.globl _Z8sim_freqPjPfj
.type _Z8sim_freqPjPfj, @function
_Z8sim_freqPjPfj:
.LFB2581:
.cfi_startproc
endbr64
# 8-byte adjust keeps the stack 16-byte aligned at the call
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2581:
.size _Z8sim_freqPjPfj, .-_Z8sim_freqPjPfj
# Host-side launch stub for kernel sim_severity(float*, unsigned*, float*, unsigned).
# Same pattern as the sim_freq stub: spill the four kernel arguments, build
# the argument-pointer array, pop the pushed launch configuration, and call
# cudaLaunchKernel when one was available.
.globl _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
.type _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j, @function
_Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j:
.LFB2582:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
# spill kernel args: float* at 24, unsigned* at 16, float* at 8, unsigned at 4
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
# kernel-argument pointer array at 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
# default 1x1x1 dims, overwritten by __cudaPopCallConfiguration
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L28
.L24:
# canary check and return
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L29
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
# push shared-mem size and stream, then launch the kernel
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12sim_severityPfPjS_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L24
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2582:
.size _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j, .-_Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
# Host entry for the sim_severity kernel: forwards directly to the device
# stub above; this symbol identifies the kernel to the CUDA runtime.
.globl _Z12sim_severityPfPjS_j
.type _Z12sim_severityPfPjS_j, @function
_Z12sim_severityPfPjS_j:
.LFB2583:
.cfi_startproc
endbr64
# 8-byte adjust keeps the stack 16-byte aligned at the call
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2583:
.size _Z12sim_severityPfPjS_j, .-_Z12sim_severityPfPjS_j
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Running %u simulations ...\n"
.LC8:
.string "Usage: %s [nSimulations].\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC9:
.string "\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n"
.align 8
.LC10:
.string "\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n"
.text
# main(argc, argv): CUDA two-stream simulation driver.
# Visible flow:
#   1. Require argc == 2; argv[1] -> N_SIMS (strtol base 10); derive
#      N_BLK = (N_SIMS >> 9) + 1, N_THRD = min(N_SIMS, 512),
#      N_BYTES_F = N_BYTES_I = 4 * N_SIMS.
#   2. Create two streams; register two small pinned parameter blocks;
#      allocate device/host buffers; async-copy parameters per stream.
#   3. Asynchronous pass: launch sim_freq + sim_severity on each stream,
#      async-copy results back, time with events, compute means, print .LC9.
#   4. Synchronous pass: same launches on the null stream with blocking
#      cudaMemcpy, compute means, print .LC10.
# Returns 0 on success, 1 on usage error (.L33 prints .LC8).
.globl main
.type main, @function
main:
.LFB2554:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $200, %rsp
.cfi_def_cfa_offset 224
# stack-protector canary
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
# usage check: exactly one command-line argument expected
cmpl $2, %edi
jne .L33
# N_SIMS = strtol(argv[1], NULL, 10)
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %ecx
movl %eax, N_SIMS(%rip)
# N_BLK = (N_SIMS >> 9) + 1
movl %eax, %edx
shrl $9, %edx
addl $1, %edx
movl %edx, N_BLK(%rip)
# N_THRD = min(N_SIMS, 512)
movl $512, %edx
cmpl %edx, %eax
cmovbe %eax, %edx
movl %edx, N_THRD(%rip)
# N_BYTES_F = N_BYTES_I = N_SIMS * 4
sall $2, %eax
movl %eax, N_BYTES_F(%rip)
movl %eax, N_BYTES_I(%rip)
# printf("Running %u simulations ...\n", N_SIMS)
movl %ecx, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
# two streams at 24(%rsp) and 32(%rsp)
leaq 24(%rsp), %rdi
call cudaStreamCreate@PLT
leaq 32(%rsp), %rdi
call cudaStreamCreate@PLT
# two 3-float parameter blocks on the stack:
# {50.0f, 3.0f, 0.5f} at 160(%rsp) and {1.5f, 0.8f, 5.0f} at 172(%rsp)
movl $0x42480000, 160(%rsp)
movl $0x40400000, 164(%rsp)
movl $0x3f000000, 168(%rsp)
movl $0x3fc00000, 172(%rsp)
movl $0x3f4ccccd, 176(%rsp)
movl $0x40a00000, 180(%rsp)
# pin both parameter blocks for async copies
movl N_BYTES_PRM(%rip), %esi
leaq 160(%rsp), %rbp
movl $0, %edx
movq %rbp, %rdi
call cudaHostRegister@PLT
movl N_BYTES_PRM(%rip), %esi
leaq 172(%rsp), %rbx
movl $0, %edx
movq %rbx, %rdi
call cudaHostRegister@PLT
# device parameter buffers at 40(%rsp) / 48(%rsp)
movl N_BYTES_PRM(%rip), %esi
leaq 40(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_PRM(%rip), %esi
leaq 48(%rsp), %rdi
call cudaMalloc@PLT
# async host->device copy of each parameter block on its own stream
movl N_BYTES_PRM(%rip), %edx
movq 24(%rsp), %r8
movl $1, %ecx
movq %rbp, %rsi
movq 40(%rsp), %rdi
call cudaMemcpyAsync@PLT
movl N_BYTES_PRM(%rip), %edx
movq 32(%rsp), %r8
movl $1, %ecx
movq %rbx, %rsi
movq 48(%rsp), %rdi
call cudaMemcpyAsync@PLT
# device result buffers: uint at 64/80, float at 96/112
movl N_BYTES_I(%rip), %esi
leaq 64(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_F(%rip), %esi
leaq 96(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_I(%rip), %esi
leaq 80(%rsp), %rdi
call cudaMalloc@PLT
movl N_BYTES_F(%rip), %esi
leaq 112(%rsp), %rdi
call cudaMalloc@PLT
# pinned host result buffers: uint at 56/72, float at 88/104
movl N_BYTES_I(%rip), %esi
leaq 56(%rsp), %rdi
call cudaMallocHost@PLT
movl N_BYTES_F(%rip), %esi
leaq 88(%rsp), %rdi
call cudaMallocHost@PLT
movl N_BYTES_I(%rip), %esi
leaq 72(%rsp), %rdi
call cudaMallocHost@PLT
movl N_BYTES_F(%rip), %esi
leaq 104(%rsp), %rdi
call cudaMallocHost@PLT
# start-time event for the asynchronous pass
call _Z8get_timev
movq %rax, %rbp
leaq 120(%rsp), %rdi
call cudaEventCreate@PLT
leaq 128(%rsp), %rdi
call cudaEventCreate@PLT
# launch 1: sim_freq<<<N_BLK, N_THRD, 0, stream0>>> (args set at .L47)
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movq 24(%rsp), %r9
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L47
.L34:
# launch 2: sim_freq on stream1 (args set at .L48)
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movq 32(%rsp), %r9
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L48
.L37:
# launch 3: sim_severity on stream0 (args set at .L49)
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movq 24(%rsp), %r9
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L49
.L38:
# launch 4: sim_severity on stream1 (args set at .L50)
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movq 32(%rsp), %r9
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L39:
# async D->H float results on stream0, record completion event 120(%rsp)
movl N_BYTES_F(%rip), %edx
movq 24(%rsp), %r8
movl $2, %ecx
movq 96(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpyAsync@PLT
movq 24(%rsp), %rsi
movq 120(%rsp), %rdi
call cudaEventRecord@PLT
# async D->H float results on stream1, record completion event 128(%rsp)
movl N_BYTES_F(%rip), %edx
movq 32(%rsp), %r8
movl $2, %ecx
movq 112(%rsp), %rsi
movq 104(%rsp), %rdi
call cudaMemcpyAsync@PLT
movq 32(%rsp), %rsi
movq 128(%rsp), %rdi
call cudaEventRecord@PLT
# async D->H uint results on both streams
movl N_BYTES_I(%rip), %edx
movq 24(%rsp), %r8
movl $2, %ecx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpyAsync@PLT
movl N_BYTES_I(%rip), %edx
movq 32(%rsp), %r8
movl $2, %ecx
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpyAsync@PLT
# wait for stream0's copy, then mean of its float results -> 8(%rsp)
movq 120(%rsp), %rdi
call cudaEventSynchronize@PLT
movl N_SIMS(%rip), %esi
movq 88(%rsp), %rdi
call _Z8calcMeanPfj
movss %xmm0, 8(%rsp)
# wait for stream1's copy, then mean of its float results -> 12(%rsp)
movq 128(%rsp), %rdi
call cudaEventSynchronize@PLT
movl N_SIMS(%rip), %esi
movq 104(%rsp), %rdi
call _Z8calcMeanPfj
movss %xmm0, 12(%rsp)
# drain both streams
movq 24(%rsp), %rdi
call cudaStreamSynchronize@PLT
movq 32(%rsp), %rdi
call cudaStreamSynchronize@PLT
# stop event; elapsed ms into 20(%rsp)
call _Z8get_timev
movq %rax, %rbx
movq %rax, %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq %rbx, %rdx
movq %rbp, %rsi
call cudaEventElapsedTime@PLT
# printf(.LC9, loss1, loss2, ms) for the asynchronous pass
pxor %xmm0, %xmm0
cvtss2sd 8(%rsp), %xmm0
pxor %xmm2, %xmm2
cvtss2sd 20(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 12(%rsp), %xmm1
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
# --- synchronous pass: same launches with NULL stream + blocking copies ---
call _Z8get_timev
movq %rax, %rbp
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L51
.L40:
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L41:
# blocking D->H copies of the first buffer pair
movl N_BYTES_I(%rip), %edx
movl $2, %ecx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl N_BYTES_F(%rip), %edx
movl $2, %ecx
movq 96(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L42:
movl N_THRD(%rip), %eax
movl %eax, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl N_BLK(%rip), %eax
movl %eax, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 148(%rsp), %rdx
movl $1, %ecx
movq 136(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L43:
# blocking D->H copies of the second buffer pair
movl N_BYTES_I(%rip), %edx
movl $2, %ecx
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl N_BYTES_F(%rip), %edx
movl $2, %ecx
movq 112(%rsp), %rsi
movq 104(%rsp), %rdi
call cudaMemcpy@PLT
# means of both float result buffers
movl N_SIMS(%rip), %ebx
movl %ebx, %esi
movq 88(%rsp), %rdi
call _Z8calcMeanPfj
movss %xmm0, 8(%rsp)
movl %ebx, %esi
movq 104(%rsp), %rdi
call _Z8calcMeanPfj
movss %xmm0, 12(%rsp)
# stop event, elapsed ms, printf(.LC10, ...) for the synchronous pass
call _Z8get_timev
movq %rax, %rbx
movq %rax, %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq %rbx, %rdx
movq %rbp, %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 8(%rsp), %xmm0
pxor %xmm2, %xmm2
cvtss2sd 20(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 12(%rsp), %xmm1
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
movl $0, %eax
jmp .L32
.L33:
# usage error: printf("Usage: %s [nSimulations].\n", argv[0]); return 1
movq (%rsi), %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
.L32:
# canary check and epilogue
movq 184(%rsp), %rdx
subq %fs:40, %rdx
jne .L55
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
# sim_freq(d_uint0, d_prm0, N_SIMS)
movl N_SIMS(%rip), %edx
movq 40(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
jmp .L34
.L48:
# sim_freq(d_uint1, d_prm1, N_SIMS)
movl N_SIMS(%rip), %edx
movq 48(%rsp), %rsi
movq 80(%rsp), %rdi
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
jmp .L37
.L49:
# sim_severity(d_float0, d_uint0, d_prm0, N_SIMS)
movl N_SIMS(%rip), %ecx
movq 40(%rsp), %rdx
movq 64(%rsp), %rsi
movq 96(%rsp), %rdi
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
jmp .L38
.L50:
# sim_severity(d_float1, d_uint1, d_prm1, N_SIMS)
movl N_SIMS(%rip), %ecx
movq 48(%rsp), %rdx
movq 80(%rsp), %rsi
movq 112(%rsp), %rdi
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
jmp .L39
.L51:
# synchronous-pass launches (same argument sets as above)
movl N_SIMS(%rip), %edx
movq 40(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
jmp .L40
.L52:
movl N_SIMS(%rip), %ecx
movq 40(%rsp), %rdx
movq 64(%rsp), %rsi
movq 96(%rsp), %rdi
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
jmp .L41
.L53:
movl N_SIMS(%rip), %edx
movq 48(%rsp), %rsi
movq 80(%rsp), %rdi
call _Z30__device_stub__Z8sim_freqPjPfjPjPfj
jmp .L42
.L54:
movl N_SIMS(%rip), %ecx
movq 48(%rsp), %rdx
movq 80(%rsp), %rsi
movq 112(%rsp), %rdi
call _Z37__device_stub__Z12sim_severityPfPjS_jPfPjS_j
jmp .L43
.L55:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2554:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z12sim_severityPfPjS_j"
.LC12:
.string "_Z8sim_freqPjPfj"
.LC13:
.string "precalc_xorwow_matrix"
.LC14:
.string "precalc_xorwow_offset_matrix"
.LC15:
.string "mrg32k3aM1"
.LC16:
.string "mrg32k3aM2"
.LC17:
.string "mrg32k3aM1SubSeq"
.LC18:
.string "mrg32k3aM2SubSeq"
.LC19:
.string "mrg32k3aM1Seq"
.LC20:
.string "mrg32k3aM2Seq"
.LC21:
.string "__cr_lgamma_table"
.text
# _ZL24__sti____cudaRegisterAllv: static initializer (referenced from
# .init_array).  Registers the CUDA fat binary, the two kernels, and the
# curand-related __device__ variables (XORWOW/MRG32k3a tables, lgamma
# table), ends registration, and installs the unregister hook via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2585:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
# register the embedded fat binary, cache the handle in %rbx and globally
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# register kernel sim_severity under its mangled name
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z12sim_severityPfPjS_j(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register kernel sim_freq under its mangled name
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z8sim_freqPjPfj(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
# register device variable precalc_xorwow_matrix (102400 bytes)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# register device variable precalc_xorwow_offset_matrix (102400 bytes)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# register device variable mrg32k3aM1 (2304 bytes)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# register device variable mrg32k3aM2 (2304 bytes)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# register device variable mrg32k3aM1SubSeq (2016 bytes)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# register device variable mrg32k3aM2SubSeq (2016 bytes)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# register device variable mrg32k3aM1Seq (2304 bytes)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# register device variable mrg32k3aM2Seq (2304 bytes)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC20(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# register device variable __cr_lgamma_table (72 bytes; note the $1 flag
# here differs from the $0 pushed for the other variables — presumably the
# "constant" flag, TODO confirm against __cudaRegisterVar's signature)
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC21(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
# finish registration and install the atexit unregister hook
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2585:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl N_BYTES_F
.bss
.align 4
.type N_BYTES_F, @object
.size N_BYTES_F, 4
N_BYTES_F:
.zero 4
.globl N_BYTES_I
.align 4
.type N_BYTES_I, @object
.size N_BYTES_I, 4
N_BYTES_I:
.zero 4
.globl N_THRD
.align 4
.type N_THRD, @object
.size N_THRD, 4
N_THRD:
.zero 4
.globl N_BLK
.align 4
.type N_BLK, @object
.size N_BLK, 4
N_BLK:
.zero 4
.globl N_SIMS
.align 4
.type N_SIMS, @object
.size N_SIMS, 4
N_SIMS:
.zero 4
.globl N_BYTES_PRM
.data
.align 4
.type N_BYTES_PRM, @object
.size N_BYTES_PRM, 4
N_BYTES_PRM:
.long 12
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "stream_registers.hip"
# HIP/clang build of float calcMean(float* data, unsigned n): same contract
# as the GCC version above — sum of data[i]/n accumulated in double,
# narrowed to float; 0.0f when n == 0.
.globl _Z8calcMeanPfj # -- Begin function _Z8calcMeanPfj
.p2align 4, 0x90
.type _Z8calcMeanPfj,@function
_Z8calcMeanPfj: # @_Z8calcMeanPfj
.cfi_startproc
# %bb.0:
# n == 0 -> return 0.0f
testl %esi, %esi
je .LBB0_1
# %bb.4: # %.lr.ph
# %xmm1 = (float)n; %rcx = element index
movl %esi, %eax
cvtsi2ss %rax, %xmm1
xorpd %xmm0, %xmm0
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_5: # =>This Inner Loop Header: Depth=1
# accumulate data[i]/n into double sum %xmm0
movss (%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm1, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm0
incq %rcx
cmpq %rcx, %rax
jne .LBB0_5
# %bb.2: # %._crit_edge.loopexit
cvtsd2ss %xmm0, %xmm0
retq
.LBB0_1:
xorps %xmm0, %xmm0
retq
.Lfunc_end0:
.size _Z8calcMeanPfj, .Lfunc_end0-_Z8calcMeanPfj
.cfi_endproc
# -- End function
# HIP build of get_time(): creates a hipEvent_t, records it on the null
# stream, and returns the handle in %rax (no stack protector in this build).
.globl _Z8get_timev # -- Begin function _Z8get_timev
.p2align 4, 0x90
.type _Z8get_timev,@function
_Z8get_timev: # @_Z8get_timev
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
# hipEventCreate(&event) with event stored at (%rsp)
movq %rsp, %rdi
callq hipEventCreate
# hipEventRecord(event, /*stream=*/0)
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
# return the event handle
movq (%rsp), %rax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z8get_timev, .Lfunc_end1-_Z8get_timev
.cfi_endproc
# -- End function
# HIP launch stub for sim_freq(unsigned*, float*, unsigned): spills the
# three kernel arguments, builds the argument-pointer array, pops the
# pushed launch configuration, and calls hipLaunchKernel unconditionally
# (unlike the CUDA stub, there is no skip path here).
.globl _Z23__device_stub__sim_freqPjPfj # -- Begin function _Z23__device_stub__sim_freqPjPfj
.p2align 4, 0x90
.type _Z23__device_stub__sim_freqPjPfj,@function
_Z23__device_stub__sim_freqPjPfj: # @_Z23__device_stub__sim_freqPjPfj
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
# spill kernel args: unsigned* at 72, float* at 64, unsigned at 12
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
# kernel-argument pointer array at 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
# pop grid/block/shared-mem/stream into 48/32/24/16(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
# push stream and shared-mem size as the stack arguments
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z23__device_stub__sim_freqPjPfj, .Lfunc_end2-_Z23__device_stub__sim_freqPjPfj
.cfi_endproc
# -- End function
# HIP launch stub for sim_severity(float*, unsigned*, float*, unsigned):
# same pattern as the sim_freq stub with a fourth argument slot.
.globl _Z27__device_stub__sim_severityPfPjS_j # -- Begin function _Z27__device_stub__sim_severityPfPjS_j
.p2align 4, 0x90
.type _Z27__device_stub__sim_severityPfPjS_j,@function
_Z27__device_stub__sim_severityPfPjS_j: # @_Z27__device_stub__sim_severityPfPjS_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
# spill kernel args: float* at 72, unsigned* at 64, float* at 56, unsigned at 4
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
# kernel-argument pointer array at 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
# pop grid/block/shared-mem/stream into 40/24/16/8(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
# push stream and shared-mem size as the stack arguments
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z27__device_stub__sim_severityPfPjS_j, .Lfunc_end3-_Z27__device_stub__sim_severityPfPjS_j
.cfi_endproc
# -- End function
# HIP build of asynch(): empty function, mirrors the CUDA build — only
# the return remains.
.globl _Z6asynchv # -- Begin function _Z6asynchv
.p2align 4, 0x90
.type _Z6asynchv,@function
_Z6asynchv: # @_Z6asynchv
.cfi_startproc
# %bb.0:
retq
.Lfunc_end4:
.size _Z6asynchv, .Lfunc_end4-_Z6asynchv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $272, %rsp # imm = 0x110
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
cmpl $2, %edi
jne .LBB5_35
# %bb.1:
movabsq $4294967296, %r15 # imm = 0x100000000
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, N_SIMS(%rip)
movl %eax, %ecx
shrl $9, %ecx
incl %ecx
movl %ecx, N_BLK(%rip)
cmpl $512, %eax # imm = 0x200
movl $512, %ecx # imm = 0x200
cmovbl %eax, %ecx
movl %ecx, N_THRD(%rip)
leal (,%rax,4), %ecx
movl %ecx, N_BYTES_F(%rip)
movl %ecx, N_BYTES_I(%rip)
movl $.L.str.1, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
leaq 120(%rsp), %rdi
callq hipStreamCreate
leaq 112(%rsp), %rdi
callq hipStreamCreate
movabsq $4629700418048884736, %rax # imm = 0x4040000042480000
movq %rax, 260(%rsp)
movl $1056964608, 268(%rsp) # imm = 0x3F000000
movabsq $4561245704529379328, %rax # imm = 0x3F4CCCCD3FC00000
movq %rax, 248(%rsp)
movl $1084227584, 256(%rsp) # imm = 0x40A00000
movl N_BYTES_PRM(%rip), %esi
leaq 260(%rsp), %r14
movq %r14, %rdi
xorl %edx, %edx
callq hipHostRegister
movl N_BYTES_PRM(%rip), %esi
leaq 248(%rsp), %rbx
movq %rbx, %rdi
xorl %edx, %edx
callq hipHostRegister
movl N_BYTES_PRM(%rip), %esi
leaq 160(%rsp), %rdi
callq hipMalloc
movl N_BYTES_PRM(%rip), %esi
leaq 152(%rsp), %rdi
callq hipMalloc
movq 160(%rsp), %rdi
movl N_BYTES_PRM(%rip), %edx
movq 120(%rsp), %r8
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpyAsync
movq 152(%rsp), %rdi
movl N_BYTES_PRM(%rip), %edx
movq 112(%rsp), %r8
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpyAsync
movl N_BYTES_I(%rip), %esi
leaq 144(%rsp), %rdi
callq hipMalloc
movl N_BYTES_F(%rip), %esi
leaq 192(%rsp), %rdi
callq hipMalloc
movl N_BYTES_I(%rip), %esi
leaq 136(%rsp), %rdi
callq hipMalloc
movl N_BYTES_F(%rip), %esi
leaq 176(%rsp), %rdi
callq hipMalloc
movl N_BYTES_I(%rip), %esi
leaq 240(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
movl N_BYTES_F(%rip), %esi
leaq 200(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
movl N_BYTES_I(%rip), %esi
leaq 232(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
movl N_BYTES_F(%rip), %esi
leaq 184(%rsp), %rdi
xorl %edx, %edx
callq hipHostMalloc
leaq 80(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 80(%rsp), %rbx
leaq 224(%rsp), %rdi
callq hipEventCreate
leaq 216(%rsp), %rdi
callq hipEventCreate
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
movq 120(%rsp), %r9
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_3
# %bb.2:
movq 144(%rsp), %rax
movq 160(%rsp), %rcx
movl N_SIMS(%rip), %edx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movl %edx, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_3:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
movq 112(%rsp), %r9
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_5
# %bb.4:
movq 136(%rsp), %rax
movq 152(%rsp), %rcx
movl N_SIMS(%rip), %edx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movl %edx, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_5:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
movq 120(%rsp), %r9
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_7
# %bb.6:
movq 192(%rsp), %rax
movq 144(%rsp), %rcx
movq 160(%rsp), %rdx
movl N_SIMS(%rip), %esi
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl %esi, 76(%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 76(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_7:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
movq 112(%rsp), %r9
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_9
# %bb.8:
movq 176(%rsp), %rax
movq 136(%rsp), %rcx
movq 152(%rsp), %rdx
movl N_SIMS(%rip), %esi
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl %esi, 76(%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 76(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_9:
movq 200(%rsp), %rdi
movq 192(%rsp), %rsi
movl N_BYTES_F(%rip), %edx
movq 120(%rsp), %r8
movl $2, %ecx
callq hipMemcpyAsync
movq 224(%rsp), %rdi
movq 120(%rsp), %rsi
callq hipEventRecord
movq 184(%rsp), %rdi
movq 176(%rsp), %rsi
movl N_BYTES_F(%rip), %edx
movq 112(%rsp), %r8
movl $2, %ecx
callq hipMemcpyAsync
movq 216(%rsp), %rdi
movq 112(%rsp), %rsi
callq hipEventRecord
movq 240(%rsp), %rdi
movq 144(%rsp), %rsi
movl N_BYTES_I(%rip), %edx
movq 120(%rsp), %r8
movl $2, %ecx
callq hipMemcpyAsync
movq 232(%rsp), %rdi
movq 136(%rsp), %rsi
movl N_BYTES_I(%rip), %edx
movq 112(%rsp), %r8
movl $2, %ecx
callq hipMemcpyAsync
movq 224(%rsp), %rdi
callq hipEventSynchronize
movl N_SIMS(%rip), %eax
xorps %xmm0, %xmm0
movsd %xmm0, 168(%rsp) # 8-byte Spill
testq %rax, %rax
je .LBB5_13
# %bb.10: # %.lr.ph.i
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movq 200(%rsp), %rcx
xorpd %xmm1, %xmm1
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_11: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm1
incq %rdx
cmpq %rdx, %rax
jne .LBB5_11
# %bb.12: # %._crit_edge.loopexit.i
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
.LBB5_13: # %_Z8calcMeanPfj.exit
movsd %xmm0, 208(%rsp) # 8-byte Spill
movq 216(%rsp), %rdi
callq hipEventSynchronize
movl N_SIMS(%rip), %eax
testq %rax, %rax
je .LBB5_17
# %bb.14: # %.lr.ph.i69
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movq 184(%rsp), %rcx
xorpd %xmm1, %xmm1
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_15: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm1
incq %rdx
cmpq %rdx, %rax
jne .LBB5_15
# %bb.16: # %._crit_edge.loopexit.i75
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
movsd %xmm0, 168(%rsp) # 8-byte Spill
.LBB5_17: # %_Z8calcMeanPfj.exit77
movq 120(%rsp), %rdi
callq hipStreamSynchronize
movq 112(%rsp), %rdi
callq hipStreamSynchronize
leaq 80(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 80(%rsp), %r14
movq %r14, %rdi
callq hipEventSynchronize
leaq 132(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
callq hipEventElapsedTime
movss 132(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm2, %xmm2
cvtss2sd %xmm0, %xmm2
movl $.L.str.2, %edi
movsd 208(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 168(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $3, %al
callq printf
leaq 80(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 80(%rsp), %rbx
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_19
# %bb.18:
movq 144(%rsp), %rax
movq 160(%rsp), %rcx
movl N_SIMS(%rip), %edx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movl %edx, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_19:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_21
# %bb.20:
movq 192(%rsp), %rax
movq 144(%rsp), %rcx
movq 160(%rsp), %rdx
movl N_SIMS(%rip), %esi
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl %esi, 76(%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 76(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_21:
movq 240(%rsp), %rdi
movq 144(%rsp), %rsi
movl N_BYTES_I(%rip), %edx
movl $2, %ecx
callq hipMemcpy
movq 200(%rsp), %rdi
movq 192(%rsp), %rsi
movl N_BYTES_F(%rip), %edx
movl $2, %ecx
callq hipMemcpy
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_23
# %bb.22:
movq 136(%rsp), %rax
movq 152(%rsp), %rcx
movl N_SIMS(%rip), %edx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movl %edx, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8sim_freqPjPfj, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_23:
movl N_BLK(%rip), %edi
movl N_THRD(%rip), %edx
orq %r15, %rdi
orq %r15, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_25
# %bb.24:
movq 176(%rsp), %rax
movq 136(%rsp), %rcx
movq 152(%rsp), %rdx
movl N_SIMS(%rip), %esi
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl %esi, 76(%rsp)
leaq 64(%rsp), %rax
movq %rax, 80(%rsp)
leaq 56(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 76(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sim_severityPfPjS_j, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_25:
movq 232(%rsp), %rdi
movq 136(%rsp), %rsi
movl N_BYTES_I(%rip), %edx
movl $2, %ecx
callq hipMemcpy
movq 184(%rsp), %rdi
movq 176(%rsp), %rsi
movl N_BYTES_F(%rip), %edx
movl $2, %ecx
callq hipMemcpy
movl N_SIMS(%rip), %eax
xorps %xmm3, %xmm3
xorps %xmm0, %xmm0
testq %rax, %rax
je .LBB5_29
# %bb.26: # %.lr.ph.i103
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movq 200(%rsp), %rcx
xorpd %xmm1, %xmm1
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_27: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm1
incq %rdx
cmpq %rdx, %rax
jne .LBB5_27
# %bb.28: # %._crit_edge.loopexit.i109
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
cvtss2sd %xmm0, %xmm0
.LBB5_29: # %_Z8calcMeanPfj.exit111
movsd %xmm0, 168(%rsp) # 8-byte Spill
testq %rax, %rax
je .LBB5_33
# %bb.30: # %.lr.ph.i113
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movq 184(%rsp), %rcx
xorpd %xmm1, %xmm1
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_31: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
addsd %xmm2, %xmm1
incq %rdx
cmpq %rdx, %rax
jne .LBB5_31
# %bb.32: # %._crit_edge.loopexit.i119
xorps %xmm0, %xmm0
cvtsd2ss %xmm1, %xmm0
xorps %xmm3, %xmm3
cvtss2sd %xmm0, %xmm3
.LBB5_33: # %_Z8calcMeanPfj.exit121
movsd %xmm3, 208(%rsp) # 8-byte Spill
leaq 80(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %r14d, %r14d
xorl %esi, %esi
callq hipEventRecord
movq 80(%rsp), %r15
movq %r15, %rdi
callq hipEventSynchronize
leaq 132(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
callq hipEventElapsedTime
movss 132(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm2, %xmm2
cvtss2sd %xmm0, %xmm2
movl $.L.str.3, %edi
movsd 168(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 208(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $3, %al
callq printf
jmp .LBB5_34
.LBB5_35:
movq (%rsi), %rsi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $1, %r14d
.LBB5_34:
movl %r14d, %eax
addq $272, %rsp # imm = 0x110
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8sim_freqPjPfj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12sim_severityPfPjS_j, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type N_BYTES_PRM,@object # @N_BYTES_PRM
.data
.globl N_BYTES_PRM
.p2align 2, 0x0
N_BYTES_PRM:
.long 12 # 0xc
.size N_BYTES_PRM, 4
.type N_SIMS,@object # @N_SIMS
.bss
.globl N_SIMS
.p2align 2, 0x0
N_SIMS:
.long 0 # 0x0
.size N_SIMS, 4
.type N_BLK,@object # @N_BLK
.globl N_BLK
.p2align 2, 0x0
N_BLK:
.long 0 # 0x0
.size N_BLK, 4
.type N_THRD,@object # @N_THRD
.globl N_THRD
.p2align 2, 0x0
N_THRD:
.long 0 # 0x0
.size N_THRD, 4
.type N_BYTES_I,@object # @N_BYTES_I
.globl N_BYTES_I
.p2align 2, 0x0
N_BYTES_I:
.long 0 # 0x0
.size N_BYTES_I, 4
.type N_BYTES_F,@object # @N_BYTES_F
.globl N_BYTES_F
.p2align 2, 0x0
N_BYTES_F:
.long 0 # 0x0
.size N_BYTES_F, 4
.type _Z8sim_freqPjPfj,@object # @_Z8sim_freqPjPfj
.section .rodata,"a",@progbits
.globl _Z8sim_freqPjPfj
.p2align 3, 0x0
_Z8sim_freqPjPfj:
.quad _Z23__device_stub__sim_freqPjPfj
.size _Z8sim_freqPjPfj, 8
.type _Z12sim_severityPfPjS_j,@object # @_Z12sim_severityPfPjS_j
.globl _Z12sim_severityPfPjS_j
.p2align 3, 0x0
_Z12sim_severityPfPjS_j:
.quad _Z27__device_stub__sim_severityPfPjS_j
.size _Z12sim_severityPfPjS_j, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: %s [nSimulations].\n"
.size .L.str, 27
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Running %u simulations ...\n"
.size .L.str.1, 28
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n"
.size .L.str.2, 59
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n"
.size .L.str.3, 58
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z8sim_freqPjPfj"
.size .L__unnamed_1, 17
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z12sim_severityPfPjS_j"
.size .L__unnamed_2, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__sim_freqPjPfj
.addrsig_sym _Z27__device_stub__sim_severityPfPjS_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8sim_freqPjPfj
.addrsig_sym _Z12sim_severityPfPjS_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #define THREADS 256
__global__ void select_matches(
const unsigned* in_idx,
const int* in_dist,
const unsigned nfeat,
const unsigned nelem,
const int max_dist)
{
unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
__shared__ int s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
// Reduce best matches and find the best of them all
for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
if (threadIdx.y < i) {
int dist = s_dist[sid + i];
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = s_idx[sid + i];
}
__syncthreads();
}
}
} | code for sm_80
Function : _Z14select_matchesPKjPKijji
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ ULDC UR4, c[0x0][0x4] ; /* 0x0000010000047ab9 */
/* 0x000fe40000000800 */
/*0020*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*0030*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf05270 */
/*0040*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0050*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e220000002200 */
/*0060*/ IMAD.U32 R0, RZ, RZ, UR4 ; /* 0x00000004ff007e24 */
/* 0x000fc6000f8e00ff */
/*0070*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e240000002100 */
/*0080*/ IMAD R2, R2, c[0x0][0x4], R7 ; /* 0x0000010002027a24 */
/* 0x001fc800078e0207 */
/*0090*/ IMAD.SHL.U32 R3, R2, 0x4, RZ ; /* 0x0000000402037824 */
/* 0x000fe400078e00ff */
/*00a0*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*00b0*/ @P0 BRA 0x150 ; /* 0x0000009000000947 */
/* 0x001fea0003800000 */
/*00c0*/ IMAD R6, R0, 0x4, R3 ; /* 0x0000000400067824 */
/* 0x000fe200078e0203 */
/*00d0*/ LDS R4, [R2.X4] ; /* 0x0000000002047984 */
/* 0x000fe20000004800 */
/*00e0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe60003800000 */
/*00f0*/ LDS R9, [R6] ; /* 0x0000000006097984 */
/* 0x000e240000000800 */
/*0100*/ ISETP.GE.AND P0, PT, R9, R4, PT ; /* 0x000000040900720c */
/* 0x001fda0003f06270 */
/*0110*/ @!P0 STS [R2.X4], R9 ; /* 0x0000000902008388 */
/* 0x000fe80000004800 */
/*0120*/ @!P0 LDS R5, [R6+0x400] ; /* 0x0004000006058984 */
/* 0x000e280000000800 */
/*0130*/ @!P0 STS [R2.X4+0x400], R5 ; /* 0x0004000502008388 */
/* 0x0011e80000004800 */
/*0140*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0150*/ SHF.R.U32.HI R0, RZ, 0x1, R0 ; /* 0x00000001ff007819 */
/* 0x000fc80000011600 */
/*0160*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0170*/ @P0 BRA 0xa0 ; /* 0xffffff2000000947 */
/* 0x000fea000383ffff */
/*0180*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0190*/ BRA 0x190; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #define THREADS 256
__global__ void select_matches(
const unsigned* in_idx,
const int* in_dist,
const unsigned nfeat,
const unsigned nelem,
const int max_dist)
{
unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
__shared__ int s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
// Reduce best matches and find the best of them all
for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
if (threadIdx.y < i) {
int dist = s_dist[sid + i];
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = s_idx[sid + i];
}
__syncthreads();
}
}
} | .file "tmpxft_0014f26e_00000000-6_select_matches.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji
.type _Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji, @function
_Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14select_matchesPKjPKijji(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji, .-_Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji
.globl _Z14select_matchesPKjPKijji
.type _Z14select_matchesPKjPKijji, @function
_Z14select_matchesPKjPKijji:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14select_matchesPKjPKijji, .-_Z14select_matchesPKjPKijji
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14select_matchesPKjPKijji"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14select_matchesPKjPKijji(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #define THREADS 256
__global__ void select_matches(
const unsigned* in_idx,
const int* in_dist,
const unsigned nfeat,
const unsigned nelem,
const int max_dist)
{
unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
__shared__ int s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
// Reduce best matches and find the best of them all
for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
if (threadIdx.y < i) {
int dist = s_dist[sid + i];
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = s_idx[sid + i];
}
__syncthreads();
}
}
} | #include <hip/hip_runtime.h>
#define THREADS 256
__global__ void select_matches(
const unsigned* in_idx,
const int* in_dist,
const unsigned nfeat,
const unsigned nelem,
const int max_dist)
{
unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
__shared__ int s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
// Reduce best matches and find the best of them all
for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
if (threadIdx.y < i) {
int dist = s_dist[sid + i];
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = s_idx[sid + i];
}
__syncthreads();
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#define THREADS 256
__global__ void select_matches(
const unsigned* in_idx,
const int* in_dist,
const unsigned nfeat,
const unsigned nelem,
const int max_dist)
{
unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
__shared__ int s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
// Reduce best matches and find the best of them all
for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
if (threadIdx.y < i) {
int dist = s_dist[sid + i];
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = s_idx[sid + i];
}
__syncthreads();
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14select_matchesPKjPKijji
.globl _Z14select_matchesPKjPKijji
.p2align 8
.type _Z14select_matchesPKjPKijji,@function
_Z14select_matchesPKjPKijji:
s_load_b32 s0, s[0:1], 0x2c
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s0, 16
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_u32 s0, 2
s_cbranch_scc1 .LBB0_7
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_and_b32 s0, 0xffff, s0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u32_u24 v0, v0, s0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v2, 2, v0
v_add_nc_u32_e32 v3, 0x400, v2
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_4
.p2align 6
.LBB0_2:
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_lt_u32 s0, 4
s_mov_b32 s0, s1
s_cbranch_scc1 .LBB0_7
.LBB0_4:
s_lshr_b32 s1, s0, 1
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s1, v1
s_cbranch_execz .LBB0_3
v_add_nc_u32_e32 v4, s1, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b32_e32 v5, 2, v4
ds_load_b32 v4, v5 offset:1024
ds_load_b32 v6, v3
s_waitcnt lgkmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v4, v6
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_2
ds_load_b32 v5, v5
ds_store_b32 v3, v4
s_waitcnt lgkmcnt(1)
ds_store_b32 v2, v5
s_branch .LBB0_2
.LBB0_7:
s_set_inst_prefetch_distance 0x2
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14select_matchesPKjPKijji
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 3
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14select_matchesPKjPKijji, .Lfunc_end0-_Z14select_matchesPKjPKijji
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14select_matchesPKjPKijji
.private_segment_fixed_size: 0
.sgpr_count: 5
.sgpr_spill_count: 0
.symbol: _Z14select_matchesPKjPKijji.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#define THREADS 256
__global__ void select_matches(
const unsigned* in_idx,
const int* in_dist,
const unsigned nfeat,
const unsigned nelem,
const int max_dist)
{
unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
__shared__ int s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
// Reduce best matches and find the best of them all
for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
if (threadIdx.y < i) {
int dist = s_dist[sid + i];
if (dist < s_dist[sid]) {
s_dist[sid] = dist;
s_idx[sid] = s_idx[sid + i];
}
__syncthreads();
}
}
} | .text
.file "select_matches.hip"
.globl _Z29__device_stub__select_matchesPKjPKijji # -- Begin function _Z29__device_stub__select_matchesPKjPKijji
.p2align 4, 0x90
.type _Z29__device_stub__select_matchesPKjPKijji,@function
_Z29__device_stub__select_matchesPKjPKijji: # @_Z29__device_stub__select_matchesPKjPKijji
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14select_matchesPKjPKijji, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__select_matchesPKjPKijji, .Lfunc_end0-_Z29__device_stub__select_matchesPKjPKijji
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14select_matchesPKjPKijji, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14select_matchesPKjPKijji,@object # @_Z14select_matchesPKjPKijji
.section .rodata,"a",@progbits
.globl _Z14select_matchesPKjPKijji
.p2align 3, 0x0
_Z14select_matchesPKjPKijji:
.quad _Z29__device_stub__select_matchesPKjPKijji
.size _Z14select_matchesPKjPKijji, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14select_matchesPKjPKijji"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__select_matchesPKjPKijji
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14select_matchesPKjPKijji
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z14select_matchesPKjPKijji
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ ULDC UR4, c[0x0][0x4] ; /* 0x0000010000047ab9 */
/* 0x000fe40000000800 */
/*0020*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*0030*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf05270 */
/*0040*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0050*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e220000002200 */
/*0060*/ IMAD.U32 R0, RZ, RZ, UR4 ; /* 0x00000004ff007e24 */
/* 0x000fc6000f8e00ff */
/*0070*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e240000002100 */
/*0080*/ IMAD R2, R2, c[0x0][0x4], R7 ; /* 0x0000010002027a24 */
/* 0x001fc800078e0207 */
/*0090*/ IMAD.SHL.U32 R3, R2, 0x4, RZ ; /* 0x0000000402037824 */
/* 0x000fe400078e00ff */
/*00a0*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*00b0*/ @P0 BRA 0x150 ; /* 0x0000009000000947 */
/* 0x001fea0003800000 */
/*00c0*/ IMAD R6, R0, 0x4, R3 ; /* 0x0000000400067824 */
/* 0x000fe200078e0203 */
/*00d0*/ LDS R4, [R2.X4] ; /* 0x0000000002047984 */
/* 0x000fe20000004800 */
/*00e0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe60003800000 */
/*00f0*/ LDS R9, [R6] ; /* 0x0000000006097984 */
/* 0x000e240000000800 */
/*0100*/ ISETP.GE.AND P0, PT, R9, R4, PT ; /* 0x000000040900720c */
/* 0x001fda0003f06270 */
/*0110*/ @!P0 STS [R2.X4], R9 ; /* 0x0000000902008388 */
/* 0x000fe80000004800 */
/*0120*/ @!P0 LDS R5, [R6+0x400] ; /* 0x0004000006058984 */
/* 0x000e280000000800 */
/*0130*/ @!P0 STS [R2.X4+0x400], R5 ; /* 0x0004000502008388 */
/* 0x0011e80000004800 */
/*0140*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0150*/ SHF.R.U32.HI R0, RZ, 0x1, R0 ; /* 0x00000001ff007819 */
/* 0x000fc80000011600 */
/*0160*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0170*/ @P0 BRA 0xa0 ; /* 0xffffff2000000947 */
/* 0x000fea000383ffff */
/*0180*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0190*/ BRA 0x190; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14select_matchesPKjPKijji
.globl _Z14select_matchesPKjPKijji
.p2align 8
.type _Z14select_matchesPKjPKijji,@function
_Z14select_matchesPKjPKijji:
s_load_b32 s0, s[0:1], 0x2c
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s0, 16
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_u32 s0, 2
s_cbranch_scc1 .LBB0_7
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_and_b32 s0, 0xffff, s0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u32_u24 v0, v0, s0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v2, 2, v0
v_add_nc_u32_e32 v3, 0x400, v2
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_4
.p2align 6
.LBB0_2:
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_lt_u32 s0, 4
s_mov_b32 s0, s1
s_cbranch_scc1 .LBB0_7
.LBB0_4:
s_lshr_b32 s1, s0, 1
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s1, v1
s_cbranch_execz .LBB0_3
v_add_nc_u32_e32 v4, s1, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b32_e32 v5, 2, v4
ds_load_b32 v4, v5 offset:1024
ds_load_b32 v6, v3
s_waitcnt lgkmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v4, v6
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_2
ds_load_b32 v5, v5
ds_store_b32 v3, v4
s_waitcnt lgkmcnt(1)
ds_store_b32 v2, v5
s_branch .LBB0_2
.LBB0_7:
s_set_inst_prefetch_distance 0x2
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14select_matchesPKjPKijji
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 3
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14select_matchesPKjPKijji, .Lfunc_end0-_Z14select_matchesPKjPKijji
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14select_matchesPKjPKijji
.private_segment_fixed_size: 0
.sgpr_count: 5
.sgpr_spill_count: 0
.symbol: _Z14select_matchesPKjPKijji.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0014f26e_00000000-6_select_matches.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji
.type _Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji, @function
_Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14select_matchesPKjPKijji(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji, .-_Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji
.globl _Z14select_matchesPKjPKijji
.type _Z14select_matchesPKjPKijji, @function
_Z14select_matchesPKjPKijji:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z14select_matchesPKjPKijjiPKjPKijji
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14select_matchesPKjPKijji, .-_Z14select_matchesPKjPKijji
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14select_matchesPKjPKijji"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14select_matchesPKjPKijji(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "select_matches.hip"
.globl _Z29__device_stub__select_matchesPKjPKijji # -- Begin function _Z29__device_stub__select_matchesPKjPKijji
.p2align 4, 0x90
.type _Z29__device_stub__select_matchesPKjPKijji,@function
_Z29__device_stub__select_matchesPKjPKijji: # @_Z29__device_stub__select_matchesPKjPKijji
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14select_matchesPKjPKijji, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__select_matchesPKjPKijji, .Lfunc_end0-_Z29__device_stub__select_matchesPKjPKijji
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14select_matchesPKjPKijji, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14select_matchesPKjPKijji,@object # @_Z14select_matchesPKjPKijji
.section .rodata,"a",@progbits
.globl _Z14select_matchesPKjPKijji
.p2align 3, 0x0
_Z14select_matchesPKjPKijji:
.quad _Z29__device_stub__select_matchesPKjPKijji
.size _Z14select_matchesPKjPKijji, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14select_matchesPKjPKijji"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__select_matchesPKjPKijji
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14select_matchesPKjPKijji
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Copyright (C) 2018 NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#define TO_NEXT_MULT_P2(x,p) (((x)+((p)-1)) & ~(p-1))
__host__ __device__ ulonglong2 operator<<(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(0ull, a.x << (l-64));
} else {
b = make_ulonglong2(a.x << l, (a.y << l) | (a.x >> (8*sizeof(a.x)-l)));
}
return b;
}
__host__ __device__ ulonglong2 operator>>(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(a.y >> (l-64), 0ull);
} else {
b = make_ulonglong2((a.x >> l) | (a.y << (8*sizeof(a.y)-l)), a.y >> l);
}
return b;
}
__host__ __device__ ulonglong2 operator|(ulonglong2 a, ulonglong2 b) {
return make_ulonglong2(a.x | b.x, a.y | b.y);
}
template<int BDIM_X,
int MAXIOB,
int SH_BYTE_X_BL,
typename LDST_T>
__device__ void memcpy_d(const size_t n,
const unsigned char *__restrict__ src,
unsigned char *__restrict__ dst,
unsigned char *__restrict__ __sh) {
const int tid = threadIdx.x;
const unsigned long long srcULL = reinterpret_cast<unsigned long long>(src);
const unsigned long long dstULL = reinterpret_cast<unsigned long long>(dst);
int srcOff = (MAXIOB - srcULL) & (MAXIOB-1);
int dstOff = (MAXIOB - dstULL) & (MAXIOB-1);
const int ELXTH = SH_BYTE_X_BL/(BDIM_X*MAXIOB);
LDST_T *__ptrSH = reinterpret_cast<LDST_T *>(__sh);
if (srcOff == dstOff) {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = (n-srcOff) / sizeof(*__ptrLDG);
int remBytes = (n-srcOff) % sizeof(*__ptrLDG);
LDST_T __loc[ELXTH];
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (j*BDIM_X+tid < nread) {
__loc[j] = __ptrLDG[j*BDIM_X+tid];
}
}
for(int i = 0; i < nread; i += BDIM_X*ELXTH) {
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
__ptrSH[j*BDIM_X+tid] = __loc[j];
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + BDIM_X*ELXTH + j*BDIM_X + tid < nread) {
__loc[j] = __ptrLDG[i + BDIM_X*ELXTH + j*BDIM_X + tid];
}
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + j*BDIM_X + tid < nread) {
__ptrSTG[i + j*BDIM_X + tid] = __ptrSH[j*BDIM_X+tid];
}
}
}
if (tid < srcOff+remBytes) {
const int off = (tid < srcOff) ? tid : n-remBytes+tid-srcOff;
dst[off] = src[off];
}
} else {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = ((n-srcOff) / sizeof(*__ptrLDG));
int remBytes = ((n-srcOff) % sizeof(*__ptrLDG));
int lowShft, uppShft;
if (srcOff > dstOff) {
uppShft = (srcOff-dstOff)*8;
lowShft = (8*sizeof(*__ptrLDG)) - uppShft;
__ptrSTG++;
} else {
lowShft = (dstOff-srcOff)*8;
uppShft = (8*sizeof(*__ptrLDG)) - lowShft;
}
for(int i = 0; i < nread-1; i += BDIM_X) {
if (i+tid < nread-1) {
const LDST_T low = __ptrLDG[i+tid];
const LDST_T upp = __ptrLDG[i+tid+1];
__ptrSTG[i+tid] = (low >> lowShft) | (upp << uppShft);
}
}
remBytes += sizeof(*__ptrLDG);
if (srcOff > dstOff) {
dstOff += sizeof(*__ptrLDG);
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
} else {
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
}
}
}
template<int BDIM_X,
int MAXIOB>
__global__ void memcpy_k(const size_t *sizes,
const unsigned char *const __restrict__ *__restrict__ in,
unsigned char *__restrict__ *__restrict__ out) {
const int SH_BYTE_X_BL = 32768;
__shared__ unsigned char __sh[SH_BYTE_X_BL];
switch(MAXIOB) {
case 4:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned int>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 8:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned long long>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 16:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, ulonglong2>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
}
return;
}
#define NTHREADS 1024
void batched_d2d_memcpy(void** out_ptrs, void** in_ptrs, size_t* sizes, int num_copies, cudaStream_t stream)
{
memcpy_k<NTHREADS, 16><<<num_copies, NTHREADS, 0, stream>>>(sizes, (unsigned char**) in_ptrs, (unsigned char**) out_ptrs);
} | .file "tmpxft_001837f3_00000000-6_batched_memcpy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh, @function
_ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh:
.LFB2058:
.cfi_startproc
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsi, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
movq %rdx, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh, .-_ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh
.section .text._Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,"axG",@progbits,_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,comdat
.weak _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.type _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, @function
_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh:
.LFB2107:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2107:
.size _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, .-_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.text
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2036:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2036:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Zls10ulonglong2i
.type _Zls10ulonglong2i, @function
_Zls10ulonglong2i:
.LFB2027:
.cfi_startproc
endbr64
cmpl $64, %edx
jle .L12
leal -64(%rdx), %ecx
salq %cl, %rdi
movq %rdi, %rsi
movl $0, %eax
.L13:
movq %rsi, %rdx
ret
.L12:
movl %edx, %ecx
salq %cl, %rsi
movl $64, %ecx
subl %edx, %ecx
movq %rdi, %rax
shrq %cl, %rax
orq %rax, %rsi
movq %rdi, %rax
movl %edx, %ecx
salq %cl, %rax
jmp .L13
.cfi_endproc
.LFE2027:
.size _Zls10ulonglong2i, .-_Zls10ulonglong2i
.globl _Zrs10ulonglong2i
.type _Zrs10ulonglong2i, @function
_Zrs10ulonglong2i:
.LFB2028:
.cfi_startproc
endbr64
cmpl $64, %edx
jle .L15
leal -64(%rdx), %ecx
movq %rsi, %rax
shrq %cl, %rax
movl $0, %r8d
.L16:
movq %r8, %rdx
ret
.L15:
movq %rsi, %r8
movl %edx, %ecx
shrq %cl, %r8
shrq %cl, %rdi
movl $64, %ecx
subl %edx, %ecx
salq %cl, %rsi
movq %rdi, %rax
orq %rsi, %rax
jmp .L16
.cfi_endproc
.LFE2028:
.size _Zrs10ulonglong2i, .-_Zrs10ulonglong2i
.globl _Zor10ulonglong2S_
.type _Zor10ulonglong2S_, @function
_Zor10ulonglong2S_:
.LFB2029:
.cfi_startproc
endbr64
movq %rdx, %rax
orq %rcx, %rsi
movq %rsi, %rdx
orq %rdi, %rax
ret
.cfi_endproc
.LFE2029:
.size _Zor10ulonglong2S_, .-_Zor10ulonglong2S_
.globl _Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st
.type _Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st, @function
_Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st:
.LFB2033:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r12
movq %rsi, %rbp
movq %rdx, %rbx
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
movl %ecx, 8(%rsp)
movl $1, 12(%rsp)
movq %r8, %r9
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L18:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh
jmp .L18
.cfi_endproc
.LFE2033:
.size _Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st, .-_Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Copyright (C) 2018 NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#define TO_NEXT_MULT_P2(x,p) (((x)+((p)-1)) & ~(p-1))
__host__ __device__ ulonglong2 operator<<(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(0ull, a.x << (l-64));
} else {
b = make_ulonglong2(a.x << l, (a.y << l) | (a.x >> (8*sizeof(a.x)-l)));
}
return b;
}
__host__ __device__ ulonglong2 operator>>(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(a.y >> (l-64), 0ull);
} else {
b = make_ulonglong2((a.x >> l) | (a.y << (8*sizeof(a.y)-l)), a.y >> l);
}
return b;
}
__host__ __device__ ulonglong2 operator|(ulonglong2 a, ulonglong2 b) {
return make_ulonglong2(a.x | b.x, a.y | b.y);
}
template<int BDIM_X,
int MAXIOB,
int SH_BYTE_X_BL,
typename LDST_T>
__device__ void memcpy_d(const size_t n,
const unsigned char *__restrict__ src,
unsigned char *__restrict__ dst,
unsigned char *__restrict__ __sh) {
const int tid = threadIdx.x;
const unsigned long long srcULL = reinterpret_cast<unsigned long long>(src);
const unsigned long long dstULL = reinterpret_cast<unsigned long long>(dst);
int srcOff = (MAXIOB - srcULL) & (MAXIOB-1);
int dstOff = (MAXIOB - dstULL) & (MAXIOB-1);
const int ELXTH = SH_BYTE_X_BL/(BDIM_X*MAXIOB);
LDST_T *__ptrSH = reinterpret_cast<LDST_T *>(__sh);
if (srcOff == dstOff) {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = (n-srcOff) / sizeof(*__ptrLDG);
int remBytes = (n-srcOff) % sizeof(*__ptrLDG);
LDST_T __loc[ELXTH];
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (j*BDIM_X+tid < nread) {
__loc[j] = __ptrLDG[j*BDIM_X+tid];
}
}
for(int i = 0; i < nread; i += BDIM_X*ELXTH) {
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
__ptrSH[j*BDIM_X+tid] = __loc[j];
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + BDIM_X*ELXTH + j*BDIM_X + tid < nread) {
__loc[j] = __ptrLDG[i + BDIM_X*ELXTH + j*BDIM_X + tid];
}
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + j*BDIM_X + tid < nread) {
__ptrSTG[i + j*BDIM_X + tid] = __ptrSH[j*BDIM_X+tid];
}
}
}
if (tid < srcOff+remBytes) {
const int off = (tid < srcOff) ? tid : n-remBytes+tid-srcOff;
dst[off] = src[off];
}
} else {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = ((n-srcOff) / sizeof(*__ptrLDG));
int remBytes = ((n-srcOff) % sizeof(*__ptrLDG));
int lowShft, uppShft;
if (srcOff > dstOff) {
uppShft = (srcOff-dstOff)*8;
lowShft = (8*sizeof(*__ptrLDG)) - uppShft;
__ptrSTG++;
} else {
lowShft = (dstOff-srcOff)*8;
uppShft = (8*sizeof(*__ptrLDG)) - lowShft;
}
for(int i = 0; i < nread-1; i += BDIM_X) {
if (i+tid < nread-1) {
const LDST_T low = __ptrLDG[i+tid];
const LDST_T upp = __ptrLDG[i+tid+1];
__ptrSTG[i+tid] = (low >> lowShft) | (upp << uppShft);
}
}
remBytes += sizeof(*__ptrLDG);
if (srcOff > dstOff) {
dstOff += sizeof(*__ptrLDG);
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
} else {
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
}
}
}
template<int BDIM_X,
int MAXIOB>
__global__ void memcpy_k(const size_t *sizes,
const unsigned char *const __restrict__ *__restrict__ in,
unsigned char *__restrict__ *__restrict__ out) {
const int SH_BYTE_X_BL = 32768;
__shared__ unsigned char __sh[SH_BYTE_X_BL];
switch(MAXIOB) {
case 4:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned int>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 8:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned long long>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 16:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, ulonglong2>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
}
return;
}
#define NTHREADS 1024
void batched_d2d_memcpy(void** out_ptrs, void** in_ptrs, size_t* sizes, int num_copies, cudaStream_t stream)
{
memcpy_k<NTHREADS, 16><<<num_copies, NTHREADS, 0, stream>>>(sizes, (unsigned char**) in_ptrs, (unsigned char**) out_ptrs);
} | #include <hip/hip_runtime.h>
// Copyright (C) 2018 NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#define TO_NEXT_MULT_P2(x,p) (((x)+((p)-1)) & ~(p-1))
__host__ __device__ ulonglong2 operator<<(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(0ull, a.x << (l-64));
} else {
b = make_ulonglong2(a.x << l, (a.y << l) | (a.x >> (8*sizeof(a.x)-l)));
}
return b;
}
__host__ __device__ ulonglong2 operator>>(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(a.y >> (l-64), 0ull);
} else {
b = make_ulonglong2((a.x >> l) | (a.y << (8*sizeof(a.y)-l)), a.y >> l);
}
return b;
}
__host__ __device__ ulonglong2 operator|(ulonglong2 a, ulonglong2 b) {
return make_ulonglong2(a.x | b.x, a.y | b.y);
}
template<int BDIM_X,
int MAXIOB,
int SH_BYTE_X_BL,
typename LDST_T>
__device__ void memcpy_d(const size_t n,
const unsigned char *__restrict__ src,
unsigned char *__restrict__ dst,
unsigned char *__restrict__ __sh) {
const int tid = threadIdx.x;
const unsigned long long srcULL = reinterpret_cast<unsigned long long>(src);
const unsigned long long dstULL = reinterpret_cast<unsigned long long>(dst);
int srcOff = (MAXIOB - srcULL) & (MAXIOB-1);
int dstOff = (MAXIOB - dstULL) & (MAXIOB-1);
const int ELXTH = SH_BYTE_X_BL/(BDIM_X*MAXIOB);
LDST_T *__ptrSH = reinterpret_cast<LDST_T *>(__sh);
if (srcOff == dstOff) {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = (n-srcOff) / sizeof(*__ptrLDG);
int remBytes = (n-srcOff) % sizeof(*__ptrLDG);
LDST_T __loc[ELXTH];
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (j*BDIM_X+tid < nread) {
__loc[j] = __ptrLDG[j*BDIM_X+tid];
}
}
for(int i = 0; i < nread; i += BDIM_X*ELXTH) {
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
__ptrSH[j*BDIM_X+tid] = __loc[j];
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + BDIM_X*ELXTH + j*BDIM_X + tid < nread) {
__loc[j] = __ptrLDG[i + BDIM_X*ELXTH + j*BDIM_X + tid];
}
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + j*BDIM_X + tid < nread) {
__ptrSTG[i + j*BDIM_X + tid] = __ptrSH[j*BDIM_X+tid];
}
}
}
if (tid < srcOff+remBytes) {
const int off = (tid < srcOff) ? tid : n-remBytes+tid-srcOff;
dst[off] = src[off];
}
} else {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = ((n-srcOff) / sizeof(*__ptrLDG));
int remBytes = ((n-srcOff) % sizeof(*__ptrLDG));
int lowShft, uppShft;
if (srcOff > dstOff) {
uppShft = (srcOff-dstOff)*8;
lowShft = (8*sizeof(*__ptrLDG)) - uppShft;
__ptrSTG++;
} else {
lowShft = (dstOff-srcOff)*8;
uppShft = (8*sizeof(*__ptrLDG)) - lowShft;
}
for(int i = 0; i < nread-1; i += BDIM_X) {
if (i+tid < nread-1) {
const LDST_T low = __ptrLDG[i+tid];
const LDST_T upp = __ptrLDG[i+tid+1];
__ptrSTG[i+tid] = (low >> lowShft) | (upp << uppShft);
}
}
remBytes += sizeof(*__ptrLDG);
if (srcOff > dstOff) {
dstOff += sizeof(*__ptrLDG);
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
} else {
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
}
}
}
template<int BDIM_X,
int MAXIOB>
__global__ void memcpy_k(const size_t *sizes,
const unsigned char *const __restrict__ *__restrict__ in,
unsigned char *__restrict__ *__restrict__ out) {
const int SH_BYTE_X_BL = 32768;
__shared__ unsigned char __sh[SH_BYTE_X_BL];
switch(MAXIOB) {
case 4:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned int>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 8:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned long long>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 16:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, ulonglong2>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
}
return;
}
#define NTHREADS 1024
void batched_d2d_memcpy(void** out_ptrs, void** in_ptrs, size_t* sizes, int num_copies, hipStream_t stream)
{
memcpy_k<NTHREADS, 16><<<num_copies, NTHREADS, 0, stream>>>(sizes, (unsigned char**) in_ptrs, (unsigned char**) out_ptrs);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// Copyright (C) 2018 NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#define TO_NEXT_MULT_P2(x,p) (((x)+((p)-1)) & ~(p-1))
__host__ __device__ ulonglong2 operator<<(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(0ull, a.x << (l-64));
} else {
b = make_ulonglong2(a.x << l, (a.y << l) | (a.x >> (8*sizeof(a.x)-l)));
}
return b;
}
__host__ __device__ ulonglong2 operator>>(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(a.y >> (l-64), 0ull);
} else {
b = make_ulonglong2((a.x >> l) | (a.y << (8*sizeof(a.y)-l)), a.y >> l);
}
return b;
}
__host__ __device__ ulonglong2 operator|(ulonglong2 a, ulonglong2 b) {
return make_ulonglong2(a.x | b.x, a.y | b.y);
}
template<int BDIM_X,
int MAXIOB,
int SH_BYTE_X_BL,
typename LDST_T>
__device__ void memcpy_d(const size_t n,
const unsigned char *__restrict__ src,
unsigned char *__restrict__ dst,
unsigned char *__restrict__ __sh) {
const int tid = threadIdx.x;
const unsigned long long srcULL = reinterpret_cast<unsigned long long>(src);
const unsigned long long dstULL = reinterpret_cast<unsigned long long>(dst);
int srcOff = (MAXIOB - srcULL) & (MAXIOB-1);
int dstOff = (MAXIOB - dstULL) & (MAXIOB-1);
const int ELXTH = SH_BYTE_X_BL/(BDIM_X*MAXIOB);
LDST_T *__ptrSH = reinterpret_cast<LDST_T *>(__sh);
if (srcOff == dstOff) {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = (n-srcOff) / sizeof(*__ptrLDG);
int remBytes = (n-srcOff) % sizeof(*__ptrLDG);
LDST_T __loc[ELXTH];
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (j*BDIM_X+tid < nread) {
__loc[j] = __ptrLDG[j*BDIM_X+tid];
}
}
for(int i = 0; i < nread; i += BDIM_X*ELXTH) {
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
__ptrSH[j*BDIM_X+tid] = __loc[j];
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + BDIM_X*ELXTH + j*BDIM_X + tid < nread) {
__loc[j] = __ptrLDG[i + BDIM_X*ELXTH + j*BDIM_X + tid];
}
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + j*BDIM_X + tid < nread) {
__ptrSTG[i + j*BDIM_X + tid] = __ptrSH[j*BDIM_X+tid];
}
}
}
if (tid < srcOff+remBytes) {
const int off = (tid < srcOff) ? tid : n-remBytes+tid-srcOff;
dst[off] = src[off];
}
} else {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = ((n-srcOff) / sizeof(*__ptrLDG));
int remBytes = ((n-srcOff) % sizeof(*__ptrLDG));
int lowShft, uppShft;
if (srcOff > dstOff) {
uppShft = (srcOff-dstOff)*8;
lowShft = (8*sizeof(*__ptrLDG)) - uppShft;
__ptrSTG++;
} else {
lowShft = (dstOff-srcOff)*8;
uppShft = (8*sizeof(*__ptrLDG)) - lowShft;
}
for(int i = 0; i < nread-1; i += BDIM_X) {
if (i+tid < nread-1) {
const LDST_T low = __ptrLDG[i+tid];
const LDST_T upp = __ptrLDG[i+tid+1];
__ptrSTG[i+tid] = (low >> lowShft) | (upp << uppShft);
}
}
remBytes += sizeof(*__ptrLDG);
if (srcOff > dstOff) {
dstOff += sizeof(*__ptrLDG);
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
} else {
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
}
}
}
template<int BDIM_X,
int MAXIOB>
__global__ void memcpy_k(const size_t *sizes,
const unsigned char *const __restrict__ *__restrict__ in,
unsigned char *__restrict__ *__restrict__ out) {
const int SH_BYTE_X_BL = 32768;
__shared__ unsigned char __sh[SH_BYTE_X_BL];
switch(MAXIOB) {
case 4:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned int>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 8:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned long long>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 16:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, ulonglong2>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
}
return;
}
#define NTHREADS 1024
void batched_d2d_memcpy(void** out_ptrs, void** in_ptrs, size_t* sizes, int num_copies, hipStream_t stream)
{
memcpy_k<NTHREADS, 16><<<num_copies, NTHREADS, 0, stream>>>(sizes, (unsigned char**) in_ptrs, (unsigned char**) out_ptrs);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,"axG",@progbits,_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,comdat
.protected _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.globl _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.p2align 8
.type _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,@function
_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh:
s_clause 0x1
s_load_b128 s[4:7], s[2:3], 0x0
s_load_b64 s[2:3], s[2:3], 0x10
s_mov_b32 s8, s15
s_mov_b32 s9, 0
v_and_b32_e32 v15, 0x3ff, v0
s_lshl_b64 s[8:9], s[8:9], 3
s_waitcnt lgkmcnt(0)
s_add_u32 s10, s4, s8
s_addc_u32 s11, s5, s9
s_add_u32 s4, s6, s8
s_addc_u32 s5, s7, s9
s_add_u32 s2, s2, s8
s_addc_u32 s3, s3, s9
s_load_b64 s[6:7], s[4:5], 0x0
s_load_b64 s[4:5], s[2:3], 0x0
s_load_b64 s[8:9], s[10:11], 0x0
s_waitcnt lgkmcnt(0)
s_sub_i32 s2, 0, s6
s_sub_i32 s10, 0, s4
s_and_b32 s3, s2, 15
s_and_b32 s10, s10, 15
s_mov_b32 s2, -1
s_cmp_lg_u32 s10, s3
s_cbranch_scc0 .LBB0_27
s_add_u32 s2, s4, s10
s_addc_u32 s21, s5, 0
s_cmp_ge_u32 s10, s3
s_cselect_b32 s11, -1, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s11
s_cbranch_vccz .LBB0_3
s_sub_i32 s12, s10, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b32 s13, s12, 3
s_sub_i32 s12, 0x80, s13
s_cbranch_execz .LBB0_4
s_branch .LBB0_5
.LBB0_3:
.LBB0_4:
s_sub_i32 s12, s3, s10
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b32 s12, s12, 3
s_sub_i32 s13, 0x80, s12
s_add_u32 s2, s2, 16
s_addc_u32 s21, s21, 0
.LBB0_5:
s_sub_u32 s14, s8, s3
s_subb_u32 s15, s9, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_alignbit_b32 v1, s15, s14, 4
v_cmp_gt_i32_e32 vcc_lo, 2, v1
s_cbranch_vccnz .LBB0_18
v_add_nc_u32_e32 v16, -1, v1
v_lshlrev_b32_e32 v1, 4, v15
s_cmpk_lt_i32 s13, 0x41
s_cselect_b32 s15, -1, 0
s_sub_i32 s16, 64, s13
s_sub_i32 s17, s13, 64
s_cmpk_lt_i32 s12, 0x41
v_add_co_u32 v9, s2, s2, v1
s_cselect_b32 s18, -1, 0
s_sub_i32 s19, 64, s12
s_sub_i32 s20, s12, 64
v_add_co_ci_u32_e64 v10, null, s21, 0, s2
s_add_u32 s2, s6, s3
s_addc_u32 s21, s7, 0
v_add_co_u32 v11, s2, s2, v1
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, null, s21, 0, s2
s_mov_b32 s21, 0
s_branch .LBB0_9
.LBB0_7:
s_waitcnt vmcnt(0) lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_or_b32_e32 v2, v8, v14
v_or_b32_e32 v1, v7, v13
v_or_b32_e32 v4, v4, v6
v_or_b32_e32 v3, v3, v5
flat_store_b128 v[9:10], v[1:4]
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s2
v_add_co_u32 v9, vcc_lo, v9, 0x4000
s_addk_i32 s21, 0x400
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v10, vcc_lo
v_cmp_ge_i32_e32 vcc_lo, s21, v16
v_add_co_u32 v11, s2, v11, 0x4000
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, s2, 0, v12, s2
s_cbranch_vccnz .LBB0_18
.LBB0_9:
v_add_nc_u32_e32 v1, s21, v15
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e64 v1, v16
s_cbranch_execz .LBB0_8
s_clause 0x1
flat_load_b128 v[5:8], v[11:12]
flat_load_b128 v[1:4], v[11:12] offset:16
s_and_b32 vcc_lo, exec_lo, s15
s_cbranch_vccz .LBB0_12
s_waitcnt vmcnt(1) lgkmcnt(1)
v_lshrrev_b64 v[13:14], s13, v[5:6]
v_lshlrev_b64 v[17:18], s16, v[7:8]
v_lshrrev_b64 v[5:6], s13, v[7:8]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_or_b32_e32 v14, v14, v18
v_or_b32_e32 v13, v13, v17
s_cbranch_execz .LBB0_13
s_branch .LBB0_14
.LBB0_12:
.LBB0_13:
s_waitcnt vmcnt(1) lgkmcnt(1)
v_lshrrev_b64 v[13:14], s17, v[7:8]
v_mov_b32_e32 v5, 0
v_mov_b32_e32 v6, 0
.LBB0_14:
s_and_b32 vcc_lo, exec_lo, s18
s_cbranch_vccz .LBB0_16
s_waitcnt vmcnt(0) lgkmcnt(0)
v_lshlrev_b64 v[3:4], s12, v[3:4]
v_lshrrev_b64 v[17:18], s19, v[1:2]
v_lshlrev_b64 v[7:8], s12, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_or_b32_e32 v4, v4, v18
v_or_b32_e32 v3, v3, v17
s_cbranch_execnz .LBB0_7
s_branch .LBB0_17
.LBB0_16:
.LBB0_17:
s_waitcnt vmcnt(0) lgkmcnt(0)
v_lshlrev_b64 v[3:4], s20, v[1:2]
v_mov_b32_e32 v7, 0
v_mov_b32_e32 v8, 0
s_branch .LBB0_7
.LBB0_18:
s_and_b32 s2, s14, 15
s_and_b32 vcc_lo, exec_lo, s11
s_or_b32 s2, s2, 16
s_mov_b32 s11, -1
s_cbranch_vccz .LBB0_22
s_add_i32 s12, s2, s10
s_mov_b32 s11, exec_lo
v_cmpx_gt_u32_e64 s12, v15
s_cbranch_execz .LBB0_21
v_add_nc_u32_e32 v1, s8, v15
v_cmp_gt_u32_e32 vcc_lo, s10, v15
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_subrev_nc_u32_e32 v1, s12, v1
v_cndmask_b32_e32 v3, v1, v15, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v4, 31, v3
v_add_co_u32 v1, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v4, vcc_lo
flat_load_u8 v5, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v4, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
flat_store_b8 v[1:2], v5
.LBB0_21:
s_or_b32 exec_lo, exec_lo, s11
s_mov_b32 s11, 0
.LBB0_22:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s11
s_cbranch_vccnz .LBB0_26
s_or_b32 s11, s10, 16
s_delay_alu instid0(SALU_CYCLE_1)
s_add_i32 s12, s2, s11
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s12, v15
s_cbranch_execz .LBB0_25
v_add_nc_u32_e32 v1, s8, v15
v_cmp_gt_u32_e32 vcc_lo, s11, v15
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_subrev_nc_u32_e32 v1, s12, v1
v_cndmask_b32_e32 v3, v1, v15, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v4, 31, v3
v_add_co_u32 v1, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v4, vcc_lo
flat_load_u8 v5, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v4, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
flat_store_b8 v[1:2], v5
.LBB0_25:
s_or_b32 exec_lo, exec_lo, s2
.LBB0_26:
s_mov_b32 s2, 0
.LBB0_27:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s2
s_cbranch_vccz .LBB0_47
s_load_b64 s[0:1], s[0:1], 0x4
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v2, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s0, 16
s_delay_alu instid0(VALU_DEP_2)
v_mul_u32_u24_e32 v2, s1, v2
s_mul_i32 s0, s0, s1
s_add_u32 s2, s6, s3
v_mul_lo_u32 v1, s0, v1
s_addc_u32 s11, s7, 0
s_sub_u32 s0, s8, s3
s_subb_u32 s1, s9, 0
s_mov_b32 s9, exec_lo
v_alignbit_b32 v3, s1, s0, 4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v0, v1, v2, v0
v_readfirstlane_b32 s1, v3
s_delay_alu instid0(VALU_DEP_2)
v_lshl_add_u32 v4, v0, 5, 0x8000
v_cmpx_lt_i32_e64 v15, v3
s_cbranch_execz .LBB0_30
v_lshlrev_b32_e32 v0, 4, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s12, s2, v0
v_add_co_ci_u32_e64 v1, null, s11, 0, s12
flat_load_b128 v[0:3], v[0:1]
s_waitcnt vmcnt(0) lgkmcnt(0)
ds_store_b128 v4, v[0:3]
.LBB0_30:
s_or_b32 exec_lo, exec_lo, s9
v_or_b32_e32 v0, 0x400, v15
s_mov_b32 s9, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s1, v0
s_cbranch_execz .LBB0_32
v_lshlrev_b32_e32 v1, 4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v1, s2, s2, v1
v_add_co_ci_u32_e64 v2, null, s11, 0, s2
flat_load_b128 v[5:8], v[1:2]
s_waitcnt vmcnt(0) lgkmcnt(0)
ds_store_b128 v4, v[5:8] offset:16
.LBB0_32:
s_or_b32 exec_lo, exec_lo, s9
s_cmp_lt_i32 s1, 1
s_cbranch_scc1 .LBB0_43
v_lshlrev_b32_e32 v5, 4, v15
s_add_u32 s2, s4, s10
s_addc_u32 s9, s5, 0
s_add_u32 s10, s6, s3
s_addc_u32 s11, s7, 0
v_add_co_u32 v2, s10, s10, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v3, null, s11, 0, s10
v_lshlrev_b32_e32 v7, 4, v0
v_add_co_u32 v0, s2, s2, v5
v_add_co_u32 v2, vcc_lo, v2, 0xc000
v_add_nc_u32_e32 v6, 16, v4
v_add_co_ci_u32_e64 v1, null, s9, 0, s2
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_mov_b32 s2, 0
s_branch .LBB0_35
.LBB0_34:
s_or_b32 exec_lo, exec_lo, s9
v_add_co_u32 v0, vcc_lo, v0, 0x8000
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, 0x8000
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_addk_i32 s2, 0x800
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s2, s1
s_cbranch_scc1 .LBB0_43
.LBB0_35:
ds_load_b128 v[8:11], v4
ds_load_b128 v[16:19], v6
v_add3_u32 v12, v15, s2, 0x800
s_mov_b32 s9, exec_lo
s_waitcnt lgkmcnt(1)
ds_store_b128 v5, v[8:11]
s_waitcnt lgkmcnt(1)
ds_store_b128 v7, v[16:19]
v_cmpx_gt_i32_e64 s1, v12
s_cbranch_execz .LBB0_37
v_add_co_u32 v8, vcc_lo, 0xffffc000, v2
v_add_co_ci_u32_e32 v9, vcc_lo, -1, v3, vcc_lo
flat_load_b128 v[8:11], v[8:9]
s_waitcnt vmcnt(0) lgkmcnt(0)
ds_store_b128 v4, v[8:11]
.LBB0_37:
s_or_b32 exec_lo, exec_lo, s9
v_add3_u32 v8, v15, s2, 0xc00
s_mov_b32 s9, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s1, v8
s_cbranch_execz .LBB0_39
flat_load_b128 v[8:11], v[2:3]
s_waitcnt vmcnt(0) lgkmcnt(0)
ds_store_b128 v6, v[8:11]
.LBB0_39:
s_or_b32 exec_lo, exec_lo, s9
v_add_nc_u32_e32 v8, s2, v15
s_mov_b32 s9, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s1, v8
s_cbranch_execz .LBB0_41
ds_load_b128 v[8:11], v5
s_waitcnt lgkmcnt(0)
flat_store_b128 v[0:1], v[8:11]
.LBB0_41:
s_or_b32 exec_lo, exec_lo, s9
v_add3_u32 v8, v15, s2, 0x400
s_mov_b32 s9, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s1, v8
s_cbranch_execz .LBB0_34
ds_load_b128 v[8:11], v7
v_add_co_u32 v12, vcc_lo, 0x4000, v0
v_add_co_ci_u32_e32 v13, vcc_lo, 0, v1, vcc_lo
s_waitcnt lgkmcnt(0)
flat_store_b128 v[12:13], v[8:11]
s_branch .LBB0_34
.LBB0_43:
s_and_b32 s1, s0, 15
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s1, s1, s3
v_cmp_gt_u32_e32 vcc_lo, s1, v15
s_and_saveexec_b32 s1, vcc_lo
s_cbranch_execz .LBB0_47
s_mov_b32 s1, exec_lo
v_cmpx_le_u32_e64 s3, v15
v_add_nc_u32_e32 v0, s8, v15
s_and_b32 s0, s0, 15
s_delay_alu instid0(SALU_CYCLE_1)
s_add_i32 s0, s0, s3
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_subrev_nc_u32_e32 v15, s0, v0
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v15
v_add_co_u32 v0, vcc_lo, s6, v15
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v2, vcc_lo
flat_load_u8 v3, v[0:1]
v_add_co_u32 v0, vcc_lo, s4, v15
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v2, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
flat_store_b8 v[0:1], v3
.LBB0_47:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.amdhsa_group_segment_fixed_size 65536
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 1
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 20
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,"axG",@progbits,_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,comdat
.Lfunc_end0:
.size _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, .Lfunc_end0-_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 65536
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 20
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// Copyright (C) 2018 NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#define TO_NEXT_MULT_P2(x,p) (((x)+((p)-1)) & ~(p-1))
__host__ __device__ ulonglong2 operator<<(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(0ull, a.x << (l-64));
} else {
b = make_ulonglong2(a.x << l, (a.y << l) | (a.x >> (8*sizeof(a.x)-l)));
}
return b;
}
__host__ __device__ ulonglong2 operator>>(ulonglong2 a, int l) {
ulonglong2 b;
if (l > 64) {
b = make_ulonglong2(a.y >> (l-64), 0ull);
} else {
b = make_ulonglong2((a.x >> l) | (a.y << (8*sizeof(a.y)-l)), a.y >> l);
}
return b;
}
__host__ __device__ ulonglong2 operator|(ulonglong2 a, ulonglong2 b) {
return make_ulonglong2(a.x | b.x, a.y | b.y);
}
template<int BDIM_X,
int MAXIOB,
int SH_BYTE_X_BL,
typename LDST_T>
__device__ void memcpy_d(const size_t n,
const unsigned char *__restrict__ src,
unsigned char *__restrict__ dst,
unsigned char *__restrict__ __sh) {
const int tid = threadIdx.x;
const unsigned long long srcULL = reinterpret_cast<unsigned long long>(src);
const unsigned long long dstULL = reinterpret_cast<unsigned long long>(dst);
int srcOff = (MAXIOB - srcULL) & (MAXIOB-1);
int dstOff = (MAXIOB - dstULL) & (MAXIOB-1);
const int ELXTH = SH_BYTE_X_BL/(BDIM_X*MAXIOB);
LDST_T *__ptrSH = reinterpret_cast<LDST_T *>(__sh);
if (srcOff == dstOff) {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = (n-srcOff) / sizeof(*__ptrLDG);
int remBytes = (n-srcOff) % sizeof(*__ptrLDG);
LDST_T __loc[ELXTH];
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (j*BDIM_X+tid < nread) {
__loc[j] = __ptrLDG[j*BDIM_X+tid];
}
}
for(int i = 0; i < nread; i += BDIM_X*ELXTH) {
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
__ptrSH[j*BDIM_X+tid] = __loc[j];
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + BDIM_X*ELXTH + j*BDIM_X + tid < nread) {
__loc[j] = __ptrLDG[i + BDIM_X*ELXTH + j*BDIM_X + tid];
}
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + j*BDIM_X + tid < nread) {
__ptrSTG[i + j*BDIM_X + tid] = __ptrSH[j*BDIM_X+tid];
}
}
}
if (tid < srcOff+remBytes) {
const int off = (tid < srcOff) ? tid : n-remBytes+tid-srcOff;
dst[off] = src[off];
}
} else {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = ((n-srcOff) / sizeof(*__ptrLDG));
int remBytes = ((n-srcOff) % sizeof(*__ptrLDG));
int lowShft, uppShft;
if (srcOff > dstOff) {
uppShft = (srcOff-dstOff)*8;
lowShft = (8*sizeof(*__ptrLDG)) - uppShft;
__ptrSTG++;
} else {
lowShft = (dstOff-srcOff)*8;
uppShft = (8*sizeof(*__ptrLDG)) - lowShft;
}
for(int i = 0; i < nread-1; i += BDIM_X) {
if (i+tid < nread-1) {
const LDST_T low = __ptrLDG[i+tid];
const LDST_T upp = __ptrLDG[i+tid+1];
__ptrSTG[i+tid] = (low >> lowShft) | (upp << uppShft);
}
}
remBytes += sizeof(*__ptrLDG);
if (srcOff > dstOff) {
dstOff += sizeof(*__ptrLDG);
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
} else {
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
}
}
}
template<int BDIM_X,
int MAXIOB>
__global__ void memcpy_k(const size_t *sizes,
const unsigned char *const __restrict__ *__restrict__ in,
unsigned char *__restrict__ *__restrict__ out) {
const int SH_BYTE_X_BL = 32768;
__shared__ unsigned char __sh[SH_BYTE_X_BL];
switch(MAXIOB) {
case 4:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned int>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 8:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned long long>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 16:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, ulonglong2>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
}
return;
}
#define NTHREADS 1024
void batched_d2d_memcpy(void** out_ptrs, void** in_ptrs, size_t* sizes, int num_copies, hipStream_t stream)
{
memcpy_k<NTHREADS, 16><<<num_copies, NTHREADS, 0, stream>>>(sizes, (unsigned char**) in_ptrs, (unsigned char**) out_ptrs);
} | .text
.file "batched_memcpy.hip"
.globl _Zls15HIP_vector_typeIyLj2EEi # -- Begin function _Zls15HIP_vector_typeIyLj2EEi
.p2align 4, 0x90
.type _Zls15HIP_vector_typeIyLj2EEi,@function
_Zls15HIP_vector_typeIyLj2EEi: # @_Zls15HIP_vector_typeIyLj2EEi
.cfi_startproc
# %bb.0:
movl %edx, %ecx
cmpl $65, %edx
jl .LBB0_2
# %bb.1:
# kill: def $cl killed $cl killed $ecx
shlq %cl, %rdi
xorl %eax, %eax
movq %rdi, %rdx
retq
.LBB0_2:
movq %rdi, %rax
shlq %cl, %rax
# kill: def $cl killed $cl killed $ecx
shldq %cl, %rdi, %rsi
movq %rsi, %rdx
retq
.Lfunc_end0:
.size _Zls15HIP_vector_typeIyLj2EEi, .Lfunc_end0-_Zls15HIP_vector_typeIyLj2EEi
.cfi_endproc
# -- End function
.globl _Zrs15HIP_vector_typeIyLj2EEi # -- Begin function _Zrs15HIP_vector_typeIyLj2EEi
.p2align 4, 0x90
.type _Zrs15HIP_vector_typeIyLj2EEi,@function
_Zrs15HIP_vector_typeIyLj2EEi: # @_Zrs15HIP_vector_typeIyLj2EEi
.cfi_startproc
# %bb.0:
movl %edx, %ecx
movq %rsi, %rax
cmpl $65, %edx
jl .LBB1_2
# %bb.1:
# kill: def $cl killed $cl killed $ecx
shrq %cl, %rax
xorl %edx, %edx
retq
.LBB1_2:
shrdq %cl, %rax, %rdi
movq %rax, %rdx
# kill: def $cl killed $cl killed $ecx
shrq %cl, %rdx
movq %rdi, %rax
retq
.Lfunc_end1:
.size _Zrs15HIP_vector_typeIyLj2EEi, .Lfunc_end1-_Zrs15HIP_vector_typeIyLj2EEi
.cfi_endproc
# -- End function
.globl _Zor15HIP_vector_typeIyLj2EES0_ # -- Begin function _Zor15HIP_vector_typeIyLj2EES0_
.p2align 4, 0x90
.type _Zor15HIP_vector_typeIyLj2EES0_,@function
_Zor15HIP_vector_typeIyLj2EES0_: # @_Zor15HIP_vector_typeIyLj2EES0_
.cfi_startproc
# %bb.0:
movq %rdi, %rax
orq %rdx, %rax
orq %rcx, %rsi
movq %rsi, %rdx
retq
.Lfunc_end2:
.size _Zor15HIP_vector_typeIyLj2EES0_, .Lfunc_end2-_Zor15HIP_vector_typeIyLj2EES0_
.cfi_endproc
# -- End function
.globl _Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t # -- Begin function _Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t
.p2align 4, 0x90
.type _Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t,@function
_Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t: # @_Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %r8, %r9
movq %rdx, %r15
movq %rsi, %r14
movq %rdi, %rbx
movl %ecx, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movq %rbx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t, .Lfunc_end3-_Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t
.cfi_endproc
# -- End function
.section .text._Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,"axG",@progbits,_Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,comdat
.weak _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh # -- Begin function _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.p2align 4, 0x90
.type _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,@function
_Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh: # @_Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end4:
.size _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, .Lfunc_end4-_Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,@object # @_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.section .rodata._Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,"aG",@progbits,_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,comdat
.weak _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.p2align 3, 0x0
_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh:
.quad _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.size _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh"
.size .L__unnamed_1, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001837f3_00000000-6_batched_memcpy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh, @function
_ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh:
.LFB2058:
.cfi_startproc
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsi, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
movq %rdx, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh, .-_ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh
.section .text._Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,"axG",@progbits,_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,comdat
.weak _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.type _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, @function
_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh:
.LFB2107:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2107:
.size _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, .-_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.text
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2036:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2036:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Zls10ulonglong2i
.type _Zls10ulonglong2i, @function
_Zls10ulonglong2i:
.LFB2027:
.cfi_startproc
endbr64
cmpl $64, %edx
jle .L12
leal -64(%rdx), %ecx
salq %cl, %rdi
movq %rdi, %rsi
movl $0, %eax
.L13:
movq %rsi, %rdx
ret
.L12:
movl %edx, %ecx
salq %cl, %rsi
movl $64, %ecx
subl %edx, %ecx
movq %rdi, %rax
shrq %cl, %rax
orq %rax, %rsi
movq %rdi, %rax
movl %edx, %ecx
salq %cl, %rax
jmp .L13
.cfi_endproc
.LFE2027:
.size _Zls10ulonglong2i, .-_Zls10ulonglong2i
.globl _Zrs10ulonglong2i
.type _Zrs10ulonglong2i, @function
_Zrs10ulonglong2i:
.LFB2028:
.cfi_startproc
endbr64
cmpl $64, %edx
jle .L15
leal -64(%rdx), %ecx
movq %rsi, %rax
shrq %cl, %rax
movl $0, %r8d
.L16:
movq %r8, %rdx
ret
.L15:
movq %rsi, %r8
movl %edx, %ecx
shrq %cl, %r8
shrq %cl, %rdi
movl $64, %ecx
subl %edx, %ecx
salq %cl, %rsi
movq %rdi, %rax
orq %rsi, %rax
jmp .L16
.cfi_endproc
.LFE2028:
.size _Zrs10ulonglong2i, .-_Zrs10ulonglong2i
.globl _Zor10ulonglong2S_
.type _Zor10ulonglong2S_, @function
_Zor10ulonglong2S_:
.LFB2029:
.cfi_startproc
endbr64
movq %rdx, %rax
orq %rcx, %rsi
movq %rsi, %rdx
orq %rdi, %rax
ret
.cfi_endproc
.LFE2029:
.size _Zor10ulonglong2S_, .-_Zor10ulonglong2S_
.globl _Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st
.type _Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st, @function
_Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st:
.LFB2033:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r12
movq %rsi, %rbp
movq %rdx, %rbx
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
movl %ecx, 8(%rsp)
movl $1, 12(%rsp)
movq %r8, %r9
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L18:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call _ZL53__device_stub__Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPhPKmPrKPKhPrPh
jmp .L18
.cfi_endproc
.LFE2033:
.size _Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st, .-_Z18batched_d2d_memcpyPPvS0_PmiP11CUstream_st
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "batched_memcpy.hip"
.globl _Zls15HIP_vector_typeIyLj2EEi # -- Begin function _Zls15HIP_vector_typeIyLj2EEi
.p2align 4, 0x90
.type _Zls15HIP_vector_typeIyLj2EEi,@function
_Zls15HIP_vector_typeIyLj2EEi: # @_Zls15HIP_vector_typeIyLj2EEi
.cfi_startproc
# %bb.0:
movl %edx, %ecx
cmpl $65, %edx
jl .LBB0_2
# %bb.1:
# kill: def $cl killed $cl killed $ecx
shlq %cl, %rdi
xorl %eax, %eax
movq %rdi, %rdx
retq
.LBB0_2:
movq %rdi, %rax
shlq %cl, %rax
# kill: def $cl killed $cl killed $ecx
shldq %cl, %rdi, %rsi
movq %rsi, %rdx
retq
.Lfunc_end0:
.size _Zls15HIP_vector_typeIyLj2EEi, .Lfunc_end0-_Zls15HIP_vector_typeIyLj2EEi
.cfi_endproc
# -- End function
.globl _Zrs15HIP_vector_typeIyLj2EEi # -- Begin function _Zrs15HIP_vector_typeIyLj2EEi
.p2align 4, 0x90
.type _Zrs15HIP_vector_typeIyLj2EEi,@function
_Zrs15HIP_vector_typeIyLj2EEi: # @_Zrs15HIP_vector_typeIyLj2EEi
.cfi_startproc
# %bb.0:
movl %edx, %ecx
movq %rsi, %rax
cmpl $65, %edx
jl .LBB1_2
# %bb.1:
# kill: def $cl killed $cl killed $ecx
shrq %cl, %rax
xorl %edx, %edx
retq
.LBB1_2:
shrdq %cl, %rax, %rdi
movq %rax, %rdx
# kill: def $cl killed $cl killed $ecx
shrq %cl, %rdx
movq %rdi, %rax
retq
.Lfunc_end1:
.size _Zrs15HIP_vector_typeIyLj2EEi, .Lfunc_end1-_Zrs15HIP_vector_typeIyLj2EEi
.cfi_endproc
# -- End function
.globl _Zor15HIP_vector_typeIyLj2EES0_ # -- Begin function _Zor15HIP_vector_typeIyLj2EES0_
.p2align 4, 0x90
.type _Zor15HIP_vector_typeIyLj2EES0_,@function
_Zor15HIP_vector_typeIyLj2EES0_: # @_Zor15HIP_vector_typeIyLj2EES0_
.cfi_startproc
# %bb.0:
movq %rdi, %rax
orq %rdx, %rax
orq %rcx, %rsi
movq %rsi, %rdx
retq
.Lfunc_end2:
.size _Zor15HIP_vector_typeIyLj2EES0_, .Lfunc_end2-_Zor15HIP_vector_typeIyLj2EES0_
.cfi_endproc
# -- End function
.globl _Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t # -- Begin function _Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t
.p2align 4, 0x90
.type _Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t,@function
_Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t: # @_Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %r8, %r9
movq %rdx, %r15
movq %rsi, %r14
movq %rdi, %rbx
movl %ecx, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movq %rbx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t, .Lfunc_end3-_Z18batched_d2d_memcpyPPvS0_PmiP12ihipStream_t
.cfi_endproc
# -- End function
.section .text._Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,"axG",@progbits,_Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,comdat
.weak _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh # -- Begin function _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.p2align 4, 0x90
.type _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,@function
_Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh: # @_Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end4:
.size _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, .Lfunc_end4-_Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,@object # @_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.section .rodata._Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,"aG",@progbits,_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh,comdat
.weak _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.p2align 3, 0x0
_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh:
.quad _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.size _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh"
.size .L__unnamed_1, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8memcpy_kILi1024ELi16EEvPKmPrKPKhPrPh
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
constexpr int LEN_INITAL = 8;
constexpr int STEPS = 6;
constexpr int LEN_W = 1 + 2 * STEPS;
constexpr int LEN_Z = LEN_W;
constexpr int LEN_Y = LEN_INITAL + 2 * STEPS;
constexpr int LEN_X = LEN_INITAL + 2 * STEPS;
constexpr int LEN_TOTAL = LEN_W * LEN_Z * LEN_Y * LEN_X;
constexpr int SIZE_W = LEN_Z * LEN_Y * LEN_X;
constexpr int SIZE_Z = LEN_Y * LEN_X;
constexpr int SIZE_Y = LEN_X;
constexpr int NUM_THREADS = 512;
constexpr int NUM_BLOCKS = LEN_TOTAL / NUM_THREADS;
inline int coord_to_idx(const int w, const int z, const int y, const int x) {
return
w * SIZE_W +
z * SIZE_Z +
y * SIZE_Y +
x;
}
inline void print_slice(const int* grid, const int w, const int z) {
for (int row = 0; row < LEN_Y; ++row) {
for (int col = 0; col < LEN_X; ++col) {
std::cout << grid[coord_to_idx(w, z, row, col)] << " ";
}
std::cout << "\n";
}
}
__device__
int coord_to_idx_dev(const int w, const int z, const int y, const int x) {
return w * SIZE_W + z * SIZE_Z + y * SIZE_Y + x;
}
__global__
void step(const int* grid, int* grid_next) {
// Find out where we are.
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < LEN_TOTAL) {
int left = idx;
int w = idx / SIZE_W;
left = idx - w * SIZE_W;
int z = left / SIZE_Z;
left = left - z * SIZE_Z;
int y = left / SIZE_Y;
int x = left - y * SIZE_Y;
// TODO: for loop here?
const int active = grid[idx];
// Count active neighbors.
int active_neighbors = 0;
int min_nw = max(0, w - 1);
int max_nw = min(LEN_W, w + 2);
int min_nz = max(0, z - 1);
int max_nz = min(LEN_Z, z + 2);
int min_ny = max(0, y - 1);
int max_ny = min(LEN_Y, y + 2);
int min_nx = max(0, x - 1);
int max_nx = min(LEN_X, x + 2);
for (int nw = min_nw; nw < max_nw; ++nw) {
for (int nz = min_nz; nz < max_nz; ++nz) {
for (int ny = min_ny; ny < max_ny; ++ny) {
for (int nx = min_nx; nx < max_nx; ++nx) {
active_neighbors += grid[coord_to_idx_dev(nw, nz, ny, nx)];
}
}
}
}
active_neighbors -= active;
// Rules
int active_next = active;
if (active == 1 && (active_neighbors < 2 || active_neighbors > 3)) {
active_next = 0;
}
else if (active == 0 && active_neighbors == 3) {
active_next = 1;
}
//active_next = idx;
grid_next[idx] = active_next;
}
}
int main() {
// Initialize grid.
int* grid;
cudaMallocManaged(&grid, LEN_TOTAL * sizeof(int));
cudaMemset(grid, 0, LEN_TOTAL);
int initial_grid[LEN_INITAL][LEN_INITAL] = {
{1, 1, 0, 0, 1, 0, 1, 0},
{1, 1, 1, 0, 1, 0, 1, 1},
{0, 0, 1, 1, 1, 0, 0, 1},
{0, 1, 0, 0, 0, 0, 1, 1},
{0, 1, 0, 0, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 1, 0},
{1, 0, 1, 1, 0, 1, 0, 1}
};
for (int row = 0; row < LEN_INITAL; ++row) {
for (int col = 0; col < LEN_INITAL; ++col) {
grid[coord_to_idx(STEPS, STEPS, STEPS + row, STEPS + col)] = initial_grid[row][col];
}
}
//print_slice(grid, STEPS, STEPS);
int* grid_next;
cudaMallocManaged(&grid_next, LEN_TOTAL * sizeof(int));
for (int i = 0; i < STEPS; ++i) {
//std::cout << "Step " << i << "\n";
step<<<NUM_BLOCKS, NUM_THREADS>>>(grid, grid_next);
cudaDeviceSynchronize();
std::swap(grid, grid_next);
//print_slice(grid, STEPS, STEPS);
}
// Count actives.
int count = 0;
for (int i = 0; i < LEN_TOTAL; ++i) {
count += grid[i];
}
std::cout << "Active: " << count << "\n";
cudaFree(grid);
cudaFree(grid_next);
return 0;
} | code for sm_80
Function : _Z4stepPKiPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R2, c[0x0][0x0], R3 ; /* 0x0000000002007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0x1080f, PT ; /* 0x0001080f0000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.HI R6, R0, 0x64d319ff, RZ ; /* 0x64d319ff00067827 */
/* 0x000fe200078e02ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0090*/ SHF.R.U32.HI R7, RZ, 0x1f, R6 ; /* 0x0000001fff077819 */
/* 0x000fc60000011606 */
/*00a0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fe200078e0205 */
/*00b0*/ LEA.HI.SX32 R15, R6, R7, 0x15 ; /* 0x00000007060f7211 */
/* 0x000fca00078faaff */
/*00c0*/ IMAD R7, R15.reuse, -0x1450, R0 ; /* 0xffffebb00f077824 */
/* 0x040fe200078e0200 */
/*00d0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000162000c1e1900 */
/*00e0*/ IADD3 R10, R15, -0x1, RZ ; /* 0xffffffff0f0a7810 */
/* 0x000fe20007ffe0ff */
/*00f0*/ BSSY B3, 0xb60 ; /* 0x00000a6000037945 */
/* 0x000fe20003800000 */
/*0100*/ IMAD.HI R12, R7, 0x51eb851f, RZ ; /* 0x51eb851f070c7827 */
/* 0x000fe400078e02ff */
/*0110*/ IMNMX R10, RZ, R10, !PT ; /* 0x0000000aff0a7217 */
/* 0x000fe40007800200 */
/*0120*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e00ff */
/*0130*/ SHF.R.U32.HI R9, RZ, 0x1f, R12 ; /* 0x0000001fff097819 */
/* 0x000fe4000001160c */
/*0140*/ IADD3 R5, R15, 0x2, RZ ; /* 0x000000020f057810 */
/* 0x001fc40007ffe0ff */
/*0150*/ LEA.HI.SX32 R12, R12, R9, 0x19 ; /* 0x000000090c0c7211 */
/* 0x000fe400078fcaff */
/*0160*/ IMNMX R5, R5, 0xd, PT ; /* 0x0000000d05057817 */
/* 0x000fc60003800200 */
/*0170*/ IMAD R6, R12, -0x190, R7 ; /* 0xfffffe700c067824 */
/* 0x000fe200078e0207 */
/*0180*/ ISETP.GE.AND P0, PT, R10, R5, PT ; /* 0x000000050a00720c */
/* 0x000fe40003f06270 */
/*0190*/ SHF.R.S32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fe20000011400 */
/*01a0*/ IMAD.HI R13, R6, 0x66666667, RZ ; /* 0x66666667060d7827 */
/* 0x000fca00078e02ff */
/*01b0*/ SHF.R.U32.HI R8, RZ, 0x1f, R13 ; /* 0x0000001fff087819 */
/* 0x000fc8000001160d */
/*01c0*/ LEA.HI.SX32 R13, R13, R8, 0x1d ; /* 0x000000080d0d7211 */
/* 0x000fc800078feaff */
/*01d0*/ IADD3 R8, R13.reuse, -0x1, RZ ; /* 0xffffffff0d087810 */
/* 0x040fe20007ffe0ff */
/*01e0*/ IMAD R14, R13, -0x14, R6 ; /* 0xffffffec0d0e7824 */
/* 0x000fe200078e0206 */
/*01f0*/ IADD3 R6, R12, -0x1, RZ ; /* 0xffffffff0c067810 */
/* 0x000fe40007ffe0ff */
/*0200*/ IMNMX R8, RZ, R8, !PT ; /* 0x00000008ff087217 */
/* 0x000fe40007800200 */
/*0210*/ IADD3 R9, R14, -0x1, RZ ; /* 0xffffffff0e097810 */
/* 0x000fe40007ffe0ff */
/*0220*/ IMNMX R6, RZ, R6, !PT ; /* 0x00000006ff067217 */
/* 0x000fe40007800200 */
/*0230*/ IMNMX R9, RZ, R9, !PT ; /* 0x00000009ff097217 */
/* 0x000fe20007800200 */
/*0240*/ @P0 BRA 0xb50 ; /* 0x0000090000000947 */
/* 0x000fea0003800000 */
/*0250*/ IMAD R16, R15, 0xd, R12 ; /* 0x0000000d0f107824 */
/* 0x000fe200078e020c */
/*0260*/ IADD3 R14, R14, 0x2, RZ ; /* 0x000000020e0e7810 */
/* 0x000fe20007ffe0ff */
/*0270*/ IMAD.MOV.U32 R15, RZ, RZ, 0x14 ; /* 0x00000014ff0f7424 */
/* 0x000fe200078e00ff */
/*0280*/ IADD3 R17, R13, 0x2, RZ ; /* 0x000000020d117810 */
/* 0x000fe20007ffe0ff */
/*0290*/ IMAD R16, R16, 0x14, R13 ; /* 0x0000001410107824 */
/* 0x000fe200078e020d */
/*02a0*/ IADD3 R13, R9.reuse, 0x1, RZ ; /* 0x00000001090d7810 */
/* 0x040fe20007ffe0ff */
/*02b0*/ IMAD.MOV R11, RZ, RZ, -R2 ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e0a02 */
/*02c0*/ IADD3 R23, R9, 0x3, RZ ; /* 0x0000000309177810 */
/* 0x000fe20007ffe0ff */
/*02d0*/ IMAD R2, R16, R15, -0x3 ; /* 0xfffffffd10027424 */
/* 0x000fe200078e020f */
/*02e0*/ IADD3 R15, R12, 0x2, RZ ; /* 0x000000020c0f7810 */
/* 0x000fc40007ffe0ff */
/*02f0*/ IADD3 R12, R9, 0x2, RZ ; /* 0x00000002090c7810 */
/* 0x000fe20007ffe0ff */
/*0300*/ IMAD R2, R11, c[0x0][0x0], R2 ; /* 0x000000000b027a24 */
/* 0x000fe200078e0202 */
/*0310*/ IMNMX R14, R14, 0x14, PT ; /* 0x000000140e0e7817 */
/* 0x000fe20003800200 */
/*0320*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e00ff */
/*0330*/ IMNMX R15, R15, 0xd, PT ; /* 0x0000000d0f0f7817 */
/* 0x000fe20003800200 */
/*0340*/ IMAD.IADD R2, R2, 0x1, -R3 ; /* 0x0000000102027824 */
/* 0x000fe200078e0a03 */
/*0350*/ IMNMX R17, R17, 0x14, PT ; /* 0x0000001411117817 */
/* 0x000fc80003800200 */
/*0360*/ IMNMX R2, R2, -0x15, !PT ; /* 0xffffffeb02027817 */
/* 0x000fc80007800200 */
/*0370*/ IADD3 R3, -R9, -0x2, -R2 ; /* 0xfffffffe09037810 */
/* 0x000fe40007ffe902 */
/*0380*/ LOP3.LUT R2, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff027212 */
/* 0x000fe400078e33ff */
/*0390*/ ISETP.GE.U32.AND P1, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fc60003f26070 */
/*03a0*/ IMAD.IADD R2, R2, 0x1, -R9 ; /* 0x0000000102027824 */
/* 0x000fca00078e0a09 */
/*03b0*/ LOP3.LUT R16, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302107812 */
/* 0x000fe400078ec0ff */
/*03c0*/ ISETP.GE.AND P0, PT, R6, R15, PT ; /* 0x0000000f0600720c */
/* 0x000fe20003f06270 */
/*03d0*/ BSSY B2, 0xb20 ; /* 0x0000074000027945 */
/* 0x000fd80003800000 */
/*03e0*/ @P0 BRA 0xb10 ; /* 0x0000072000000947 */
/* 0x000fea0003800000 */
/*03f0*/ IMAD.MOV.U32 R18, RZ, RZ, R6 ; /* 0x000000ffff127224 */
/* 0x000fe400078e0006 */
/*0400*/ ISETP.GE.AND P0, PT, R8, R17, PT ; /* 0x000000110800720c */
/* 0x000fe20003f06270 */
/*0410*/ BSSY B1, 0xae0 ; /* 0x000006c000017945 */
/* 0x000fd80003800000 */
/*0420*/ @P0 BRA 0xad0 ; /* 0x000006a000000947 */
/* 0x000fea0003800000 */
/*0430*/ IMAD R19, R10, 0xd, R18 ; /* 0x0000000d0a137824 */
/* 0x000fe400078e0212 */
/*0440*/ IMAD.MOV.U32 R20, RZ, RZ, R8 ; /* 0x000000ffff147224 */
/* 0x000fe400078e0008 */
/*0450*/ IMAD R19, R19, 0x190, RZ ; /* 0x0000019013137824 */
/* 0x000fe400078e02ff */
/*0460*/ ISETP.GE.AND P0, PT, R9, R14, PT ; /* 0x0000000e0900720c */
/* 0x000fe20003f06270 */
/*0470*/ BSSY B0, 0xaa0 ; /* 0x0000062000007945 */
/* 0x000fd80003800000 */
/*0480*/ @P0 BRA 0xa90 ; /* 0x0000060000000947 */
/* 0x000fea0003800000 */
/*0490*/ ISETP.NE.AND P0, PT, R16, RZ, PT ; /* 0x000000ff1000720c */
/* 0x000fe20003f05270 */
/*04a0*/ BSSY B4, 0x5e0 ; /* 0x0000013000047945 */
/* 0x000fe20003800000 */
/*04b0*/ IMAD.MOV.U32 R21, RZ, RZ, R9 ; /* 0x000000ffff157224 */
/* 0x000fd600078e0009 */
/*04c0*/ @!P0 BRA 0x5d0 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*04d0*/ IMAD R2, R20, 0x14, R19 ; /* 0x0000001414027824 */
/* 0x000fe400078e0213 */
/*04e0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe400078e00ff */
/*04f0*/ IMAD.IADD R2, R9, 0x1, R2 ; /* 0x0000000109027824 */
/* 0x000fc800078e0202 */
/*0500*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0510*/ LDG.E R22, [R2.64] ; /* 0x0000000402167981 */
/* 0x000ea2000c1e1900 */
/*0520*/ ISETP.NE.AND P0, PT, R16, 0x1, PT ; /* 0x000000011000780c */
/* 0x000fe20003f05270 */
/*0530*/ IMAD.MOV.U32 R21, RZ, RZ, R13 ; /* 0x000000ffff157224 */
/* 0x000fe400078e000d */
/*0540*/ IMAD.IADD R11, R11, 0x1, R22 ; /* 0x000000010b0b7824 */
/* 0x004fd400078e0216 */
/*0550*/ @!P0 BRA 0x5d0 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*0560*/ ISETP.NE.AND P0, PT, R16, 0x2, PT ; /* 0x000000021000780c */
/* 0x000fe20003f05270 */
/*0570*/ LDG.E R22, [R2.64+0x4] ; /* 0x0000040402167981 */
/* 0x000e98000c1e1900 */
/*0580*/ @P0 LDG.E R24, [R2.64+0x8] ; /* 0x0000080402180981 */
/* 0x000ee2000c1e1900 */
/*0590*/ IMAD.MOV.U32 R21, RZ, RZ, R12 ; /* 0x000000ffff157224 */
/* 0x000fe400078e000c */
/*05a0*/ @P0 IMAD.MOV.U32 R21, RZ, RZ, R23 ; /* 0x000000ffff150224 */
/* 0x000fe400078e0017 */
/*05b0*/ IMAD.IADD R11, R11, 0x1, R22 ; /* 0x000000010b0b7824 */
/* 0x004fc800078e0216 */
/*05c0*/ @P0 IMAD.IADD R11, R11, 0x1, R24 ; /* 0x000000010b0b0824 */
/* 0x008fe400078e0218 */
/*05d0*/ BSYNC B4 ; /* 0x0000000000047941 */
/* 0x000fea0003800000 */
/*05e0*/ @!P1 BRA 0xa90 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*05f0*/ IMAD.IADD R2, R14, 0x1, -R21.reuse ; /* 0x000000010e027824 */
/* 0x100fe200078e0a15 */
/*0600*/ BSSY B4, 0x8a0 ; /* 0x0000029000047945 */
/* 0x000fe20003800000 */
/*0610*/ IMAD.IADD R3, R19, 0x1, R21 ; /* 0x0000000113037824 */
/* 0x000fe200078e0215 */
/*0620*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0f070 */
/*0630*/ ISETP.GT.AND P2, PT, R2, 0xc, PT ; /* 0x0000000c0200780c */
/* 0x000fe20003f44270 */
/*0640*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fe400078e00ff */
/*0650*/ IMAD R3, R20, 0x14, R3 ; /* 0x0000001414037824 */
/* 0x000fc800078e0203 */
/*0660*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fcc00078e0202 */
/*0670*/ @!P2 BRA 0x890 ; /* 0x000002100000a947 */
/* 0x000fea0003800000 */
/*0680*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0690*/ IADD3 R22, R14, -0xc, RZ ; /* 0xfffffff40e167810 */
/* 0x000fe40007ffe0ff */
/*06a0*/ LDG.E R24, [R2.64] ; /* 0x0000000402187981 */
/* 0x000ea8000c1e1900 */
/*06b0*/ LDG.E R25, [R2.64+0x4] ; /* 0x0000040402197981 */
/* 0x000ea8000c1e1900 */
/*06c0*/ LDG.E R27, [R2.64+0x8] ; /* 0x00000804021b7981 */
/* 0x000ee8000c1e1900 */
/*06d0*/ LDG.E R26, [R2.64+0xc] ; /* 0x00000c04021a7981 */
/* 0x000ee2000c1e1900 */
/*06e0*/ IADD3 R24, R25, R24, R11 ; /* 0x0000001819187210 */
/* 0x004fc60007ffe00b */
/*06f0*/ LDG.E R11, [R2.64+0x10] ; /* 0x00001004020b7981 */
/* 0x000ea8000c1e1900 */
/*0700*/ LDG.E R25, [R2.64+0x14] ; /* 0x0000140402197981 */
/* 0x000ea2000c1e1900 */
/*0710*/ IADD3 R24, R26, R27, R24 ; /* 0x0000001b1a187210 */
/* 0x008fc60007ffe018 */
/*0720*/ LDG.E R26, [R2.64+0x18] ; /* 0x00001804021a7981 */
/* 0x000ee8000c1e1900 */
/*0730*/ LDG.E R27, [R2.64+0x1c] ; /* 0x00001c04021b7981 */
/* 0x000ee2000c1e1900 */
/*0740*/ IADD3 R11, R25, R11, R24 ; /* 0x0000000b190b7210 */
/* 0x004fc60007ffe018 */
/*0750*/ LDG.E R24, [R2.64+0x20] ; /* 0x0000200402187981 */
/* 0x000ea8000c1e1900 */
/*0760*/ LDG.E R25, [R2.64+0x24] ; /* 0x0000240402197981 */
/* 0x000ea2000c1e1900 */
/*0770*/ IADD3 R11, R27, R26, R11 ; /* 0x0000001a1b0b7210 */
/* 0x008fc60007ffe00b */
/*0780*/ LDG.E R26, [R2.64+0x28] ; /* 0x00002804021a7981 */
/* 0x000ee8000c1e1900 */
/*0790*/ LDG.E R27, [R2.64+0x2c] ; /* 0x00002c04021b7981 */
/* 0x000ee2000c1e1900 */
/*07a0*/ IADD3 R11, R25, R24, R11 ; /* 0x00000018190b7210 */
/* 0x004fc60007ffe00b */
/*07b0*/ LDG.E R25, [R2.64+0x30] ; /* 0x0000300402197981 */
/* 0x000ea8000c1e1900 */
/*07c0*/ LDG.E R24, [R2.64+0x38] ; /* 0x0000380402187981 */
/* 0x000f22000c1e1900 */
/*07d0*/ IADD3 R26, R27, R26, R11 ; /* 0x0000001a1b1a7210 */
/* 0x008fc60007ffe00b */
/*07e0*/ LDG.E R27, [R2.64+0x34] ; /* 0x00003404021b7981 */
/* 0x000ea8000c1e1900 */
/*07f0*/ LDG.E R11, [R2.64+0x3c] ; /* 0x00003c04020b7981 */
/* 0x000f22000c1e1900 */
/*0800*/ IADD3 R21, R21, 0x10, RZ ; /* 0x0000001015157810 */
/* 0x000fc80007ffe0ff */
/*0810*/ ISETP.GE.AND P2, PT, R21, R22, PT ; /* 0x000000161500720c */
/* 0x000fe40003f46270 */
/*0820*/ IADD3 R25, R27, R25, R26 ; /* 0x000000191b197210 */
/* 0x004fe40007ffe01a */
/*0830*/ IADD3 R26, P3, R2, 0x40, RZ ; /* 0x00000040021a7810 */
/* 0x000fca0007f7e0ff */
/*0840*/ IMAD.X R27, RZ, RZ, R3, P3 ; /* 0x000000ffff1b7224 */
/* 0x000fe200018e0603 */
/*0850*/ IADD3 R11, R11, R24, R25 ; /* 0x000000180b0b7210 */
/* 0x010fe20007ffe019 */
/*0860*/ IMAD.MOV.U32 R2, RZ, RZ, R26 ; /* 0x000000ffff027224 */
/* 0x000fe400078e001a */
/*0870*/ IMAD.MOV.U32 R3, RZ, RZ, R27 ; /* 0x000000ffff037224 */
/* 0x000fe200078e001b */
/*0880*/ @!P2 BRA 0x6a0 ; /* 0xfffffe100000a947 */
/* 0x000fea000383ffff */
/*0890*/ BSYNC B4 ; /* 0x0000000000047941 */
/* 0x000fea0003800000 */
/*08a0*/ IMAD.IADD R22, R14, 0x1, -R21 ; /* 0x000000010e167824 */
/* 0x000fe200078e0a15 */
/*08b0*/ BSSY B4, 0xa10 ; /* 0x0000015000047945 */
/* 0x000fe80003800000 */
/*08c0*/ ISETP.GT.AND P2, PT, R22, 0x4, PT ; /* 0x000000041600780c */
/* 0x000fda0003f44270 */
/*08d0*/ @!P2 BRA 0xa00 ; /* 0x000001200000a947 */
/* 0x000fea0003800000 */
/*08e0*/ LDG.E R22, [R2.64] ; /* 0x0000000402167981 */
/* 0x000ea8000c1e1900 */
/*08f0*/ LDG.E R24, [R2.64+0x4] ; /* 0x0000040402187981 */
/* 0x000ea8000c1e1900 */
/*0900*/ LDG.E R25, [R2.64+0x8] ; /* 0x0000080402197981 */
/* 0x000ee8000c1e1900 */
/*0910*/ LDG.E R26, [R2.64+0xc] ; /* 0x00000c04021a7981 */
/* 0x000ee2000c1e1900 */
/*0920*/ IADD3 R22, R24, R22, R11 ; /* 0x0000001618167210 */
/* 0x004fc60007ffe00b */
/*0930*/ LDG.E R11, [R2.64+0x10] ; /* 0x00001004020b7981 */
/* 0x000ea8000c1e1900 */
/*0940*/ LDG.E R24, [R2.64+0x14] ; /* 0x0000140402187981 */
/* 0x000ea2000c1e1900 */
/*0950*/ IADD3 R22, R26, R25, R22 ; /* 0x000000191a167210 */
/* 0x008fc60007ffe016 */
/*0960*/ LDG.E R26, [R2.64+0x18] ; /* 0x00001804021a7981 */
/* 0x000ee8000c1e1900 */
/*0970*/ LDG.E R25, [R2.64+0x1c] ; /* 0x00001c0402197981 */
/* 0x000ee2000c1e1900 */
/*0980*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0990*/ IADD3 R21, R21, 0x8, RZ ; /* 0x0000000815157810 */
/* 0x000fe40007ffe0ff */
/*09a0*/ IADD3 R11, R24, R11, R22 ; /* 0x0000000b180b7210 */
/* 0x004fe40007ffe016 */
/*09b0*/ IADD3 R22, P2, R2, 0x20, RZ ; /* 0x0000002002167810 */
/* 0x000fca0007f5e0ff */
/*09c0*/ IMAD.X R27, RZ, RZ, R3, P2 ; /* 0x000000ffff1b7224 */
/* 0x000fe200010e0603 */
/*09d0*/ IADD3 R11, R25, R26, R11 ; /* 0x0000001a190b7210 */
/* 0x008fe20007ffe00b */
/*09e0*/ IMAD.MOV.U32 R2, RZ, RZ, R22 ; /* 0x000000ffff027224 */
/* 0x000fe400078e0016 */
/*09f0*/ IMAD.MOV.U32 R3, RZ, RZ, R27 ; /* 0x000000ffff037224 */
/* 0x000fe400078e001b */
/*0a00*/ BSYNC B4 ; /* 0x0000000000047941 */
/* 0x000fea0003800000 */
/*0a10*/ ISETP.LT.OR P0, PT, R21, R14, P0 ; /* 0x0000000e1500720c */
/* 0x000fda0000701670 */
/*0a20*/ @!P0 BRA 0xa90 ; /* 0x0000006000008947 */
/* 0x000fea0003800000 */
/*0a30*/ LDG.E R22, [R2.64] ; /* 0x0000000402167981 */
/* 0x000ea8000c1e1900 */
/*0a40*/ LDG.E R21, [R2.64+0x4] ; /* 0x0000040402157981 */
/* 0x000ea8000c1e1900 */
/*0a50*/ LDG.E R24, [R2.64+0x8] ; /* 0x0000080402187981 */
/* 0x000ee8000c1e1900 */
/*0a60*/ LDG.E R25, [R2.64+0xc] ; /* 0x00000c0402197981 */
/* 0x000ee2000c1e1900 */
/*0a70*/ IADD3 R11, R21, R22, R11 ; /* 0x00000016150b7210 */
/* 0x004fc80007ffe00b */
/*0a80*/ IADD3 R11, R25, R24, R11 ; /* 0x00000018190b7210 */
/* 0x008fe40007ffe00b */
/*0a90*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0aa0*/ IADD3 R20, R20, 0x1, RZ ; /* 0x0000000114147810 */
/* 0x000fc80007ffe0ff */
/*0ab0*/ ISETP.GE.AND P0, PT, R20, R17, PT ; /* 0x000000111400720c */
/* 0x000fda0003f06270 */
/*0ac0*/ @!P0 BRA 0x460 ; /* 0xfffff99000008947 */
/* 0x000fea000383ffff */
/*0ad0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0ae0*/ IADD3 R18, R18, 0x1, RZ ; /* 0x0000000112127810 */
/* 0x000fc80007ffe0ff */
/*0af0*/ ISETP.GE.AND P0, PT, R18, R15, PT ; /* 0x0000000f1200720c */
/* 0x000fda0003f06270 */
/*0b00*/ @!P0 BRA 0x400 ; /* 0xfffff8f000008947 */
/* 0x000fea000383ffff */
/*0b10*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0b20*/ IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a7810 */
/* 0x000fc80007ffe0ff */
/*0b30*/ ISETP.GE.AND P0, PT, R10, R5, PT ; /* 0x000000050a00720c */
/* 0x000fda0003f06270 */
/*0b40*/ @!P0 BRA 0x3c0 ; /* 0xfffff87000008947 */
/* 0x000fea000383ffff */
/*0b50*/ BSYNC B3 ; /* 0x0000000000037941 */
/* 0x000fea0003800000 */
/*0b60*/ IMAD.IADD R11, R11, 0x1, -R4 ; /* 0x000000010b0b7824 */
/* 0x020fca00078e0a04 */
/*0b70*/ LOP3.LUT R2, R11.reuse, 0xfffffffe, RZ, 0xc0, !PT ; /* 0xfffffffe0b027812 */
/* 0x040fe400078ec0ff */
/*0b80*/ ISETP.NE.AND P1, PT, R11, 0x3, PT ; /* 0x000000030b00780c */
/* 0x000fe40003f25270 */
/*0b90*/ ISETP.NE.AND P0, PT, R2, 0x2, PT ; /* 0x000000020200780c */
/* 0x000fe40003f05270 */
/*0ba0*/ ISETP.EQ.AND P1, PT, R4.reuse, RZ, !P1 ; /* 0x000000ff0400720c */
/* 0x040fe40004f22270 */
/*0bb0*/ ISETP.EQ.AND P0, PT, R4, 0x1, P0 ; /* 0x000000010400780c */
/* 0x000fe40000702270 */
/*0bc0*/ LEA R2, P2, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fc400078410ff */
/*0bd0*/ SEL R4, R4, 0x1, !P1 ; /* 0x0000000104047807 */
/* 0x000fe40004800000 */
/*0be0*/ LEA.HI.X R3, R0, c[0x0][0x16c], R7, 0x2, P2 ; /* 0x00005b0000037a11 */
/* 0x000fe400010f1407 */
/*0bf0*/ SEL R5, R4, RZ, !P0 ; /* 0x000000ff04057207 */
/* 0x000fca0004000000 */
/*0c00*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0c10*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c20*/ BRA 0xc20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
// Simulation parameters for the 4-D (w, z, y, x) cellular automaton.
constexpr int LEN_INITAL = 8;                 // edge length of the 2-D seed pattern
constexpr int STEPS = 6;                      // generations to run; also the padding width per side
constexpr int LEN_W = 1 + 2 * STEPS;          // w extent: seed slice plus STEPS cells each side
constexpr int LEN_Z = LEN_W;                  // z extent (same padding as w)
constexpr int LEN_Y = LEN_INITAL + 2 * STEPS; // y extent: seed rows plus padding
constexpr int LEN_X = LEN_INITAL + 2 * STEPS; // x extent: seed cols plus padding
constexpr int LEN_TOTAL = LEN_W * LEN_Z * LEN_Y * LEN_X;  // total cell count
// Strides used to flatten (w, z, y, x) into a linear index.
constexpr int SIZE_W = LEN_Z * LEN_Y * LEN_X;
constexpr int SIZE_Z = LEN_Y * LEN_X;
constexpr int SIZE_Y = LEN_X;
constexpr int NUM_THREADS = 512;
// BUG FIX: use ceiling division. The old `LEN_TOTAL / NUM_THREADS` truncated
// (67600 / 512 == 132), launching only 67584 threads, so the last
// LEN_TOTAL % NUM_THREADS cells were never updated by the kernel.
constexpr int NUM_BLOCKS = (LEN_TOTAL + NUM_THREADS - 1) / NUM_THREADS;
// Flatten a 4-D coordinate (w, z, y, x) into a linear index into the grid.
inline int coord_to_idx(const int w, const int z, const int y, const int x) {
    return x + y * SIZE_Y + z * SIZE_Z + w * SIZE_W;
}
// Print one (y, x) plane of the grid -- the slice at the given w and z --
// to stdout, one row per line, cells separated by spaces.
inline void print_slice(const int* grid, const int w, const int z) {
    for (int y = 0; y < LEN_Y; ++y) {
        for (int x = 0; x < LEN_X; ++x) {
            std::cout << grid[coord_to_idx(w, z, y, x)] << " ";
        }
        std::cout << "\n";
    }
}
// Device-side counterpart of coord_to_idx: flatten (w, z, y, x) into a
// linear grid index using the global stride constants.
__device__
int coord_to_idx_dev(const int w, const int z, const int y, const int x) {
    return x + y * SIZE_Y + z * SIZE_Z + w * SIZE_W;
}
// One generation of the 4-D life-like automaton: each thread owns one cell,
// sums its 3^4 neighborhood (clamped at the domain borders), and writes the
// cell's next state into grid_next. Launch with at least LEN_TOTAL threads.
__global__
void step(const int* grid, int* grid_next) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= LEN_TOTAL) {
        return;  // tail guard: grid may not divide evenly into blocks
    }
    // Decompose the flat index into (w, z, y, x) coordinates.
    const int w = idx / SIZE_W;
    int rem = idx - w * SIZE_W;
    const int z = rem / SIZE_Z;
    rem -= z * SIZE_Z;
    const int y = rem / SIZE_Y;
    const int x = rem - y * SIZE_Y;

    const int active = grid[idx];

    // Sum every cell in the clamped 3x3x3x3 neighborhood, self included.
    int neighbor_sum = 0;
    const int w_lo = max(0, w - 1), w_hi = min(LEN_W, w + 2);
    const int z_lo = max(0, z - 1), z_hi = min(LEN_Z, z + 2);
    const int y_lo = max(0, y - 1), y_hi = min(LEN_Y, y + 2);
    const int x_lo = max(0, x - 1), x_hi = min(LEN_X, x + 2);
    for (int nw = w_lo; nw < w_hi; ++nw) {
        for (int nz = z_lo; nz < z_hi; ++nz) {
            for (int ny = y_lo; ny < y_hi; ++ny) {
                for (int nx = x_lo; nx < x_hi; ++nx) {
                    neighbor_sum += grid[coord_to_idx_dev(nw, nz, ny, nx)];
                }
            }
        }
    }
    neighbor_sum -= active;  // exclude the cell itself

    // Conway-style rules: a live cell survives with 2 or 3 live neighbors;
    // a dead cell becomes live with exactly 3.
    int next = active;
    if (active == 1 && (neighbor_sum < 2 || neighbor_sum > 3)) {
        next = 0;
    }
    else if (active == 0 && neighbor_sum == 3) {
        next = 1;
    }
    grid_next[idx] = next;
}
int main() {
    // Allocate the 4-D grid in unified (managed) memory so both host and
    // device can access it directly.
    int* grid;
    cudaMallocManaged(&grid, LEN_TOTAL * sizeof(int));
    // BUG FIX: cudaMemset takes a size in BYTES. The old call passed
    // LEN_TOTAL (the element count), so only the first quarter of the grid
    // was zeroed and the rest started as uninitialized garbage.
    cudaMemset(grid, 0, LEN_TOTAL * sizeof(int));
    int initial_grid[LEN_INITAL][LEN_INITAL] = {
        {1, 1, 0, 0, 1, 0, 1, 0},
        {1, 1, 1, 0, 1, 0, 1, 1},
        {0, 0, 1, 1, 1, 0, 0, 1},
        {0, 1, 0, 0, 0, 0, 1, 1},
        {0, 1, 0, 0, 1, 1, 1, 1},
        {1, 1, 1, 1, 1, 0, 0, 0},
        {1, 1, 1, 1, 1, 1, 1, 0},
        {1, 0, 1, 1, 0, 1, 0, 1}
    };
    // Plant the 2-D seed in the padded volume at w = z = STEPS, offset by
    // STEPS in y and x, so it cannot reach the borders within STEPS steps.
    for (int row = 0; row < LEN_INITAL; ++row) {
        for (int col = 0; col < LEN_INITAL; ++col) {
            grid[coord_to_idx(STEPS, STEPS, STEPS + row, STEPS + col)] = initial_grid[row][col];
        }
    }
    //print_slice(grid, STEPS, STEPS);
    int* grid_next;
    cudaMallocManaged(&grid_next, LEN_TOTAL * sizeof(int));
    // BUG FIX: zero the back buffer too (byte count, as above). Any cell the
    // kernel launch fails to cover would otherwise hold garbage after the
    // buffer swap and corrupt the final count.
    cudaMemset(grid_next, 0, LEN_TOTAL * sizeof(int));
    // Double-buffered stepping: the kernel reads grid and writes grid_next,
    // then the buffers swap.
    for (int i = 0; i < STEPS; ++i) {
        step<<<NUM_BLOCKS, NUM_THREADS>>>(grid, grid_next);
        // Managed memory: synchronize before the host touches the buffers.
        cudaDeviceSynchronize();
        std::swap(grid, grid_next);
        //print_slice(grid, STEPS, STEPS);
    }
    // Count active cells on the host.
    int count = 0;
    for (int i = 0; i < LEN_TOTAL; ++i) {
        count += grid[i];
    }
    std::cout << "Active: " << count << "\n";
    cudaFree(grid);
    cudaFree(grid_next);
    return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
# Compiler-generated atexit hook: unregisters this module's embedded fat
# binary from the CUDA runtime at process shutdown, using the handle saved
# in _ZL20__cudaFatCubinHandle by the registration constructor.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3676:
.cfi_startproc
endbr64
subq $8, %rsp                                  # re-align stack to 16 bytes for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi     # arg0 = saved fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3676:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side placeholder for the __device__-only function
# coord_to_idx_dev(int, int, int, int). It has no host implementation:
# calling it from host code terminates the process via exit(1).
.globl _Z16coord_to_idx_deviiii
.type _Z16coord_to_idx_deviiii, @function
_Z16coord_to_idx_deviiii:
.LFB3671:
.cfi_startproc
endbr64
pushq %rax                 # push/pop pair: stack alignment adjustment only
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)          # exit status 1 stored to a stack temporary
movl 12(%rsp), %edi        # arg0 = 1
call exit@PLT              # does not return
.cfi_endproc
.LFE3671:
.size _Z16coord_to_idx_deviiii, .-_Z16coord_to_idx_deviiii
# Compiler-generated launch stub for the kernel step(const int*, int*).
# It spills the two kernel arguments to the stack, builds an argument-pointer
# array, pops the <<<grid, block, smem, stream>>> configuration pushed by the
# caller, and forwards everything to cudaLaunchKernel.
.globl _Z26__device_stub__Z4stepPKiPiPKiPi
.type _Z26__device_stub__Z4stepPKiPiPKiPi, @function
_Z26__device_stub__Z4stepPKiPiPKiPi:
.LFB3698:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)         # spill arg0 (const int* grid)
movq %rsi, (%rsp)          # spill arg1 (int* grid_next)
movq %fs:40, %rax          # install stack-protector canary
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax         # args[0] = &grid
movq %rax, 80(%rsp)
movq %rsp, %rax            # args[1] = &grid_next
movq %rax, 88(%rsp)
movl $1, 32(%rsp)          # default-initialize grid/block dims to (1,1,1)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx        # &stream
leaq 16(%rsp), %rdx        # &sharedMem
leaq 44(%rsp), %rsi        # &blockDim
leaq 32(%rsp), %rdi        # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9                     # zero return: configuration valid, do the launch
.L5:
movq 104(%rsp), %rax       # verify stack-protector canary
subq %fs:40, %rax
jne .L10
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 24(%rsp)             # stack args: stream, then sharedMem
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9         # arg5 = argument-pointer array
movq 60(%rsp), %rcx        # arg3 = blockDim
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi        # arg1 = gridDim
movl 56(%rsp), %edx
leaq _Z4stepPKiPi(%rip), %rdi  # arg0 = host function address keying the kernel
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L5
.L10:
call __stack_chk_fail@PLT  # canary mismatch: abort
.cfi_endproc
.LFE3698:
.size _Z26__device_stub__Z4stepPKiPiPKiPi, .-_Z26__device_stub__Z4stepPKiPiPKiPi
# Host trampoline for step(const int*, int*): a call to the kernel symbol
# from host code simply tail-delegates to the launch stub above. Its address
# also serves as the key under which the device kernel is registered.
.globl _Z4stepPKiPi
.type _Z4stepPKiPi, @function
_Z4stepPKiPi:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp              # align stack for the nested call
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z4stepPKiPiPKiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z4stepPKiPi, .-_Z4stepPKiPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Active: "
.LC1:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB3672:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $320, %rsp
.cfi_def_cfa_offset 336
movq %fs:40, %rax
movq %rax, 312(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $1, %edx
movl $270400, %esi
call cudaMallocManaged@PLT
movl $67600, %edx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, 56(%rsp)
movl $0, 60(%rsp)
movl $1, 64(%rsp)
movl $0, 68(%rsp)
movl $1, 72(%rsp)
movl $0, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, 92(%rsp)
movl $1, 96(%rsp)
movl $0, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $0, 112(%rsp)
movl $0, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $1, 128(%rsp)
movl $0, 132(%rsp)
movl $0, 136(%rsp)
movl $1, 140(%rsp)
movl $0, 144(%rsp)
movl $1, 148(%rsp)
movl $0, 152(%rsp)
movl $0, 156(%rsp)
movl $0, 160(%rsp)
movl $0, 164(%rsp)
movl $1, 168(%rsp)
movl $1, 172(%rsp)
movl $0, 176(%rsp)
movl $1, 180(%rsp)
movl $0, 184(%rsp)
movl $0, 188(%rsp)
movl $1, 192(%rsp)
movl $1, 196(%rsp)
movl $1, 200(%rsp)
movl $1, 204(%rsp)
movl $1, 208(%rsp)
movl $1, 212(%rsp)
movl $1, 216(%rsp)
movl $1, 220(%rsp)
movl $1, 224(%rsp)
movl $0, 228(%rsp)
movl $0, 232(%rsp)
movl $0, 236(%rsp)
movl $1, 240(%rsp)
movl $1, 244(%rsp)
movl $1, 248(%rsp)
movl $1, 252(%rsp)
movl $1, 256(%rsp)
movl $1, 260(%rsp)
movl $1, 264(%rsp)
movl $0, 268(%rsp)
movl $1, 272(%rsp)
movl $0, 276(%rsp)
movl $1, 280(%rsp)
movl $1, 284(%rsp)
movl $0, 288(%rsp)
movl $1, 292(%rsp)
movl $0, 296(%rsp)
movl $1, 300(%rsp)
leaq 48(%rsp), %rdi
leaq -336(%rsp), %r8
movl $134936, %esi
.L14:
leaq -32(%rsi), %rax
.L15:
movl -134904(%rdi,%rax), %ecx
movq 8(%rsp), %rdx
movl %ecx, (%rdx,%rax)
addq $4, %rax
cmpq %rsi, %rax
jne .L15
subq $48, %rdi
addq $80, %rsi
cmpq %r8, %rdi
jne .L14
leaq 16(%rsp), %rdi
movl $1, %edx
movl $270400, %esi
call cudaMallocManaged@PLT
movl $6, %ebx
jmp .L18
.L17:
call cudaDeviceSynchronize@PLT
movq 8(%rsp), %rax
movq 16(%rsp), %rcx
movq %rcx, 8(%rsp)
movq %rax, 16(%rsp)
subl $1, %ebx
je .L25
.L18:
movl $512, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $132, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L17
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z26__device_stub__Z4stepPKiPiPKiPi
jmp .L17
.L25:
movq %rcx, %rax
addq $270400, %rcx
movl $0, %edx
.L19:
movl %edx, %ebx
addl (%rax), %ebx
movl %ebx, %edx
addq $4, %rax
cmpq %rcx, %rax
jne .L19
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 312(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $320, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3672:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z4stepPKiPi"
.text
# Static-initialization constructor (referenced from .init_array): registers
# the embedded fat binary and the step kernel with the CUDA runtime before
# main() runs, then queues the unregister hook via atexit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3701:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi      # arg0 = fatbin wrapper descriptor
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # save handle for later unregister
pushq $0                                   # four trailing NULL args of
.cfi_def_cfa_offset 24
pushq $0                                   # __cudaRegisterFunction passed
.cfi_def_cfa_offset 32
pushq $0                                   # on the stack
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d                             # thread_limit = -1 (none)
leaq .LC2(%rip), %rdx                      # device function name "_Z4stepPKiPi"
movq %rdx, %rcx
leaq _Z4stepPKiPi(%rip), %rsi              # host trampoline used as the key
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT                            # unregister at process exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.