system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00197b7e_00000000-6_add-2dArrays.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12random_initsPA20_i
.type _Z12random_initsPA20_i, @function
_Z12random_initsPA20_i:
.LFB3669:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
leaq 80(%rdi), %rbp
addq $1680, %r12
.L4:
leaq -80(%rbp), %rbx
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $80, %rbp
cmpq %r12, %rbp
jne .L4
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z12random_initsPA20_i, .-_Z12random_initsPA20_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "matrix["
.LC1:
.string "]["
.LC2:
.string "]"
.LC3:
.string " = "
.LC4:
.string " + "
.LC5:
.string "="
.LC6:
.string "\t"
.text
.globl _Z4showPA20_iS0_S0_
.type _Z4showPA20_iS0_S0_, @function
_Z4showPA20_iS0_S0_:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 8(%rsp)
movl $0, 4(%rsp)
leaq _ZSt4cout(%rip), %r12
movq %rdx, 16(%rsp)
movq %rsi, %rbp
jmp .L10
.L18:
call _ZSt16__throw_bad_castv@PLT
.L13:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L14:
movsbl %sil, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addl $1, 4(%rsp)
movl 4(%rsp), %eax
addq $80, 8(%rsp)
addq $80, %rbp
addq $80, 16(%rsp)
cmpl $20, %eax
je .L9
.L10:
movq 8(%rsp), %r15
movq %rbp, %r14
movq 16(%rsp), %r13
movl $0, %ebx
movq %rbp, 24(%rsp)
.L11:
movl $7, %edx
leaq .LC0(%rip), %rsi
movq %r12, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 4(%rsp), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $2, %edx
leaq .LC1(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $1, %edx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl $3, %edx
leaq .LC3(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl (%r15,%rbx,4), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $3, %edx
leaq .LC4(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl (%r14,%rbx,4), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $1, %edx
leaq .LC5(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 0(%r13,%rbx,4), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
leaq .LC6(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %rbx
cmpq $20, %rbx
jne .L11
movq 24(%rsp), %rbp
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L18
cmpb $0, 56(%rbx)
je .L13
movzbl 67(%rbx), %esi
jmp .L14
.L9:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z4showPA20_iS0_S0_, .-_Z4showPA20_iS0_S0_
.globl _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_
.type _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_, @function
_Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPA20_iS0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_, .-_Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_
.globl _Z3addPA20_iS0_S0_
.type _Z3addPA20_iS0_S0_, @function
_Z3addPA20_iS0_S0_:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z3addPA20_iS0_S0_, .-_Z3addPA20_iS0_S0_
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $4096, %rsp
.cfi_def_cfa_offset 4120
orq $0, (%rsp)
subq $776, %rsp
.cfi_def_cfa_offset 4896
movq %fs:40, %rax
movq %rax, 4856(%rsp)
xorl %eax, %eax
leaq 48(%rsp), %rbp
movq %rbp, %rdi
call _Z12random_initsPA20_i
leaq 1648(%rsp), %rbx
movq %rbx, %rdi
call _Z12random_initsPA20_i
movq %rsp, %rdi
movl $1600, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $1600, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $1600, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $1600, %edx
movq %rbp, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1600, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 3248(%rsp), %rsi
movl $1, %ecx
movl $1600, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $20, 24(%rsp)
movl $20, 28(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L28:
leaq 3248(%rsp), %rbx
movl $2, %ecx
movl $1600, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq 1648(%rsp), %rsi
leaq 48(%rsp), %rdi
movq %rbx, %rdx
call _Z4showPA20_iS0_S0_
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 4856(%rsp), %rax
subq %fs:40, %rax
jne .L32
movl $0, %eax
addq $4872, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_
jmp .L28
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z3addPA20_iS0_S0_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPA20_iS0_S0_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "add-2dArrays.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z12random_initsPA20_i # -- Begin function _Z12random_initsPA20_i
.p2align 4, 0x90
.type _Z12random_initsPA20_i,@function
_Z12random_initsPA20_i: # @_Z12random_initsPA20_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB0_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # Parent Loop BB0_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $20, %r15
jne .LBB0_2
# %bb.3: # in Loop: Header=BB0_1 Depth=1
incq %r14
addq $80, %rbx
cmpq $20, %r14
jne .LBB0_1
# %bb.4:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z12random_initsPA20_i, .Lfunc_end0-_Z12random_initsPA20_i
.cfi_endproc
# -- End function
.globl _Z18__device_stub__addPA20_iS0_S0_ # -- Begin function _Z18__device_stub__addPA20_iS0_S0_
.p2align 4, 0x90
.type _Z18__device_stub__addPA20_iS0_S0_,@function
_Z18__device_stub__addPA20_iS0_S0_: # @_Z18__device_stub__addPA20_iS0_S0_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPA20_iS0_S0_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z18__device_stub__addPA20_iS0_S0_, .Lfunc_end1-_Z18__device_stub__addPA20_iS0_S0_
.cfi_endproc
# -- End function
.globl _Z4showPA20_iS0_S0_ # -- Begin function _Z4showPA20_iS0_S0_
.p2align 4, 0x90
.type _Z4showPA20_iS0_S0_,@function
_Z4showPA20_iS0_S0_: # @_Z4showPA20_iS0_S0_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
xorl %r12d, %r12d
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_6: # in Loop: Header=BB2_1 Depth=1
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB2_1 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r12
addq $80, %rbx
addq $80, %r14
addq $80, %r15
cmpq $20, %r12
je .LBB2_8
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r12d, %esi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbp, %rdi
movl %r13d, %esi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.3, %esi
movl $3, %edx
movq %rbp, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%r15,%r13,4), %esi
movq %rbp, %rdi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.4, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%r14,%r13,4), %esi
movq %rbp, %rdi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.5, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%rbx,%r13,4), %esi
movq %rbp, %rdi
callq _ZNSolsEi
movl $.L.str.6, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r13
cmpq $20, %r13
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB2_9
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_1 Depth=1
cmpb $0, 56(%r13)
je .LBB2_6
# %bb.5: # in Loop: Header=BB2_1 Depth=1
movzbl 67(%r13), %eax
jmp .LBB2_7
.LBB2_8:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_9:
.cfi_def_cfa_offset 64
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size _Z4showPA20_iS0_S0_, .Lfunc_end2-_Z4showPA20_iS0_S0_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $4928, %rsp # imm = 0x1340
.cfi_def_cfa_offset 4960
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 1728(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_1: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $20, %r15
jne .LBB3_2
# %bb.3: # in Loop: Header=BB3_1 Depth=1
incq %r14
addq $80, %rbx
cmpq $20, %r14
jne .LBB3_1
# %bb.4: # %.preheader.i5.preheader
leaq 128(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_5: # %.preheader.i5
# =>This Loop Header: Depth=1
# Child Loop BB3_6 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_6: # Parent Loop BB3_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $20, %r15
jne .LBB3_6
# %bb.7: # in Loop: Header=BB3_5 Depth=1
incq %r14
addq $80, %rbx
cmpq $20, %r14
jne .LBB3_5
# %bb.8: # %_Z12random_initsPA20_i.exit12
leaq 16(%rsp), %rdi
movl $1600, %esi # imm = 0x640
callq hipMalloc
leaq 8(%rsp), %rdi
movl $1600, %esi # imm = 0x640
callq hipMalloc
movq %rsp, %rdi
movl $1600, %esi # imm = 0x640
callq hipMalloc
movq 16(%rsp), %rdi
leaq 1728(%rsp), %rsi
movl $1600, %edx # imm = 0x640
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $1600, %edx # imm = 0x640
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
leaq 3328(%rsp), %rsi
movl $1600, %edx # imm = 0x640
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $85899345940, %rdx # imm = 0x1400000014
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_10
# %bb.9:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPA20_iS0_S0_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_10:
movq (%rsp), %rsi
leaq 3328(%rsp), %rbx
movl $1600, %edx # imm = 0x640
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
leaq 1728(%rsp), %rdi
leaq 128(%rsp), %rsi
movq %rbx, %rdx
callq _Z4showPA20_iS0_S0_
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB3_15
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB3_13
# %bb.12:
movzbl 67(%rbx), %eax
jmp .LBB3_14
.LBB3_13:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB3_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $4928, %rsp # imm = 0x1340
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB3_15:
.cfi_def_cfa_offset 4960
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPA20_iS0_S0_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPA20_iS0_S0_,@object # @_Z3addPA20_iS0_S0_
.section .rodata,"a",@progbits
.globl _Z3addPA20_iS0_S0_
.p2align 3, 0x0
_Z3addPA20_iS0_S0_:
.quad _Z18__device_stub__addPA20_iS0_S0_
.size _Z3addPA20_iS0_S0_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "matrix["
.size .L.str, 8
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "]["
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "]"
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " = "
.size .L.str.3, 4
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " + "
.size .L.str.4, 4
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "="
.size .L.str.5, 2
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\t"
.size .L.str.6, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPA20_iS0_S0_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPA20_iS0_S0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPA20_iS0_S0_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<iostream>
#include<cuda.h>
using namespace std;
__global__ void kernel(int *data)
{
data[threadIdx.x + blockIdx.x * 8 ] = threadIdx.x + blockIdx.x;
}
int main(){
const int numElem = 16;
int hostArray[numElem], *dArray;
//
cudaMalloc ( (void**) &dArray, sizeof(int) * numElem );
cudaMemset (dArray, 0, numElem * sizeof (int));
kernel <<< 2, 8 >>>( dArray);
cudaMemcpy(&hostArray, dArray, sizeof (int) * numElem , cudaMemcpyDeviceToHost);
for (int i = 0 ; i < numElem; i++)
cout << hostArray[i] << endl;
cudaFree(dArray);
return 0;
} | code for sm_80
Function : _Z6kernelPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e240000002500 */
/*0050*/ IADD3 R5, R5, R2, RZ ; /* 0x0000000205057210 */
/* 0x001fca0007ffe0ff */
/*0060*/ IMAD R2, R2, 0x7, R5 ; /* 0x0000000702027824 */
/* 0x000fc800078e0205 */
/*0070*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0003 */
/*0080*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0090*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00a0*/ BRA 0xa0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<iostream>
#include<cuda.h>
using namespace std;
__global__ void kernel(int *data)
{
data[threadIdx.x + blockIdx.x * 8 ] = threadIdx.x + blockIdx.x;
}
int main(){
const int numElem = 16;
int hostArray[numElem], *dArray;
//
cudaMalloc ( (void**) &dArray, sizeof(int) * numElem );
cudaMemset (dArray, 0, numElem * sizeof (int));
kernel <<< 2, 8 >>>( dArray);
cudaMemcpy(&hostArray, dArray, sizeof (int) * numElem , cudaMemcpyDeviceToHost);
for (int i = 0 ; i < numElem; i++)
cout << hostArray[i] << endl;
cudaFree(dArray);
return 0;
} | .file "tmpxft_00008770_00000000-6_problem2.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z6kernelPiPi
.type _Z25__device_stub__Z6kernelPiPi, @function
_Z25__device_stub__Z6kernelPiPi:
.LFB3694:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6kernelPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z25__device_stub__Z6kernelPiPi, .-_Z25__device_stub__Z6kernelPiPi
.globl _Z6kernelPi
.type _Z6kernelPi, @function
_Z6kernelPi:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z6kernelPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z6kernelPi, .-_Z6kernelPi
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $112, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $64, %esi
call cudaMalloc@PLT
movl $64, %edx
movl $0, %esi
movq (%rsp), %rdi
call cudaMemset@PLT
movl $8, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $2, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L12:
leaq 32(%rsp), %r12
movl $2, %ecx
movl $64, %edx
movq (%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq 96(%rsp), %r14
leaq _ZSt4cout(%rip), %r13
jmp .L17
.L21:
movq (%rsp), %rdi
call _Z25__device_stub__Z6kernelPiPi
jmp .L12
.L24:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L22
call _ZSt16__throw_bad_castv@PLT
.L22:
call __stack_chk_fail@PLT
.L15:
movq %rbp, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%rbp), %rax
movl $10, %esi
movq %rbp, %rdi
call *48(%rax)
movl %eax, %esi
.L16:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4, %r12
cmpq %r12, %r14
je .L23
.L17:
movl (%r12), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbp
testq %rbp, %rbp
je .L24
cmpb $0, 56(%rbp)
je .L15
movzbl 67(%rbp), %esi
jmp .L16
.L23:
movq (%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $112, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6kernelPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
#include<cuda.h>
using namespace std;
__global__ void kernel(int *data)
{
data[threadIdx.x + blockIdx.x * 8 ] = threadIdx.x + blockIdx.x;
}
int main(){
const int numElem = 16;
int hostArray[numElem], *dArray;
//
cudaMalloc ( (void**) &dArray, sizeof(int) * numElem );
cudaMemset (dArray, 0, numElem * sizeof (int));
kernel <<< 2, 8 >>>( dArray);
cudaMemcpy(&hostArray, dArray, sizeof (int) * numElem , cudaMemcpyDeviceToHost);
for (int i = 0 ; i < numElem; i++)
cout << hostArray[i] << endl;
cudaFree(dArray);
return 0;
} | #include<iostream>
#include<hip/hip_runtime.h>
using namespace std;
__global__ void kernel(int *data)
{
data[threadIdx.x + blockIdx.x * 8 ] = threadIdx.x + blockIdx.x;
}
int main(){
const int numElem = 16;
int hostArray[numElem], *dArray;
//
hipMalloc ( (void**) &dArray, sizeof(int) * numElem );
hipMemset (dArray, 0, numElem * sizeof (int));
kernel <<< 2, 8 >>>( dArray);
hipMemcpy(&hostArray, dArray, sizeof (int) * numElem , hipMemcpyDeviceToHost);
for (int i = 0 ; i < numElem; i++)
cout << hostArray[i] << endl;
hipFree(dArray);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<iostream>
#include<hip/hip_runtime.h>
using namespace std;
__global__ void kernel(int *data)
{
data[threadIdx.x + blockIdx.x * 8 ] = threadIdx.x + blockIdx.x;
}
int main(){
const int numElem = 16;
int hostArray[numElem], *dArray;
//
hipMalloc ( (void**) &dArray, sizeof(int) * numElem );
hipMemset (dArray, 0, numElem * sizeof (int));
kernel <<< 2, 8 >>>( dArray);
hipMemcpy(&hostArray, dArray, sizeof (int) * numElem , hipMemcpyDeviceToHost);
for (int i = 0 ; i < numElem; i++)
cout << hostArray[i] << endl;
hipFree(dArray);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPi
.globl _Z6kernelPi
.p2align 8
.type _Z6kernelPi,@function
_Z6kernelPi:
s_load_b64 s[0:1], s[0:1], 0x0
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v3, s15, v0
v_lshl_add_u32 v1, s15, 3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[0:1], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPi, .Lfunc_end0-_Z6kernelPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<iostream>
#include<hip/hip_runtime.h>
using namespace std;
__global__ void kernel(int *data)
{
data[threadIdx.x + blockIdx.x * 8 ] = threadIdx.x + blockIdx.x;
}
int main(){
const int numElem = 16;
int hostArray[numElem], *dArray;
//
hipMalloc ( (void**) &dArray, sizeof(int) * numElem );
hipMemset (dArray, 0, numElem * sizeof (int));
kernel <<< 2, 8 >>>( dArray);
hipMemcpy(&hostArray, dArray, sizeof (int) * numElem , hipMemcpyDeviceToHost);
for (int i = 0 ; i < numElem; i++)
cout << hostArray[i] << endl;
hipFree(dArray);
return 0;
} | .text
.file "problem2.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__kernelPi # -- Begin function _Z21__device_stub__kernelPi
.p2align 4, 0x90
.type _Z21__device_stub__kernelPi,@function
_Z21__device_stub__kernelPi: # @_Z21__device_stub__kernelPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z6kernelPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPi, .Lfunc_end0-_Z21__device_stub__kernelPi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 8(%rsp), %rdi
movl $64, %esi
callq hipMalloc
movq 8(%rsp), %rdi
movl $64, %edx
xorl %esi, %esi
callq hipMemset
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 6(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6kernelPi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
leaq 64(%rsp), %rdi
movl $64, %edx
movl $2, %ecx
callq hipMemcpy
xorl %r14d, %r14d
jmp .LBB1_3
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_3 Depth=1
movq %rbx, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB1_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB1_3 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r14
cmpq $16, %r14
je .LBB1_8
.LBB1_3: # =>This Inner Loop Header: Depth=1
movl 64(%rsp,%r14,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_9
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB1_3 Depth=1
cmpb $0, 56(%rbx)
je .LBB1_6
# %bb.5: # in Loop: Header=BB1_3 Depth=1
movzbl 67(%rbx), %ecx
jmp .LBB1_7
.LBB1_8:
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_9:
.cfi_def_cfa_offset 160
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPi,@object # @_Z6kernelPi
.section .rodata,"a",@progbits
.globl _Z6kernelPi
.p2align 3, 0x0
_Z6kernelPi:
.quad _Z21__device_stub__kernelPi
.size _Z6kernelPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6kernelPi"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPi
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e240000002500 */
/*0050*/ IADD3 R5, R5, R2, RZ ; /* 0x0000000205057210 */
/* 0x001fca0007ffe0ff */
/*0060*/ IMAD R2, R2, 0x7, R5 ; /* 0x0000000702027824 */
/* 0x000fc800078e0205 */
/*0070*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0003 */
/*0080*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0090*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00a0*/ BRA 0xa0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPi
.globl _Z6kernelPi
.p2align 8
.type _Z6kernelPi,@function
_Z6kernelPi:
s_load_b64 s[0:1], s[0:1], 0x0
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v3, s15, v0
v_lshl_add_u32 v1, s15, 3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[0:1], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPi, .Lfunc_end0-_Z6kernelPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00008770_00000000-6_problem2.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z6kernelPiPi
.type _Z25__device_stub__Z6kernelPiPi, @function
_Z25__device_stub__Z6kernelPiPi:
.LFB3694:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6kernelPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z25__device_stub__Z6kernelPiPi, .-_Z25__device_stub__Z6kernelPiPi
.globl _Z6kernelPi
.type _Z6kernelPi, @function
_Z6kernelPi:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z6kernelPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z6kernelPi, .-_Z6kernelPi
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $112, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $64, %esi
call cudaMalloc@PLT
movl $64, %edx
movl $0, %esi
movq (%rsp), %rdi
call cudaMemset@PLT
movl $8, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $2, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L12:
leaq 32(%rsp), %r12
movl $2, %ecx
movl $64, %edx
movq (%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq 96(%rsp), %r14
leaq _ZSt4cout(%rip), %r13
jmp .L17
.L21:
movq (%rsp), %rdi
call _Z25__device_stub__Z6kernelPiPi
jmp .L12
.L24:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L22
call _ZSt16__throw_bad_castv@PLT
.L22:
call __stack_chk_fail@PLT
.L15:
movq %rbp, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%rbp), %rax
movl $10, %esi
movq %rbp, %rdi
call *48(%rax)
movl %eax, %esi
.L16:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4, %r12
cmpq %r12, %r14
je .L23
.L17:
movl (%r12), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbp
testq %rbp, %rbp
je .L24
cmpb $0, 56(%rbp)
je .L15
movzbl 67(%rbp), %esi
jmp .L16
.L23:
movq (%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
addq $112, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6kernelPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "problem2.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__kernelPi # -- Begin function _Z21__device_stub__kernelPi
.p2align 4, 0x90
.type _Z21__device_stub__kernelPi,@function
_Z21__device_stub__kernelPi: # @_Z21__device_stub__kernelPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z6kernelPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPi, .Lfunc_end0-_Z21__device_stub__kernelPi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 8(%rsp), %rdi
movl $64, %esi
callq hipMalloc
movq 8(%rsp), %rdi
movl $64, %edx
xorl %esi, %esi
callq hipMemset
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 6(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6kernelPi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
leaq 64(%rsp), %rdi
movl $64, %edx
movl $2, %ecx
callq hipMemcpy
xorl %r14d, %r14d
jmp .LBB1_3
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_3 Depth=1
movq %rbx, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB1_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB1_3 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r14
cmpq $16, %r14
je .LBB1_8
.LBB1_3: # =>This Inner Loop Header: Depth=1
movl 64(%rsp,%r14,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_9
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB1_3 Depth=1
cmpb $0, 56(%rbx)
je .LBB1_6
# %bb.5: # in Loop: Header=BB1_3 Depth=1
movzbl 67(%rbx), %ecx
jmp .LBB1_7
.LBB1_8:
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_9:
.cfi_def_cfa_offset 160
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPi,@object # @_Z6kernelPi
.section .rodata,"a",@progbits
.globl _Z6kernelPi
.p2align 3, 0x0
_Z6kernelPi:
.quad _Z21__device_stub__kernelPi
.size _Z6kernelPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6kernelPi"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPi
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
static void HandleError( cudaError_t err, const char *file, int line )
{
if (err != cudaSuccess)
{
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define SIZE 10
using namespace std;
void print_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
cout<<array[i][j]<<" ";
}
cout<<endl;
}
}
void initialize_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
array[i][j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
}
void array_decl ()
{
static float A[SIZE][SIZE];
static float B[SIZE][SIZE];
static float C[SIZE][SIZE];
static float D[SIZE][SIZE];
initialize_array(A);
initialize_array(B);
initialize_array(C);
initialize_array(D);
int *d_A, *d_B, *d_C, *d_D, *d_temp;
HANDLE_ERROR(cudaMalloc((void**)&d_A, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_B, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_C, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_D, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_temp, SIZE*SIZE*sizeof(float)));
cudaFree (d_A);
cudaFree (d_B);
cudaFree (d_C);
cudaFree (d_D);
cudaFree (d_temp);
}
int main (int argc, char **argv) {
array_decl();
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
static void HandleError( cudaError_t err, const char *file, int line )
{
if (err != cudaSuccess)
{
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define SIZE 10
using namespace std;
void print_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
cout<<array[i][j]<<" ";
}
cout<<endl;
}
}
void initialize_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
array[i][j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
}
void array_decl ()
{
static float A[SIZE][SIZE];
static float B[SIZE][SIZE];
static float C[SIZE][SIZE];
static float D[SIZE][SIZE];
initialize_array(A);
initialize_array(B);
initialize_array(C);
initialize_array(D);
int *d_A, *d_B, *d_C, *d_D, *d_temp;
HANDLE_ERROR(cudaMalloc((void**)&d_A, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_B, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_C, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_D, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_temp, SIZE*SIZE*sizeof(float)));
cudaFree (d_A);
cudaFree (d_B);
cudaFree (d_C);
cudaFree (d_D);
cudaFree (d_temp);
}
int main (int argc, char **argv) {
array_decl();
} | .file "tmpxft_00139330_00000000-6_2mm.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s in %s at line %d\n"
#NO_APP
.text
.type _ZL11HandleError9cudaErrorPKci, @function
_ZL11HandleError9cudaErrorPKci:
.LFB3768:
.cfi_startproc
testl %edi, %edi
jne .L6
ret
.L6:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movl %edx, %ebp
call cudaGetErrorString@PLT
movq %rax, %rdx
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE3768:
.size _ZL11HandleError9cudaErrorPKci, .-_ZL11HandleError9cudaErrorPKci
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3775:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3775:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1
.LC1:
.string " "
.text
.globl _Z11print_arrayPA10_f
.type _Z11print_arrayPA10_f, @function
_Z11print_arrayPA10_f:
.LFB3769:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
leaq 40(%rdi), %rbp
leaq 440(%rdi), %r14
leaq _ZSt4cout(%rip), %r12
leaq .LC1(%rip), %r13
jmp .L10
.L18:
call _ZSt16__throw_bad_castv@PLT
.L13:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L14:
movsbl %sil, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $40, %rbp
cmpq %rbp, %r14
je .L9
.L10:
leaq -40(%rbp), %rbx
.L11:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %rbp
jne .L11
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L18
cmpb $0, 56(%rbx)
je .L13
movzbl 67(%rbx), %esi
jmp .L14
.L9:
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3769:
.size _Z11print_arrayPA10_f, .-_Z11print_arrayPA10_f
.globl _Z16initialize_arrayPA10_f
.type _Z16initialize_arrayPA10_f, @function
_Z16initialize_arrayPA10_f:
.LFB3770:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
leaq 40(%rdi), %rbp
leaq 440(%rdi), %r12
.L20:
leaq -40(%rbp), %rbx
.L21:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L21
addq $40, %rbp
cmpq %r12, %rbp
jne .L20
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3770:
.size _Z16initialize_arrayPA10_f, .-_Z16initialize_arrayPA10_f
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "/home/ubuntu/Datasets/stackv2/train-structured/sabusajin/PolyBench-CUDA-OpenACC/master/MatrixMM/2mm.cu"
.text
.globl _Z10array_declv
.type _Z10array_declv, @function
_Z10array_declv:
.LFB3771:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq _ZZ10array_declvE1A(%rip), %rdi
call _Z16initialize_arrayPA10_f
leaq _ZZ10array_declvE1B(%rip), %rdi
call _Z16initialize_arrayPA10_f
leaq _ZZ10array_declvE1C(%rip), %rdi
call _Z16initialize_arrayPA10_f
leaq _ZZ10array_declvE1D(%rip), %rdi
call _Z16initialize_arrayPA10_f
movq %rsp, %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $57, %edx
leaq .LC3(%rip), %rbx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 8(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $58, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 16(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $59, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 24(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $60, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 32(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $61, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L28
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3771:
.size _Z10array_declv, .-_Z10array_declv
.globl main
.type main, @function
main:
.LFB3772:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z10array_declv
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3772:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3798:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3798:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZZ10array_declvE1D
.comm _ZZ10array_declvE1D,400,32
.local _ZZ10array_declvE1C
.comm _ZZ10array_declvE1C,400,32
.local _ZZ10array_declvE1B
.comm _ZZ10array_declvE1B,400,32
.local _ZZ10array_declvE1A
.comm _ZZ10array_declvE1A,400,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 805306368
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
static void HandleError( cudaError_t err, const char *file, int line )
{
if (err != cudaSuccess)
{
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define SIZE 10
using namespace std;
void print_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
cout<<array[i][j]<<" ";
}
cout<<endl;
}
}
void initialize_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
array[i][j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
}
void array_decl ()
{
static float A[SIZE][SIZE];
static float B[SIZE][SIZE];
static float C[SIZE][SIZE];
static float D[SIZE][SIZE];
initialize_array(A);
initialize_array(B);
initialize_array(C);
initialize_array(D);
int *d_A, *d_B, *d_C, *d_D, *d_temp;
HANDLE_ERROR(cudaMalloc((void**)&d_A, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_B, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_C, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_D, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_temp, SIZE*SIZE*sizeof(float)));
cudaFree (d_A);
cudaFree (d_B);
cudaFree (d_C);
cudaFree (d_D);
cudaFree (d_temp);
}
int main (int argc, char **argv) {
array_decl();
} | #include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
static void HandleError( hipError_t err, const char *file, int line )
{
if (err != hipSuccess)
{
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define SIZE 10
using namespace std;
void print_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
cout<<array[i][j]<<" ";
}
cout<<endl;
}
}
void initialize_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
array[i][j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
}
void array_decl ()
{
static float A[SIZE][SIZE];
static float B[SIZE][SIZE];
static float C[SIZE][SIZE];
static float D[SIZE][SIZE];
initialize_array(A);
initialize_array(B);
initialize_array(C);
initialize_array(D);
int *d_A, *d_B, *d_C, *d_D, *d_temp;
HANDLE_ERROR(hipMalloc((void**)&d_A, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_B, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_C, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_D, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_temp, SIZE*SIZE*sizeof(float)));
hipFree (d_A);
hipFree (d_B);
hipFree (d_C);
hipFree (d_D);
hipFree (d_temp);
}
int main (int argc, char **argv) {
array_decl();
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
static void HandleError( hipError_t err, const char *file, int line )
{
if (err != hipSuccess)
{
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define SIZE 10
using namespace std;
void print_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
cout<<array[i][j]<<" ";
}
cout<<endl;
}
}
void initialize_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
array[i][j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
}
void array_decl ()
{
static float A[SIZE][SIZE];
static float B[SIZE][SIZE];
static float C[SIZE][SIZE];
static float D[SIZE][SIZE];
initialize_array(A);
initialize_array(B);
initialize_array(C);
initialize_array(D);
int *d_A, *d_B, *d_C, *d_D, *d_temp;
HANDLE_ERROR(hipMalloc((void**)&d_A, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_B, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_C, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_D, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_temp, SIZE*SIZE*sizeof(float)));
hipFree (d_A);
hipFree (d_B);
hipFree (d_C);
hipFree (d_D);
hipFree (d_temp);
}
int main (int argc, char **argv) {
array_decl();
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
static void HandleError( hipError_t err, const char *file, int line )
{
if (err != hipSuccess)
{
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define SIZE 10
using namespace std;
void print_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
cout<<array[i][j]<<" ";
}
cout<<endl;
}
}
void initialize_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
array[i][j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
}
void array_decl ()
{
static float A[SIZE][SIZE];
static float B[SIZE][SIZE];
static float C[SIZE][SIZE];
static float D[SIZE][SIZE];
initialize_array(A);
initialize_array(B);
initialize_array(C);
initialize_array(D);
int *d_A, *d_B, *d_C, *d_D, *d_temp;
HANDLE_ERROR(hipMalloc((void**)&d_A, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_B, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_C, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_D, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_temp, SIZE*SIZE*sizeof(float)));
hipFree (d_A);
hipFree (d_B);
hipFree (d_C);
hipFree (d_D);
hipFree (d_temp);
}
int main (int argc, char **argv) {
array_decl();
} | .text
.file "2mm.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z11print_arrayPA10_f # -- Begin function _Z11print_arrayPA10_f
.p2align 4, 0x90
.type _Z11print_arrayPA10_f,@function
_Z11print_arrayPA10_f: # @_Z11print_arrayPA10_f
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r15d, %r15d
jmp .LBB0_1
.p2align 4, 0x90
.LBB0_6: # in Loop: Header=BB0_1 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB0_1 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
addq $40, %rbx
cmpq $10, %r15
je .LBB0_8
.LBB0_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_2 Depth 2
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB0_2: # Parent Loop BB0_1 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $10, %r14
jne .LBB0_2
# %bb.3: # in Loop: Header=BB0_1 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB0_9
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB0_1 Depth=1
cmpb $0, 56(%r14)
je .LBB0_6
# %bb.5: # in Loop: Header=BB0_1 Depth=1
movzbl 67(%r14), %eax
jmp .LBB0_7
.LBB0_8:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_9:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z11print_arrayPA10_f, .Lfunc_end0-_Z11print_arrayPA10_f
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z16initialize_arrayPA10_f
.LCPI1_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl _Z16initialize_arrayPA10_f
.p2align 4, 0x90
.type _Z16initialize_arrayPA10_f,@function
_Z16initialize_arrayPA10_f: # @_Z16initialize_arrayPA10_f
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq $10, %r15
jne .LBB1_2
# %bb.3: # in Loop: Header=BB1_1 Depth=1
incq %r14
addq $40, %rbx
cmpq $10, %r14
jne .LBB1_1
# %bb.4:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z16initialize_arrayPA10_f, .Lfunc_end1-_Z16initialize_arrayPA10_f
.cfi_endproc
# -- End function
.globl _Z10array_declv # -- Begin function _Z10array_declv
.p2align 4, 0x90
.type _Z10array_declv,@function
_Z10array_declv: # @_Z10array_declv
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $40, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_1: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
movl $10, %r14d
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
decq %r14
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rbx
cmpq $10, %rbx
jne .LBB2_1
# %bb.4: # %.preheader.i1.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_5: # %.preheader.i1
# =>This Loop Header: Depth=1
# Child Loop BB2_6 Depth 2
movl $10, %r14d
.p2align 4, 0x90
.LBB2_6: # Parent Loop BB2_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
decq %r14
jne .LBB2_6
# %bb.7: # in Loop: Header=BB2_5 Depth=1
incq %rbx
cmpq $10, %rbx
jne .LBB2_5
# %bb.8: # %.preheader.i9.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_9: # %.preheader.i9
# =>This Loop Header: Depth=1
# Child Loop BB2_10 Depth 2
movl $10, %r14d
.p2align 4, 0x90
.LBB2_10: # Parent Loop BB2_9 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
decq %r14
jne .LBB2_10
# %bb.11: # in Loop: Header=BB2_9 Depth=1
incq %rbx
cmpq $10, %rbx
jne .LBB2_9
# %bb.12: # %.preheader.i17.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_13: # %.preheader.i17
# =>This Loop Header: Depth=1
# Child Loop BB2_14 Depth 2
movl $10, %r14d
.p2align 4, 0x90
.LBB2_14: # Parent Loop BB2_13 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
decq %r14
jne .LBB2_14
# %bb.15: # in Loop: Header=BB2_13 Depth=1
incq %rbx
cmpq $10, %rbx
jne .LBB2_13
# %bb.16: # %_Z16initialize_arrayPA10_f.exit24
leaq 32(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
jne .LBB2_17
# %bb.19: # %_ZL11HandleError10hipError_tPKci.exit
leaq 24(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
jne .LBB2_20
# %bb.21: # %_ZL11HandleError10hipError_tPKci.exit26
leaq 16(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
jne .LBB2_22
# %bb.23: # %_ZL11HandleError10hipError_tPKci.exit28
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
jne .LBB2_24
# %bb.25: # %_ZL11HandleError10hipError_tPKci.exit30
movq %rsp, %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
jne .LBB2_26
# %bb.27: # %_ZL11HandleError10hipError_tPKci.exit32
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $40, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB2_17:
.cfi_def_cfa_offset 64
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.2, %edi
movl $.L.str.1, %edx
movq %rax, %rsi
movl $57, %ecx
jmp .LBB2_18
.LBB2_20:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.2, %edi
movl $.L.str.1, %edx
movq %rax, %rsi
movl $58, %ecx
jmp .LBB2_18
.LBB2_22:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.2, %edi
movl $.L.str.1, %edx
movq %rax, %rsi
movl $59, %ecx
jmp .LBB2_18
.LBB2_24:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.2, %edi
movl $.L.str.1, %edx
movq %rax, %rsi
movl $60, %ecx
jmp .LBB2_18
.LBB2_26:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.2, %edi
movl $.L.str.1, %edx
movq %rax, %rsi
movl $61, %ecx
.LBB2_18:
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end2:
.size _Z10array_declv, .Lfunc_end2-_Z10array_declv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq _Z10array_declv
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " "
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/sabusajin/PolyBench-CUDA-OpenACC/master/MatrixMM/2mm.hip"
.size .L.str.1, 114
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%s in %s at line %d\n"
.size .L.str.2, 21
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00139330_00000000-6_2mm.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s in %s at line %d\n"
#NO_APP
.text
.type _ZL11HandleError9cudaErrorPKci, @function
_ZL11HandleError9cudaErrorPKci:
.LFB3768:
.cfi_startproc
testl %edi, %edi
jne .L6
ret
.L6:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movl %edx, %ebp
call cudaGetErrorString@PLT
movq %rax, %rdx
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE3768:
.size _ZL11HandleError9cudaErrorPKci, .-_ZL11HandleError9cudaErrorPKci
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3775:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3775:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1
.LC1:
.string " "
.text
.globl _Z11print_arrayPA10_f
.type _Z11print_arrayPA10_f, @function
_Z11print_arrayPA10_f:
.LFB3769:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
leaq 40(%rdi), %rbp
leaq 440(%rdi), %r14
leaq _ZSt4cout(%rip), %r12
leaq .LC1(%rip), %r13
jmp .L10
.L18:
call _ZSt16__throw_bad_castv@PLT
.L13:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L14:
movsbl %sil, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $40, %rbp
cmpq %rbp, %r14
je .L9
.L10:
leaq -40(%rbp), %rbx
.L11:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %rbp
jne .L11
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L18
cmpb $0, 56(%rbx)
je .L13
movzbl 67(%rbx), %esi
jmp .L14
.L9:
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3769:
.size _Z11print_arrayPA10_f, .-_Z11print_arrayPA10_f
.globl _Z16initialize_arrayPA10_f
.type _Z16initialize_arrayPA10_f, @function
_Z16initialize_arrayPA10_f:
.LFB3770:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
leaq 40(%rdi), %rbp
leaq 440(%rdi), %r12
.L20:
leaq -40(%rbp), %rbx
.L21:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L21
addq $40, %rbp
cmpq %r12, %rbp
jne .L20
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3770:
.size _Z16initialize_arrayPA10_f, .-_Z16initialize_arrayPA10_f
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "/home/ubuntu/Datasets/stackv2/train-structured/sabusajin/PolyBench-CUDA-OpenACC/master/MatrixMM/2mm.cu"
.text
.globl _Z10array_declv
.type _Z10array_declv, @function
_Z10array_declv:
.LFB3771:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq _ZZ10array_declvE1A(%rip), %rdi
call _Z16initialize_arrayPA10_f
leaq _ZZ10array_declvE1B(%rip), %rdi
call _Z16initialize_arrayPA10_f
leaq _ZZ10array_declvE1C(%rip), %rdi
call _Z16initialize_arrayPA10_f
leaq _ZZ10array_declvE1D(%rip), %rdi
call _Z16initialize_arrayPA10_f
movq %rsp, %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $57, %edx
leaq .LC3(%rip), %rbx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 8(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $58, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 16(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $59, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 24(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $60, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 32(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $61, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L28
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3771:
.size _Z10array_declv, .-_Z10array_declv
.globl main
.type main, @function
main:
.LFB3772:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z10array_declv
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3772:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3798:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3798:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZZ10array_declvE1D
.comm _ZZ10array_declvE1D,400,32
.local _ZZ10array_declvE1C
.comm _ZZ10array_declvE1C,400,32
.local _ZZ10array_declvE1B
.comm _ZZ10array_declvE1B,400,32
.local _ZZ10array_declvE1A
.comm _ZZ10array_declvE1A,400,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 805306368
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "2mm.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z11print_arrayPA10_f # -- Begin function _Z11print_arrayPA10_f
.p2align 4, 0x90
.type _Z11print_arrayPA10_f,@function
_Z11print_arrayPA10_f: # @_Z11print_arrayPA10_f
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r15d, %r15d
jmp .LBB0_1
.p2align 4, 0x90
.LBB0_6: # in Loop: Header=BB0_1 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB0_1 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
addq $40, %rbx
cmpq $10, %r15
je .LBB0_8
.LBB0_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_2 Depth 2
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB0_2: # Parent Loop BB0_1 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $10, %r14
jne .LBB0_2
# %bb.3: # in Loop: Header=BB0_1 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB0_9
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB0_1 Depth=1
cmpb $0, 56(%r14)
je .LBB0_6
# %bb.5: # in Loop: Header=BB0_1 Depth=1
movzbl 67(%r14), %eax
jmp .LBB0_7
.LBB0_8:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_9:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z11print_arrayPA10_f, .Lfunc_end0-_Z11print_arrayPA10_f
.cfi_endproc
# -- End function
        .section        .rodata.cst4,"aM",@progbits,4
        .p2align        2, 0x0                          # -- Begin function _Z16initialize_arrayPA10_f
.LCPI1_0:
        .long   0x30000000                      # float 4.65661287E-10 = 2^-31; scales rand() into [0, ~1)
        .text
        .globl  _Z16initialize_arrayPA10_f
        .p2align        4, 0x90
        .type   _Z16initialize_arrayPA10_f,@function
#-----------------------------------------------------------------------
# void initialize_array(float (*a)[10])   (mangled: _Z16initialize_arrayPA10_f)
# ABI:    SysV AMD64 (HIP host code, AMD clang)
# In:     rdi = base of a 10x10 float matrix
# Effect: a[i][j] = (float)rand() * 2^-31 for all 100 elements
# Clobb:  caller-saved regs (via rand); preserves rbx/r14/r15
#-----------------------------------------------------------------------
_Z16initialize_arrayPA10_f:             # @_Z16initialize_arrayPA10_f
        .cfi_startproc
# %bb.0:
        pushq   %r15
        .cfi_def_cfa_offset 16
        pushq   %r14
        .cfi_def_cfa_offset 24
        pushq   %rbx
        .cfi_def_cfa_offset 32
        .cfi_offset %rbx, -32
        .cfi_offset %r14, -24
        .cfi_offset %r15, -16
        movq    %rdi, %rbx              # rbx = pointer to current row
        xorl    %r14d, %r14d            # r14 = row index, 0..9
        .p2align        4, 0x90
.LBB1_1:                                # %.preheader — outer loop over 10 rows
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB1_2 Depth 2
        xorl    %r15d, %r15d            # r15 = column index, 0..9
        .p2align        4, 0x90
.LBB1_2:                                # Parent Loop BB1_1 Depth=1
                                        # => This Inner Loop Header: Depth=2
        callq   rand
        movss   .LCPI1_0(%rip), %xmm1   # xmm1 = 2^-31 scale constant
        xorps   %xmm0, %xmm0            # break false dependency before cvtsi2ss
        cvtsi2ss        %eax, %xmm0     # xmm0 = (float)rand()
        mulss   %xmm1, %xmm0            # scale to [0, ~1)
        movss   %xmm0, (%rbx,%r15,4)    # a[row][col] = value
        incq    %r15
        cmpq    $10, %r15               # 10 columns per row
        jne     .LBB1_2
# %bb.3:                                # in Loop: Header=BB1_1 Depth=1
        incq    %r14
        addq    $40, %rbx               # advance one row = 10 floats * 4 bytes
        cmpq    $10, %r14               # 10 rows total
        jne     .LBB1_1
# %bb.4:
        popq    %rbx
        .cfi_def_cfa_offset 24
        popq    %r14
        .cfi_def_cfa_offset 16
        popq    %r15
        .cfi_def_cfa_offset 8
        retq
.Lfunc_end1:
        .size   _Z16initialize_arrayPA10_f, .Lfunc_end1-_Z16initialize_arrayPA10_f
        .cfi_endproc
                                        # -- End function
        .globl  _Z10array_declv                 # -- Begin function _Z10array_declv
        .p2align        4, 0x90
        .type   _Z10array_declv,@function
#-----------------------------------------------------------------------
# void array_decl()   (mangled: _Z10array_declv)
# ABI: SysV AMD64 (HIP host code).
# Behavior visible here:
#   1. Four identical loop nests each call rand() 100 times (10x10) and
#      discard the result.  NOTE(review): the stores of the scaled values
#      were optimized out; this looks like four inlined initialize_array()
#      calls on dead local matrices — confirm against the HIP source.
#   2. hipMalloc()s five 400-byte (10x10 float) device buffers into the
#      stack slots rsp+32, rsp+24, rsp+16, rsp+8, rsp+0.  Any failure
#      reports "%s in %s at line %d\n" (error string, source file,
#      source line 57..61) via printf and exit(1)s.
#   3. On success, hipFree()s all five buffers and returns.
# Stack: 40 bytes of locals (five 8-byte device pointers) + 2 pushes,
#        keeping rsp 16-aligned at every call site.
#-----------------------------------------------------------------------
_Z10array_declv:                        # @_Z10array_declv
        .cfi_startproc
# %bb.0:
        pushq   %r14
        .cfi_def_cfa_offset 16
        pushq   %rbx
        .cfi_def_cfa_offset 24
        subq    $40, %rsp               # five 8-byte device-pointer slots
        .cfi_def_cfa_offset 64
        .cfi_offset %rbx, -24
        .cfi_offset %r14, -16
        xorl    %ebx, %ebx              # rbx = outer (row) counter, nest #1
        .p2align        4, 0x90
.LBB2_1:                                # %.preheader.i — rand() nest #1
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB2_2 Depth 2
        movl    $10, %r14d              # 10 inner iterations
        .p2align        4, 0x90
.LBB2_2:                                # Parent Loop BB2_1 Depth=1
                                        # => This Inner Loop Header: Depth=2
        callq   rand                    # result discarded (side effect only)
        decq    %r14
        jne     .LBB2_2
# %bb.3:                                # in Loop: Header=BB2_1 Depth=1
        incq    %rbx
        cmpq    $10, %rbx
        jne     .LBB2_1
# %bb.4:                                # %.preheader.i1.preheader
        xorl    %ebx, %ebx              # reset counter for nest #2
        .p2align        4, 0x90
.LBB2_5:                                # %.preheader.i1 — rand() nest #2
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB2_6 Depth 2
        movl    $10, %r14d
        .p2align        4, 0x90
.LBB2_6:                                # Parent Loop BB2_5 Depth=1
                                        # => This Inner Loop Header: Depth=2
        callq   rand
        decq    %r14
        jne     .LBB2_6
# %bb.7:                                # in Loop: Header=BB2_5 Depth=1
        incq    %rbx
        cmpq    $10, %rbx
        jne     .LBB2_5
# %bb.8:                                # %.preheader.i9.preheader
        xorl    %ebx, %ebx              # reset counter for nest #3
        .p2align        4, 0x90
.LBB2_9:                                # %.preheader.i9 — rand() nest #3
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB2_10 Depth 2
        movl    $10, %r14d
        .p2align        4, 0x90
.LBB2_10:                               # Parent Loop BB2_9 Depth=1
                                        # => This Inner Loop Header: Depth=2
        callq   rand
        decq    %r14
        jne     .LBB2_10
# %bb.11:                               # in Loop: Header=BB2_9 Depth=1
        incq    %rbx
        cmpq    $10, %rbx
        jne     .LBB2_9
# %bb.12:                               # %.preheader.i17.preheader
        xorl    %ebx, %ebx              # reset counter for nest #4
        .p2align        4, 0x90
.LBB2_13:                               # %.preheader.i17 — rand() nest #4
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB2_14 Depth 2
        movl    $10, %r14d
        .p2align        4, 0x90
.LBB2_14:                               # Parent Loop BB2_13 Depth=1
                                        # => This Inner Loop Header: Depth=2
        callq   rand
        decq    %r14
        jne     .LBB2_14
# %bb.15:                               # in Loop: Header=BB2_13 Depth=1
        incq    %rbx
        cmpq    $10, %rbx
        jne     .LBB2_13
# %bb.16:                               # %_Z16initialize_arrayPA10_f.exit24
        leaq    32(%rsp), %rdi          # &devptr[0]
        movl    $400, %esi              # imm = 0x190 — 10*10 floats
        callq   hipMalloc
        testl   %eax, %eax              # hipSuccess == 0
        jne     .LBB2_17                # nonzero -> report line 57, exit
# %bb.19:                               # %_ZL11HandleError10hipError_tPKci.exit
        leaq    24(%rsp), %rdi          # &devptr[1]
        movl    $400, %esi              # imm = 0x190
        callq   hipMalloc
        testl   %eax, %eax
        jne     .LBB2_20                # failure -> report line 58
# %bb.21:                               # %_ZL11HandleError10hipError_tPKci.exit26
        leaq    16(%rsp), %rdi          # &devptr[2]
        movl    $400, %esi              # imm = 0x190
        callq   hipMalloc
        testl   %eax, %eax
        jne     .LBB2_22                # failure -> report line 59
# %bb.23:                               # %_ZL11HandleError10hipError_tPKci.exit28
        leaq    8(%rsp), %rdi           # &devptr[3]
        movl    $400, %esi              # imm = 0x190
        callq   hipMalloc
        testl   %eax, %eax
        jne     .LBB2_24                # failure -> report line 60
# %bb.25:                               # %_ZL11HandleError10hipError_tPKci.exit30
        movq    %rsp, %rdi              # &devptr[4]
        movl    $400, %esi              # imm = 0x190
        callq   hipMalloc
        testl   %eax, %eax
        jne     .LBB2_26                # failure -> report line 61
# %bb.27:                               # %_ZL11HandleError10hipError_tPKci.exit32
                                        # success path: release all five buffers
        movq    32(%rsp), %rdi
        callq   hipFree
        movq    24(%rsp), %rdi
        callq   hipFree
        movq    16(%rsp), %rdi
        callq   hipFree
        movq    8(%rsp), %rdi
        callq   hipFree
        movq    (%rsp), %rdi
        callq   hipFree
        addq    $40, %rsp
        .cfi_def_cfa_offset 24
        popq    %rbx
        .cfi_def_cfa_offset 16
        popq    %r14
        .cfi_def_cfa_offset 8
        retq
.LBB2_17:                               # error path for hipMalloc #1
        .cfi_def_cfa_offset 64
        movl    %eax, %edi              # hipError_t -> string
        callq   hipGetErrorString
        movl    $.L.str.2, %edi         # "%s in %s at line %d\n"
        movl    $.L.str.1, %edx         # __FILE__ string
        movq    %rax, %rsi              # error text
        movl    $57, %ecx               # source line number
        jmp     .LBB2_18
.LBB2_20:                               # error path for hipMalloc #2
        movl    %eax, %edi
        callq   hipGetErrorString
        movl    $.L.str.2, %edi
        movl    $.L.str.1, %edx
        movq    %rax, %rsi
        movl    $58, %ecx
        jmp     .LBB2_18
.LBB2_22:                               # error path for hipMalloc #3
        movl    %eax, %edi
        callq   hipGetErrorString
        movl    $.L.str.2, %edi
        movl    $.L.str.1, %edx
        movq    %rax, %rsi
        movl    $59, %ecx
        jmp     .LBB2_18
.LBB2_24:                               # error path for hipMalloc #4
        movl    %eax, %edi
        callq   hipGetErrorString
        movl    $.L.str.2, %edi
        movl    $.L.str.1, %edx
        movq    %rax, %rsi
        movl    $60, %ecx
        jmp     .LBB2_18
.LBB2_26:                               # error path for hipMalloc #5 (falls through)
        movl    %eax, %edi
        callq   hipGetErrorString
        movl    $.L.str.2, %edi
        movl    $.L.str.1, %edx
        movq    %rax, %rsi
        movl    $61, %ecx
.LBB2_18:                               # common tail: printf(fmt, err, file, line); exit(1)
        xorl    %eax, %eax              # variadic call: 0 vector registers used
        callq   printf
        movl    $1, %edi
        callq   exit
.Lfunc_end2:
        .size   _Z10array_declv, .Lfunc_end2-_Z10array_declv
        .cfi_endproc
                                        # -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq _Z10array_declv
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
        .type   .L.str,@object          # @.str
        .section        .rodata.str1.1,"aMS",@progbits,1
.L.str:
        .asciz  " "                     # separator written between printed matrix elements
        .size   .L.str, 2
        .type   .L.str.1,@object        # @.str.1
.L.str.1:
        # __FILE__ string passed to the error-report printf in array_decl
        .asciz  "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/sabusajin/PolyBench-CUDA-OpenACC/master/MatrixMM/2mm.hip"
        .size   .L.str.1, 114
        .type   .L.str.2,@object        # @.str.2
.L.str.2:
        .asciz  "%s in %s at line %d\n" # printf format: (error string, file, line)
        .size   .L.str.2, 21
        .type   __hip_cuid_,@object     # @__hip_cuid_
        .bss
        .globl  __hip_cuid_
__hip_cuid_:
        .byte   0                       # 0x0 — HIP compilation-unit id, patched by the runtime/linker machinery
        .size   __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
// Error-check helper used via the CHECK macro below: if `err` is not
// cudaSuccess, print the failing call, file and line to stderr and
// terminate the process with exit(1).  T is expected to be cudaError_t
// (it is compared against cudaSuccess).
template<typename T>
static inline void check(T err, const char* const func, const char* const file, const int line) {
    if (err != cudaSuccess) {
        std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
        std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
        exit(1);
    }
}
// Wraps a CUDA API call; #x stringizes the expression for the report.
#define CHECK(x) check(x, #x, __FILE__, __LINE__)
// template <class T>
// void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
// CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
// }
// Kernel: each thread builds a running sum over its own `len`-element
// slice of `mask`, writing into `output` shifted by one position
// (output[start] is seeded from mask[start-1] — an exclusive-scan offset).
// NOTE(review): the `+=` in the loop only behaves like `=` because the
// caller zero-initializes `output` (cudaMemset in main) — confirm before
// reusing this kernel with non-zeroed output.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
    // printf("checing len: %p",len);
    // printf("checing n: %p",n);
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;  // global thread id
    int step=len;                       // slice width handled by one thread
    int start=index*len+1;//exclusive
    if (start>n) return; //exclusive, could equal to n
    int end=start+step;                 // one past this thread's slice
    output[start]=mask[start-1];        // seed from the element just before the slice
    for(unsigned int i=start+1;i<end&&i<n;i++){
        output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
    }
}
// Kernel: serial carry-propagation across the per-thread slice totals
// inside one block-sized region.  Each thread owns a region of
// step*blockDim.x elements and walks positions spaced `len` apart,
// folding the previous slice's last partial sum into the next.
// The +offset / +1 bounds skip the region's first slice and include its
// last boundary element.
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;  // global thread id
    int step=len;
    // int offset=2*step-1;
    int offset=2*step;                  // first position to fix up within the region
    unsigned int start=step*blockDim.x*index+offset;
    unsigned int end=step*blockDim.x*(index+1)+1;  // +1: include the region's last boundary
    for(unsigned int i=start;i<end && i<n; i+=step){
        sum[i]+=sum[i-step];            // carry previous slice total forward
    }
}
// Kernel: broadcasts the carried-in prefix into a thread's slice.
// Thread 0 of each block skips (its slice needs no carry); every other
// thread adds sum[start-1] (the accumulated total up to its slice) to
// each interior element.  The end bound is start+step-1 because the
// slice's last position was already fixed by serialsum_accrossthread.
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
    if (threadIdx.x==0) return;
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;  // global thread id
    int step=len;
    unsigned int start=index*step+1;//exclusive
    unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
    unsigned int base=sum[start-1];     // prefix accumulated before this slice
    for(unsigned int i=start; i<end && i<n; i++){
        sum[i]+=base;
    }
}
// __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// int step=len*blockDim.x;
// // int offset=2*step-1;
// int offset=2*step;
// unsigned int start= blockDim.x*step*index+offset;
// unsigned int end= blockDim.x*step*(index+1);
// for(unsigned int i=start; i<end && i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
// Host-side serial carry pass across block regions: with stride
// step = len*block_size (elements per block), adds sum[i-step] into
// sum[i] for i = 2*step, 3*step, ... while i < n, so each block's
// boundary element accumulates the totals of all preceding blocks.
void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
    const int stride = len * block_size;                    // elements handled per block
    unsigned int pos = static_cast<unsigned int>(2 * stride); // first boundary needing a carry
    while (pos < n) {
        sum[pos] += sum[pos - stride];                      // fold in previous block's total
        pos += stride;
    }
}
// Kernel: broadcasts the carried-in block prefix into a block region.
// Thread index 0 returns (the first region needs no carry); otherwise
// sum[start-1] — the total accumulated before this region — is added to
// each interior element.  end is start+step-1 because the region's last
// boundary element was already updated by the serial block pass.
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index==0) return; //the first block is not needed to merge
    int step=len*blockDim.x;            // elements per block region
    int start=index*step+1; //exclusive
    // int end=start+step;
    int end=start+step-1;// -1 is important, this position has been added in serial sum
    // int base=sum[blockIdx.x*len*blockDim.x-1];//last element at last block
    int base=sum[start-1];//last element at last block
    for(int i=start; i<end && i<n; i++){
        sum[i]+=base;
    }
}
// Ceiling division for the launch-configuration math: for positive a, b
// returns the smallest q with q*b >= a.  The exact (a + b - 1)/b form is
// kept so behavior matches the original for every input.
static inline int divup(int a, int b) {
    const int biased = a + b - 1;   // bias so truncating division rounds up
    return biased / b;
}
// Driver: computes the prefix sum of 0..n-1 on the GPU in three phases
// (per-thread scan -> serial carry across threads -> merge), does the
// cross-block carry on the host, and validates against the closed form
// result[i] = (i-1)*i/2.  Aborts via CHECK/exit(1) on any CUDA error.
int main(){
    const unsigned int n=100000; //100000 number
    unsigned int data[n];           // input: data[i] = i
    unsigned int result[n];         // expected prefix sums (closed form)
    unsigned int inter_sum[n];      // host copy for the cross-block serial pass
    unsigned int inter_result[n];   // expected intermediate (pre-block-merge) values
    unsigned int *cal_result=new unsigned int [n];  // final GPU result
    for (unsigned int i=0; i<n; i++){
        data[i]=i;
    }
    for (unsigned int i=0; i<n; i++){
        cal_result[i]=i;
    }
    // long long avoids overflow in (i-1)*i before the /2
    for (long long i=0; i<n; i++){
        result[i]=(i-1)*i/2;
    }
    std::cout<< "data preparation done"<<std::endl;
    const int block_size=64;//64 threads per block;
    const int len=1000; // add 1000 prefix sum per thread;
    unsigned int *d_in=NULL;
    CHECK(cudaMalloc((void**)&d_in,n*sizeof(unsigned int)));
    unsigned int *d_sum=NULL;
    CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int)));
    // zero d_sum: prefixsum's += relies on this
    CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int)));
    CHECK(cudaMemcpy(d_in,data,n * sizeof(unsigned int), cudaMemcpyHostToDevice));
    // cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice);
    // std::cout<< divup(n,block_size*len) <<std::endl;
    // for (long long i=64001; i<65001; i++){
    //     inter_result[i]=(64000+i-1)*(i-64000)/2;
    // }
    // Phase 1: each thread scans its own len-element slice.
    prefixsum<<<divup(n,block_size*len),block_size>>>(d_in,d_sum,len,n);
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // for (int i=64001; i<65001; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i <<"error!"<<std::endl;
    //         std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // std::cout<<"pass here"<<std::endl;
    CHECK(cudaGetLastError());
    // Build the expected intermediate values (before the block merge):
    // 64001.. use a sum starting at `start`, 1..64000 the plain form.
    long long start=64001;
    // int end=start+1000;
    unsigned int end=100000;
    // std::cout<< end*end<<std::endl;
    for (unsigned int i=start; i<end; i++){
        // int index=i-64000;
        inter_result[i]=((start-1+i-1)*(i-start+1))/2;
    }
    start=1;
    end=start+64000;
    for (long long i=start; i<end; i++){
        // int index=i-64000;
        inter_result[i]=(i-1)*(i-start+1)/2;
    }
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // for (int i=65000; i<66000; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i <<"error!"<<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // std::cout<<"pass"<<std::endl;
    // for (unsigned int i=65000; i<66000; i++){
    //     inter_result[i]=(65000+i)*(i-65000+1)/2;
    // }
    // for (long long i=64001; i<65001; i++){
    //     inter_result[i]=(64000+i-1)*(i-64000)/2;
    // }
    // for (unsigned int i=1001; i<2001; i++){
    //     inter_result[i]=(1000+i-1)*(i-1000)/2;
    // }
    // inter_result[2000]+=result[1000];
    // Phase 2: serial carry across thread slices within each block region.
    serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
    CHECK(cudaGetLastError());
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // for(int i=1001; i<2001; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"first: i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // std::cout<<"pass"<<std::endl;
    // std::cout << "pass first one"<<std::endl;
    // for (int i=64000; i<65000; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // inter_result[65999]+=inter_result[64999];
    // for (int i=65000; i<66000; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // Phase 3: broadcast each slice's carried prefix into its interior.
    mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
    CHECK(cudaGetLastError());
    // CHECK(cudaMemcpy(inter_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    //serial sum
    CHECK(cudaMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // Sanity check of the pre-block-merge region against the closed form.
    for (int i=64001; i<100000; i++){
        if(inter_result[i]!=inter_sum[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
            break;
            // return 0;
        }
    }
    std::cout<<"pass here 1"<<std::endl;
    // Cross-block carry is done serially on the host, then copied back.
    serialsum_accrossblock(inter_sum, len, n, block_size);
    CHECK(cudaMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), cudaMemcpyHostToDevice));
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    for (int i=1; i<100000; i++){
        if(inter_result[i]!=inter_sum[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
            break;
            // return 0;
        }
    }
    std::cout<<"pass here"<<std::endl;
    // serialsum_accrossblock<<<divup(n,block_size*len*block_size*block_size) ,block_size>>>(d_sum,len,n);
    // CHECK(cudaGetLastError());
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // for (int i=0; i<100000; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i <<"error!"<<std::endl;
    //         std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // for (unsigned int i=64000; i<100000; i++){
    //     inter_result[i]+=inter_result[63999];
    // }
    // std::cout<< divup(n,block_size*len) << std::endl;
    // Final merge: broadcast each block's carried prefix into its region.
    mergeblock<<<divup(n,block_size*len*block_size) ,block_size>>>(d_sum,len,n);
    CHECK(cudaGetLastError());
    // cuda_memcpy(cal_result, d_sum, n, cudaMemcpyDeviceToHost);
    CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_sum));
    //compare
    unsigned int i;
    for (i=0; i<n; i++){
        if(result[i]!=cal_result[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<<result[i]<<"vs"<<cal_result[i]<<std::endl;
            break;
        }
    }
    if(i==n){
        std::cout<<"correct"<<std::endl;
    }
    return 0;
}
Function : _Z10mergeblockPjij
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0050*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0060*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*0070*/ ULDC UR5, c[0x0][0x168] ; /* 0x00005a0000057ab9 */
/* 0x000fe40000000800 */
/*0080*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fcc000f8e023f */
/*0090*/ IMAD R0, R0, UR4, RZ ; /* 0x0000000400007c24 */
/* 0x000fca000f8e02ff */
/*00a0*/ IADD3 R5, R0.reuse, 0x1, RZ ; /* 0x0000000100057810 */
/* 0x040fe40007ffe0ff */
/*00b0*/ IADD3 R4, R0, UR4, RZ ; /* 0x0000000400047c10 */
/* 0x000fe4000fffe0ff */
/*00c0*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x16c], PT ; /* 0x00005b0005007a0c */
/* 0x000fc80003f06070 */
/*00d0*/ ISETP.GE.OR P0, PT, R5, R4, P0 ; /* 0x000000040500720c */
/* 0x000fda0000706670 */
/*00e0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00f0*/ IMAD.MOV.U32 R8, RZ, RZ, 0x4 ; /* 0x00000004ff087424 */
/* 0x000fe200078e00ff */
/*0100*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc60000000a00 */
/*0110*/ IMAD.WIDE R2, R0, R8, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0208 */
/*0120*/ LDG.E R0, [R2.64] ; /* 0x0000000602007981 */
/* 0x000162000c1e1900 */
/*0130*/ UIADD3 UR4, -UR4, 0x1, URZ ; /* 0x0000000104047890 */
/* 0x000fe2000fffe13f */
/*0140*/ IADD3 R6, R5, -c[0x0][0x16c], RZ ; /* 0x80005b0005067a10 */
/* 0x000fe20007ffe0ff */
/*0150*/ BSSY B0, 0x240 ; /* 0x000000e000007945 */
/* 0x000fe80003800000 */
/*0160*/ IMNMX.U32 R7, R6, UR4, !PT ; /* 0x0000000406077c17 */
/* 0x000fc8000f800000 */
/*0170*/ ISETP.GT.U32.AND P0, PT, R7, -0x4, PT ; /* 0xfffffffc0700780c */
/* 0x000fe20003f04070 */
/*0180*/ IMAD.MOV R6, RZ, RZ, -R7 ; /* 0x000000ffff067224 */
/* 0x000fca00078e0a07 */
/*0190*/ LOP3.LUT P1, R6, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306067812 */
/* 0x000fda000782c0ff */
/*01a0*/ @!P1 BRA 0x230 ; /* 0x0000008000009947 */
/* 0x001fea0003800000 */
/*01b0*/ IMAD.WIDE R2, R5, R8, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x001fca00078e0208 */
/*01c0*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */
/* 0x000ea2000c1e1900 */
/*01d0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fe40007ffe0ff */
/*01f0*/ ISETP.NE.AND P1, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f25270 */
/*0200*/ IMAD.IADD R7, R0, 0x1, R7 ; /* 0x0000000100077824 */
/* 0x024fca00078e0207 */
/*0210*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001ee000c101906 */
/*0220*/ @P1 BRA 0x1b0 ; /* 0xffffff8000001947 */
/* 0x000fea000383ffff */
/*0230*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0240*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0250*/ IMAD.WIDE R2, R5, R8, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x001fca00078e0208 */
/*0260*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */
/* 0x000ea8000c1e1900 */
/*0270*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040602097981 */
/* 0x000ee8000c1e1900 */
/*0280*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000806020b7981 */
/* 0x000f28000c1e1900 */
/*0290*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c06020d7981 */
/* 0x000f22000c1e1900 */
/*02a0*/ IADD3 R5, R5, 0x4, RZ ; /* 0x0000000405057810 */
/* 0x000fc80007ffe0ff */
/*02b0*/ ISETP.GE.AND P0, PT, R5.reuse, R4, PT ; /* 0x000000040500720c */
/* 0x040fe40003f06270 */
/*02c0*/ ISETP.LT.U32.AND P1, PT, R5, c[0x0][0x16c], PT ; /* 0x00005b0005007a0c */
/* 0x000fe20003f21070 */
/*02d0*/ IMAD.IADD R7, R0.reuse, 0x1, R7 ; /* 0x0000000100077824 */
/* 0x064fe400078e0207 */
/*02e0*/ IMAD.IADD R9, R0, 0x1, R9 ; /* 0x0000000100097824 */
/* 0x008fc600078e0209 */
/*02f0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e2000c101906 */
/*0300*/ IMAD.IADD R11, R0, 0x1, R11 ; /* 0x00000001000b7824 */
/* 0x010fc600078e020b */
/*0310*/ STG.E [R2.64+0x4], R9 ; /* 0x0000040902007986 */
/* 0x0001e2000c101906 */
/*0320*/ IMAD.IADD R13, R0, 0x1, R13 ; /* 0x00000001000d7824 */
/* 0x000fc600078e020d */
/*0330*/ STG.E [R2.64+0x8], R11 ; /* 0x0000080b02007986 */
/* 0x0001e8000c101906 */
/*0340*/ STG.E [R2.64+0xc], R13 ; /* 0x00000c0d02007986 */
/* 0x0001e2000c101906 */
/*0350*/ @!P0 BRA P1, 0x250 ; /* 0xfffffef000008947 */
/* 0x000fea000083ffff */
/*0360*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0370*/ BRA 0x370; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z11mergethreadPjij
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x001fda0003f05270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */
/* 0x001fc800078e0200 */
/*0060*/ IMAD R7, R0, c[0x0][0x168], RZ ; /* 0x00005a0000077a24 */
/* 0x000fca00078e02ff */
/*0070*/ IADD3 R5, R7.reuse, 0x1, RZ ; /* 0x0000000107057810 */
/* 0x040fe40007ffe0ff */
/*0080*/ IADD3 R0, R7, c[0x0][0x168], RZ ; /* 0x00005a0007007a10 */
/* 0x000fe40007ffe0ff */
/*0090*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x16c], PT ; /* 0x00005b0005007a0c */
/* 0x000fc80003f06070 */
/*00a0*/ ISETP.GE.U32.OR P0, PT, R5, R0, P0 ; /* 0x000000000500720c */
/* 0x000fda0000706470 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fe200078e00ff */
/*00d0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc60000000a00 */
/*00e0*/ IMAD.WIDE.U32 R6, R7, R2, c[0x0][0x160] ; /* 0x0000580007067625 */
/* 0x000fca00078e0002 */
/*00f0*/ LDG.E R3, [R6.64] ; /* 0x0000000606037981 */
/* 0x000162000c1e1900 */
/*0100*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe20000000800 */
/*0110*/ IADD3 R4, R5, -c[0x0][0x16c], RZ ; /* 0x80005b0005047a10 */
/* 0x000fe20007ffe0ff */
/*0120*/ UIADD3 UR4, -UR4, 0x1, URZ ; /* 0x0000000104047890 */
/* 0x000fe2000fffe13f */
/*0130*/ BSSY B0, 0x220 ; /* 0x000000e000007945 */
/* 0x000fea0003800000 */
/*0140*/ IMNMX.U32 R8, R4, UR4, !PT ; /* 0x0000000404087c17 */
/* 0x000fc8000f800000 */
/*0150*/ ISETP.GT.U32.AND P0, PT, R8, -0x4, PT ; /* 0xfffffffc0800780c */
/* 0x000fe20003f04070 */
/*0160*/ IMAD.MOV R4, RZ, RZ, -R8 ; /* 0x000000ffff047224 */
/* 0x000fca00078e0a08 */
/*0170*/ LOP3.LUT P1, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000782c0ff */
/*0180*/ @!P1 BRA 0x210 ; /* 0x0000008000009947 */
/* 0x001fea0003800000 */
/*0190*/ IMAD.WIDE.U32 R6, R5, R2, c[0x0][0x160] ; /* 0x0000580005067625 */
/* 0x001fca00078e0002 */
/*01a0*/ LDG.E R8, [R6.64] ; /* 0x0000000606087981 */
/* 0x000ea2000c1e1900 */
/*01b0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fe40007ffe0ff */
/*01d0*/ ISETP.NE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f25270 */
/*01e0*/ IMAD.IADD R9, R3, 0x1, R8 ; /* 0x0000000103097824 */
/* 0x024fca00078e0208 */
/*01f0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001ee000c101906 */
/*0200*/ @P1 BRA 0x190 ; /* 0xffffff8000001947 */
/* 0x000fea000383ffff */
/*0210*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0220*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0230*/ IMAD.WIDE.U32 R12, R5, R2, c[0x0][0x160] ; /* 0x00005800050c7625 */
/* 0x002fca00078e0002 */
/*0240*/ LDG.E R4, [R12.64] ; /* 0x000000060c047981 */
/* 0x000ea2000c1e1900 */
/*0250*/ IADD3 R7, R5, 0x1, RZ ; /* 0x0000000105077810 */
/* 0x001fca0007ffe0ff */
/*0260*/ IMAD.WIDE.U32 R6, R7, R2, c[0x0][0x160] ; /* 0x0000580007067625 */
/* 0x000fc800078e0002 */
/*0270*/ IMAD.IADD R15, R3, 0x1, R4 ; /* 0x00000001030f7824 */
/* 0x024fca00078e0204 */
/*0280*/ STG.E [R12.64], R15 ; /* 0x0000000f0c007986 */
/* 0x0001e8000c101906 */
/*0290*/ LDG.E R4, [R6.64] ; /* 0x0000000606047981 */
/* 0x000ea2000c1e1900 */
/*02a0*/ IADD3 R9, R5, 0x2, RZ ; /* 0x0000000205097810 */
/* 0x000fca0007ffe0ff */
/*02b0*/ IMAD.WIDE.U32 R8, R9, R2, c[0x0][0x160] ; /* 0x0000580009087625 */
/* 0x000fc800078e0002 */
/*02c0*/ IMAD.IADD R17, R3, 0x1, R4 ; /* 0x0000000103117824 */
/* 0x004fca00078e0204 */
/*02d0*/ STG.E [R6.64], R17 ; /* 0x0000001106007986 */
/* 0x0003e8000c101906 */
/*02e0*/ LDG.E R4, [R8.64] ; /* 0x0000000608047981 */
/* 0x000ea2000c1e1900 */
/*02f0*/ IADD3 R11, R5, 0x3, RZ ; /* 0x00000003050b7810 */
/* 0x000fca0007ffe0ff */
/*0300*/ IMAD.WIDE.U32 R10, R11, R2, c[0x0][0x160] ; /* 0x000058000b0a7625 */
/* 0x000fc800078e0002 */
/*0310*/ IMAD.IADD R19, R3, 0x1, R4 ; /* 0x0000000103137824 */
/* 0x004fca00078e0204 */
/*0320*/ STG.E [R8.64], R19 ; /* 0x0000001308007986 */
/* 0x0003e8000c101906 */
/*0330*/ LDG.E R4, [R10.64] ; /* 0x000000060a047981 */
/* 0x000e22000c1e1900 */
/*0340*/ IADD3 R5, R5, 0x4, RZ ; /* 0x0000000405057810 */
/* 0x000fc80007ffe0ff */
/*0350*/ ISETP.GE.U32.AND P0, PT, R5.reuse, R0, PT ; /* 0x000000000500720c */
/* 0x040fe40003f06070 */
/*0360*/ ISETP.LT.U32.AND P1, PT, R5, c[0x0][0x16c], PT ; /* 0x00005b0005007a0c */
/* 0x000fe20003f21070 */
/*0370*/ IMAD.IADD R13, R3, 0x1, R4 ; /* 0x00000001030d7824 */
/* 0x001fca00078e0204 */
/*0380*/ STG.E [R10.64], R13 ; /* 0x0000000d0a007986 */
/* 0x0003ee000c101906 */
/*0390*/ @!P0 BRA P1, 0x230 ; /* 0xfffffe9000008947 */
/* 0x000fea000083ffff */
/*03a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03b0*/ BRA 0x3b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23serialsum_accrossthreadPjij
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff057624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fe400078e0203 */
/*0050*/ IMAD R3, R5, c[0x0][0x0], RZ ; /* 0x0000000005037a24 */
/* 0x000fc800078e02ff */
/*0060*/ IMAD R0, R0, R3, RZ ; /* 0x0000000300007224 */
/* 0x000fca00078e02ff */
/*0070*/ LEA R2, R5, R0, 0x1 ; /* 0x0000000005027211 */
/* 0x000fe400078e08ff */
/*0080*/ IADD3 R7, R0, 0x1, R3 ; /* 0x0000000100077810 */
/* 0x000fe40007ffe003 */
/*0090*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x16c], PT ; /* 0x00005b0002007a0c */
/* 0x000fc80003f06070 */
/*00a0*/ ISETP.GE.U32.OR P0, PT, R2, R7, P0 ; /* 0x000000070200720c */
/* 0x000fda0000706470 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ IMAD.MOV.U32 R0, RZ, RZ, R2 ; /* 0x000000ffff007224 */
/* 0x000fe200078e0002 */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00e0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x001fe200000001ff */
/*00f0*/ IADD3 R2, R0, -c[0x0][0x168], RZ ; /* 0x80005a0000027a10 */
/* 0x000fd20007ffe0ff */
/*0100*/ IMAD.WIDE.U32 R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e0005 */
/*0110*/ IMAD.WIDE.U32 R4, R0.reuse, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x040fe400078e0005 */
/*0120*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0130*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea2000c1e1900 */
/*0140*/ IADD3 R0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fc80007ffe0ff */
/*0150*/ ISETP.GE.U32.AND P0, PT, R0.reuse, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x040fe40003f06070 */
/*0160*/ ISETP.LT.U32.AND P1, PT, R0, R7, PT ; /* 0x000000070000720c */
/* 0x000fe20003f21070 */
/*0170*/ IMAD.IADD R9, R6, 0x1, R3 ; /* 0x0000000106097824 */
/* 0x004fca00078e0203 */
/*0180*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001ee000c101904 */
/*0190*/ @!P0 BRA P1, 0xe0 ; /* 0xffffff4000008947 */
/* 0x000fea000083ffff */
/*01a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01b0*/ BRA 0x1b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z9prefixsumPjS_ij
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0040*/ IMAD R3, R0, c[0x0][0x170], RZ ; /* 0x00005c0000037a24 */
/* 0x000fca00078e02ff */
/*0050*/ IADD3 R9, R3, 0x1, RZ ; /* 0x0000000103097810 */
/* 0x000fc80007ffe0ff */
/*0060*/ ISETP.GT.U32.AND P0, PT, R9, c[0x0][0x174], PT ; /* 0x00005d0009007a0c */
/* 0x000fda0003f04070 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ IMAD.MOV.U32 R0, RZ, RZ, 0x4 ; /* 0x00000004ff007424 */
/* 0x000fe200078e00ff */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc60000000a00 */
/*00a0*/ IMAD.WIDE R4, R3, R0, c[0x0][0x160] ; /* 0x0000580003047625 */
/* 0x000fcc00078e0200 */
/*00b0*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IADD3 R3, R3, 0x2, RZ ; /* 0x0000000203037810 */
/* 0x000fe20007ffe0ff */
/*00d0*/ IMAD.WIDE R6, R9.reuse, R0, c[0x0][0x168] ; /* 0x00005a0009067625 */
/* 0x040fe200078e0200 */
/*00e0*/ IADD3 R2, R9, c[0x0][0x170], RZ ; /* 0x00005c0009027a10 */
/* 0x000fe40007ffe0ff */
/*00f0*/ ISETP.GE.U32.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fc80003f06070 */
/*0100*/ ISETP.GE.U32.OR P0, PT, R3, R2, P0 ; /* 0x000000020300720c */
/* 0x000fe20000706470 */
/*0110*/ STG.E [R6.64], R5 ; /* 0x0000000506007986 */
/* 0x0041d8000c101906 */
/*0120*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0130*/ ULDC UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */
/* 0x000fe20000000800 */
/*0140*/ IADD3 R4, R3, -c[0x0][0x174], RZ ; /* 0x80005d0003047a10 */
/* 0x000fe20007ffe0ff */
/*0150*/ UIADD3 UR4, -UR4, 0x1, URZ ; /* 0x0000000104047890 */
/* 0x000fe2000fffe13f */
/*0160*/ BSSY B0, 0x2b0 ; /* 0x0000014000007945 */
/* 0x000fea0003800000 */
/*0170*/ IMNMX.U32 R4, R4, UR4, !PT ; /* 0x0000000404047c17 */
/* 0x000fc8000f800000 */
/*0180*/ ISETP.GT.U32.AND P0, PT, R4, -0x4, PT ; /* 0xfffffffc0400780c */
/* 0x000fe20003f04070 */
/*0190*/ IMAD.MOV R6, RZ, RZ, -R4 ; /* 0x000000ffff067224 */
/* 0x001fca00078e0a04 */
/*01a0*/ LOP3.LUT P1, R6, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306067812 */
/* 0x000fda000782c0ff */
/*01b0*/ @!P1 BRA 0x2a0 ; /* 0x000000e000009947 */
/* 0x000fea0003800000 */
/*01c0*/ IMAD.WIDE.U32 R4, R9, R0, c[0x0][0x168] ; /* 0x00005a0009047625 */
/* 0x000fca00078e0000 */
/*01d0*/ LDG.E R9, [R4.64] ; /* 0x0000000604097981 */
/* 0x000162000c1e1900 */
/*01e0*/ IMAD.MOV.U32 R8, RZ, RZ, R6 ; /* 0x000000ffff087224 */
/* 0x000fc600078e0006 */
/*01f0*/ IADD3 R5, R3.reuse, -0x1, RZ ; /* 0xffffffff03057810 */
/* 0x041fe20007ffe0ff */
/*0200*/ IMAD.WIDE.U32 R6, R3, R0, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0000 */
/*0210*/ IMAD.WIDE.U32 R4, R5, R0, c[0x0][0x160] ; /* 0x0000580005047625 */
/* 0x000fe200078e0000 */
/*0220*/ LDG.E R10, [R6.64] ; /* 0x00000006060a7981 */
/* 0x000eaa000c1e1900 */
/*0230*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea2000c1e1900 */
/*0240*/ IADD3 R8, R8, -0x1, RZ ; /* 0xffffffff08087810 */
/* 0x000fe40007ffe0ff */
/*0250*/ IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103037810 */
/* 0x000fe40007ffe0ff */
/*0260*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc40003f25270 */
/*0270*/ IADD3 R9, R10, R4, R9 ; /* 0x000000040a097210 */
/* 0x024fca0007ffe009 */
/*0280*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001ec000c101906 */
/*0290*/ @P1 BRA 0x1f0 ; /* 0xffffff5000001947 */
/* 0x000fea000383ffff */
/*02a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*02c0*/ IADD3 R11, R3.reuse, -0x1, RZ ; /* 0xffffffff030b7810 */
/* 0x042fe20007ffe0ff */
/*02d0*/ IMAD.WIDE.U32 R12, R3, R0, c[0x0][0x168] ; /* 0x00005a00030c7625 */
/* 0x000fc800078e0000 */
/*02e0*/ IMAD.WIDE.U32 R4, R11.reuse, R0.reuse, c[0x0][0x168] ; /* 0x00005a000b047625 */
/* 0x0c0fe200078e0000 */
/*02f0*/ LDG.E R6, [R12.64] ; /* 0x000000060c067981 */
/* 0x001ea6000c1e1900 */
/*0300*/ IMAD.WIDE.U32 R10, R11, R0, c[0x0][0x160] ; /* 0x000058000b0a7625 */
/* 0x000fe400078e0000 */
/*0310*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea8000c1e1900 */
/*0320*/ LDG.E R10, [R10.64] ; /* 0x000000060a0a7981 */
/* 0x000ea2000c1e1900 */
/*0330*/ IADD3 R21, R3, 0x1, RZ ; /* 0x0000000103157810 */
/* 0x000fca0007ffe0ff */
/*0340*/ IMAD.WIDE.U32 R8, R21, R0, c[0x0][0x168] ; /* 0x00005a0015087625 */
/* 0x000fe200078e0000 */
/*0350*/ IADD3 R15, R6, R10, R5 ; /* 0x0000000a060f7210 */
/* 0x004fc60007ffe005 */
/*0360*/ IMAD.WIDE.U32 R6, R3.reuse, R0.reuse, c[0x0][0x160] ; /* 0x0000580003067625 */
/* 0x0c0fe400078e0000 */
/*0370*/ STG.E [R12.64], R15 ; /* 0x0000000f0c007986 */
/* 0x0001e8000c101906 */
/*0380*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*0390*/ LDG.E R14, [R8.64] ; /* 0x00000006080e7981 */
/* 0x000ea2000c1e1900 */
/*03a0*/ IADD3 R19, R3, 0x2, RZ ; /* 0x0000000203137810 */
/* 0x000fe20007ffe0ff */
/*03b0*/ IMAD.WIDE.U32 R4, R21, R0, c[0x0][0x160] ; /* 0x0000580015047625 */
/* 0x000fc800078e0000 */
/*03c0*/ IMAD.WIDE.U32 R10, R19, R0, c[0x0][0x168] ; /* 0x00005a00130a7625 */
/* 0x000fe200078e0000 */
/*03d0*/ IADD3 R17, R14, R6, R15 ; /* 0x000000060e117210 */
/* 0x004fca0007ffe00f */
/*03e0*/ STG.E [R8.64], R17 ; /* 0x0000001108007986 */
/* 0x0003e8000c101906 */
/*03f0*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea8000c1e1900 */
/*0400*/ LDG.E R14, [R10.64] ; /* 0x000000060a0e7981 */
/* 0x000ea2000c1e1900 */
/*0410*/ IADD3 R21, R3, 0x3, RZ ; /* 0x0000000303157810 */
/* 0x000fe20007ffe0ff */
/*0420*/ IMAD.WIDE.U32 R6, R19, R0, c[0x0][0x160] ; /* 0x0000580013067625 */
/* 0x000fc800078e0000 */
/*0430*/ IMAD.WIDE.U32 R12, R21, R0, c[0x0][0x168] ; /* 0x00005a00150c7625 */
/* 0x001fe200078e0000 */
/*0440*/ IADD3 R15, R14, R4, R17 ; /* 0x000000040e0f7210 */
/* 0x004fca0007ffe011 */
/*0450*/ STG.E [R10.64], R15 ; /* 0x0000000f0a007986 */
/* 0x0003e8000c101906 */
/*0460*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*0470*/ LDG.E R14, [R12.64] ; /* 0x000000060c0e7981 */
/* 0x000ea2000c1e1900 */
/*0480*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */
/* 0x000fc80007ffe0ff */
/*0490*/ ISETP.GE.U32.AND P0, PT, R3.reuse, R2, PT ; /* 0x000000020300720c */
/* 0x040fe40003f06070 */
/*04a0*/ ISETP.LT.U32.AND P1, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe40003f21070 */
/*04b0*/ IADD3 R19, R14, R6, R15 ; /* 0x000000060e137210 */
/* 0x004fca0007ffe00f */
/*04c0*/ STG.E [R12.64], R19 ; /* 0x000000130c007986 */
/* 0x0003ec000c101906 */
/*04d0*/ @!P0 BRA P1, 0x2c0 ; /* 0xfffffde000008947 */
/* 0x000fea000083ffff */
/*04e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04f0*/ BRA 0x4f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
// Print a CUDA error (API call text, file, line, and driver message) to
// stderr and terminate the process; does nothing when err == cudaSuccess.
// Invoked via the CHECK(...) macro so func/file/line are filled automatically.
template<typename T>
static inline void check(T err, const char* const func, const char* const file, const int line) {
    if (err == cudaSuccess) {
        return;                                   // fast path: no error
    }
    std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
    std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
    exit(1);                                      // unrecoverable: abort the run
}
#define CHECK(x) check(x, #x, __FILE__, __LINE__)
// template <class T>
// void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
// CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
// }
// Kernel stage 1: each thread computes an exclusive prefix sum over its own
// `len`-element slice of `mask`, writing into `output`. Slices are later
// stitched together by serialsum_accrossthread / mergethread.
// NOTE(review): `start>n` compares int to unsigned (start is converted to
// unsigned) — fine for the values used here, but worth confirming.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
    // printf("checing len: %p",len);
    // printf("checing n: %p",n);
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;  // global thread id
    int step=len;                       // elements handled per thread
    int start=index*len+1;//exclusive
    if (start>n) return; //exclusive, could equal to n
    int end=start+step;                 // one past this thread's slice
    output[start]=mask[start-1];        // exclusive scan seed: output[start] = mask[start-1]
    // NOTE(review): `+=` relies on output being zero-initialized beforehand
    // (host does cudaMemset(d_sum,0,...)); with nonzero output this accumulates.
    for(unsigned int i=start+1;i<end&&i<n;i++){
        output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
    }
}
// Kernel stage 2: chain the per-thread slice totals within one block.
// Each thread walks the last slot of consecutive `len`-element slices in its
// block's region, adding the previous slice's total (a serial carry pass,
// strided by `step`).
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;  // one region per thread
    int step=len;                 // elements per stage-1 slice
    // int offset=2*step-1;
    int offset=2*step;            // first carried slot (exclusive-scan shift of +1)
    unsigned int start=step*blockDim.x*index+offset;
    unsigned int end=step*blockDim.x*(index+1)+1;   // one past the region's last carried slot
    for(unsigned int i=start;i<end && i<n; i+=step){
        sum[i]+=sum[i-step];      // carry previous slice total into this slice's last slot
    }
}
// Kernel stage 3: thread 0's slice is already correct; every other thread adds
// the running total stored just before its slice (sum[start-1]) to each element
// of the slice, except the last one (already fixed by serialsum_accrossthread).
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
    if (threadIdx.x==0) return;   // first slice in each block needs no base offset
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
    int step=len;
    unsigned int start=index*step+1;//exclusive
    unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
    unsigned int base=sum[start-1];   // total of everything before this slice
    for(unsigned int i=start; i<end && i<n; i++){
        sum[i]+=base;
    }
}
// __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// int step=len*blockDim.x;
// // int offset=2*step-1;
// int offset=2*step;
// unsigned int start= blockDim.x*step*index+offset;
// unsigned int end= blockDim.x*step*(index+1);
// for(unsigned int i=start; i<end && i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
// Host-side carry pass at block granularity: propagates each block-region's
// running total into the last carried slot of the next region (the CPU
// analogue of serialsum_accrossthread, one level up).
void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
    const int stride = len * block_size;     // elements covered by one block
    unsigned int pos = 2 * stride;           // first slot that needs a carry
    while (pos < n) {
        sum[pos] += sum[pos - stride];       // add the previous block's total
        pos += stride;
    }
}
// Kernel stage 4: block-granularity analogue of mergethread. Every index
// except 0 adds the carried total stored just before its block region
// (sum[start-1]) to the region's interior; the last slot was already fixed by
// the host-side serialsum_accrossblock pass.
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index==0) return; //the first block is not needed to merge
    int step=len*blockDim.x;   // elements covered by one block region
    int start=index*step+1; //exclusive
    // int end=start+step;
    int end=start+step-1;// -1 is important, this position has been added in serial sum
    // int base=sum[blockIdx.x*len*blockDim.x-1];//last element at last block
    int base=sum[start-1];//last element at last block
    // NOTE(review): i (int) vs n (unsigned) compare converts i to unsigned —
    // safe for the positive ranges used here.
    for(int i=start; i<end && i<n; i++){
        sum[i]+=base;
    }
}
// Ceiling division for positive operands: smallest k with k * b >= a.
// Used to size kernel grids so every element gets a thread.
static inline int divup(int a, int b) {
    const int biased = a + b - 1;   // bump a up to the next multiple of b
    return biased / b;
}
// Driver: builds test data (data[i] = i), runs the four-stage prefix-sum
// pipeline (GPU per-thread scan -> GPU per-thread carry -> GPU merge ->
// CPU block carry -> GPU block merge) and validates each stage against
// closed-form expected values result[i] = i*(i-1)/2.
// NOTE(review): the four n-element locals below put ~1.6 MB on the stack —
// fine under default Linux limits, confirm on other platforms.
int main(){
    const unsigned int n=100000; //100000 number
    unsigned int data[n];          // input sequence 0..n-1
    unsigned int result[n];        // final expected exclusive prefix sums
    unsigned int inter_sum[n];     // host copy of the device partial sums
    unsigned int inter_result[n];  // expected intermediate (pre-block-merge) values
    unsigned int *cal_result=new unsigned int [n];  // final GPU output
    for (unsigned int i=0; i<n; i++){
        data[i]=i;
    }
    for (unsigned int i=0; i<n; i++){
        cal_result[i]=i;
    }
    // Reference answer: exclusive sum of 0..i-1 = (i-1)*i/2 (long long avoids overflow).
    for (long long i=0; i<n; i++){
        result[i]=(i-1)*i/2;
    }
    std::cout<< "data preparation done"<<std::endl;
    const int block_size=64;//64 threads per block;
    const int len=1000; // add 1000 prefix sum per thread;
    unsigned int *d_in=NULL;
    CHECK(cudaMalloc((void**)&d_in,n*sizeof(unsigned int)));
    unsigned int *d_sum=NULL;
    CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int)));
    // Zero-fill d_sum: prefixsum's `+=` accumulation depends on this.
    CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int)));
    CHECK(cudaMemcpy(d_in,data,n * sizeof(unsigned int), cudaMemcpyHostToDevice));
    // cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice);
    // std::cout<< divup(n,block_size*len) <<std::endl;
    // for (long long i=64001; i<65001; i++){
    //     inter_result[i]=(64000+i-1)*(i-64000)/2;
    // }
    // Stage 1: per-thread exclusive scans over len-element slices.
    prefixsum<<<divup(n,block_size*len),block_size>>>(d_in,d_sum,len,n);
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // for (int i=64001; i<65001; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i <<"error!"<<std::endl;
    //         std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // std::cout<<"pass here"<<std::endl;
    CHECK(cudaGetLastError());
    // Expected values after the thread-level passes: positions 64001..99999
    // hold sums relative to the start of the second block region.
    long long start=64001;
    // int end=start+1000;
    unsigned int end=100000;
    // std::cout<< end*end<<std::endl;
    for (unsigned int i=start; i<end; i++){
        // int index=i-64000;
        inter_result[i]=((start-1+i-1)*(i-start+1))/2;
    }
    // Positions 1..64000 (the first block region) are already final sums.
    start=1;
    end=start+64000;
    for (long long i=start; i<end; i++){
        // int index=i-64000;
        inter_result[i]=(i-1)*(i-start+1)/2;
    }
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // for (int i=65000; i<66000; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i <<"error!"<<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // std::cout<<"pass"<<std::endl;
    // for (unsigned int i=65000; i<66000; i++){
    //     inter_result[i]=(65000+i)*(i-65000+1)/2;
    // }
    // for (long long i=64001; i<65001; i++){
    //     inter_result[i]=(64000+i-1)*(i-64000)/2;
    // }
    // for (unsigned int i=1001; i<2001; i++){
    //     inter_result[i]=(1000+i-1)*(i-1000)/2;
    // }
    // inter_result[2000]+=result[1000];
    // Stage 2: chain slice totals within each block region.
    serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
    CHECK(cudaGetLastError());
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // for(int i=1001; i<2001; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"first: i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // std::cout<<"pass"<<std::endl;
    // std::cout << "pass first one"<<std::endl;
    // for (int i=64000; i<65000; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // inter_result[65999]+=inter_result[64999];
    // for (int i=65000; i<66000; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // Stage 3: broadcast each slice's base offset to its interior elements.
    mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
    CHECK(cudaGetLastError());
    // CHECK(cudaMemcpy(inter_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    //serial sum
    // Pull partial sums to the host and validate positions above the first region.
    CHECK(cudaMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    for (int i=64001; i<100000; i++){
        if(inter_result[i]!=inter_sum[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
            break;
            // return 0;
        }
    }
    std::cout<<"pass here 1"<<std::endl;
    // Stage 4 (CPU): carry block-region totals forward, then push back to device.
    serialsum_accrossblock(inter_sum, len, n, block_size);
    CHECK(cudaMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), cudaMemcpyHostToDevice));
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    for (int i=1; i<100000; i++){
        if(inter_result[i]!=inter_sum[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
            break;
            // return 0;
        }
    }
    std::cout<<"pass here"<<std::endl;
    // serialsum_accrossblock<<<divup(n,block_size*len*block_size*block_size) ,block_size>>>(d_sum,len,n);
    // CHECK(cudaGetLastError());
    // CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    // for (int i=0; i<100000; i++){
    //     if(inter_result[i]!=cal_result[i]){
    //         std::cout<<"i: "<< i <<"error!"<<std::endl;
    //         std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
    //         break;
    //         // return 0;
    //     }
    // }
    // for (unsigned int i=64000; i<100000; i++){
    //     inter_result[i]+=inter_result[63999];
    // }
    // std::cout<< divup(n,block_size*len) << std::endl;
    // Stage 5: broadcast block-region bases to interior elements.
    mergeblock<<<divup(n,block_size*len*block_size) ,block_size>>>(d_sum,len,n);
    CHECK(cudaGetLastError());
    // cuda_memcpy(cal_result, d_sum, n, cudaMemcpyDeviceToHost);
    CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_sum));
    //compare
    // Final end-to-end check against the closed-form reference.
    unsigned int i;
    for (i=0; i<n; i++){
        if(result[i]!=cal_result[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<<result[i]<<"vs"<<cal_result[i]<<std::endl;
            break;
        }
    }
    if(i==n){
        std::cout<<"correct"<<std::endl;
    }
    return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error at: "
.LC1:
.string ":"
.LC2:
.string " "
#NO_APP
.text
#-----------------------------------------------------------------------
# void check<cudaError>(cudaError err, const char* func,
#                       const char* file, int line)
# ABI:   SysV AMD64 (GAS/AT&T syntax, compiler-generated)
# In:    edi = err, rsi = func, rdx = file, ecx = line
# Effect: returns immediately when err == 0 (cudaSuccess); otherwise prints
#         two diagnostic lines to std::cerr and calls exit(1) (noreturn).
#-----------------------------------------------------------------------
	.type	_Z5checkI9cudaErrorEvT_PKcS3_i, @function
_Z5checkI9cudaErrorEvT_PKcS3_i:
.LFB4009:
	.cfi_startproc
	testl	%edi, %edi                  # err == cudaSuccess?
	jne	.L6
	ret                                 # success fast path
.L6:
	# Save callee-saved regs; the five pushes also leave rsp 16-aligned
	# at each subsequent call (entry rsp%16 == 8, +5*8 => aligned... +8).
	pushq	%r14
	.cfi_def_cfa_offset 16
	.cfi_offset 14, -16
	pushq	%r13
	.cfi_def_cfa_offset 24
	.cfi_offset 13, -24
	pushq	%r12
	.cfi_def_cfa_offset 32
	.cfi_offset 12, -32
	pushq	%rbp
	.cfi_def_cfa_offset 40
	.cfi_offset 6, -40
	pushq	%rbx
	.cfi_def_cfa_offset 48
	.cfi_offset 3, -48
	movl	%edi, %ebx                  # ebx = err (live across calls)
	movq	%rsi, %r13                  # r13 = func
	movq	%rdx, %r12                  # r12 = file
	movl	%ecx, %ebp                  # ebp = line
	leaq	.LC0(%rip), %rsi            # "CUDA error at: "
	leaq	_ZSt4cerr(%rip), %r14       # r14 = &std::cerr
	movq	%r14, %rdi
	call	_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT   # cerr << "CUDA error at: "
	movq	%rax, %rdi
	movq	%r12, %rsi
	call	_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT   # << file
	movq	%rax, %rdi
	leaq	.LC1(%rip), %rsi            # ":"
	call	_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
	movq	%rax, %rdi
	movl	%ebp, %esi
	call	_ZNSolsEi@PLT               # << line (operator<<(int))
	movq	%rax, %rdi
	call	_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT # << std::endl
	movl	%ebx, %edi
	call	cudaGetErrorString@PLT      # rax = error message for err
	movq	%rax, %rsi
	movq	%r14, %rdi
	call	_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT   # cerr << message
	movq	%rax, %rdi
	leaq	.LC2(%rip), %rsi            # " "
	call	_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
	movq	%rax, %rdi
	movq	%r13, %rsi
	call	_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT   # << func
	movq	%rax, %rdi
	call	_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
	movl	$1, %edi
	call	exit@PLT                    # noreturn; no epilogue needed
	.cfi_endproc
.LFE4009:
	.size	_Z5checkI9cudaErrorEvT_PKcS3_i, .-_Z5checkI9cudaErrorEvT_PKcS3_i
#-----------------------------------------------------------------------
# static void __cudaUnregisterBinaryUtil(void)
# atexit handler: unregisters the embedded fat binary from the CUDA runtime.
# The sub/add of 8 keeps rsp 16-aligned at the call site.
#-----------------------------------------------------------------------
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3675:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                     # realign stack for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi   # handle saved at registration
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3675:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# void serialsum_accrossblock(unsigned* sum, int len, unsigned n, int block_size)
# In:  rdi = sum, esi = len, edx = n, ecx = block_size
# C equivalent: step = len*block_size;
#               for (i = 2*step; i < n; i += step) sum[i] += sum[i-step];
# Register roles in the loop: esi = step, eax = i, ecx = i - step.
# Leaf function: no prologue, no stack use.
#-----------------------------------------------------------------------
	.globl	_Z22serialsum_accrossblockPjiji
	.type	_Z22serialsum_accrossblockPjiji, @function
_Z22serialsum_accrossblockPjiji:
.LFB3670:
	.cfi_startproc
	endbr64
	imull	%ecx, %esi                  # esi = step = len * block_size
	leal	(%rsi,%rsi), %eax           # eax = i = 2 * step
	cmpl	%edx, %eax
	jnb	.L9                         # unsigned: i >= n -> nothing to do
	movl	%esi, %ecx                  # ecx = i - step (trails by one stride)
.L11:
	movl	%eax, %r8d                  # zero-extend indices for addressing
	movl	%ecx, %r9d
	movl	(%rdi,%r9,4), %r9d          # r9d = sum[i - step]
	addl	%r9d, (%rdi,%r8,4)          # sum[i] += sum[i - step]
	addl	%esi, %eax                  # i += step
	addl	%esi, %ecx                  # (i - step) += step
	cmpl	%edx, %eax
	jb	.L11                        # unsigned compare: continue while i < n
.L9:
	ret
	.cfi_endproc
.LFE3670:
	.size	_Z22serialsum_accrossblockPjiji, .-_Z22serialsum_accrossblockPjiji
.globl _Z32__device_stub__Z9prefixsumPjS_ijPjS_ij
.type _Z32__device_stub__Z9prefixsumPjS_ijPjS_ij, @function
_Z32__device_stub__Z9prefixsumPjS_ijPjS_ij:
.LFB3697:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9prefixsumPjS_ij(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3697:
.size _Z32__device_stub__Z9prefixsumPjS_ijPjS_ij, .-_Z32__device_stub__Z9prefixsumPjS_ijPjS_ij
.globl _Z9prefixsumPjS_ij
.type _Z9prefixsumPjS_ij, @function
_Z9prefixsumPjS_ij:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9prefixsumPjS_ijPjS_ij
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _Z9prefixsumPjS_ij, .-_Z9prefixsumPjS_ij
.globl _Z45__device_stub__Z23serialsum_accrossthreadPjijPjij
.type _Z45__device_stub__Z23serialsum_accrossthreadPjijPjij, @function
_Z45__device_stub__Z23serialsum_accrossthreadPjijPjij:
.LFB3699:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z23serialsum_accrossthreadPjij(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3699:
.size _Z45__device_stub__Z23serialsum_accrossthreadPjijPjij, .-_Z45__device_stub__Z23serialsum_accrossthreadPjijPjij
.globl _Z23serialsum_accrossthreadPjij
.type _Z23serialsum_accrossthreadPjij, @function
_Z23serialsum_accrossthreadPjij:
.LFB3700:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z23serialsum_accrossthreadPjijPjij
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3700:
.size _Z23serialsum_accrossthreadPjij, .-_Z23serialsum_accrossthreadPjij
.globl _Z33__device_stub__Z11mergethreadPjijPjij
.type _Z33__device_stub__Z11mergethreadPjijPjij, @function
_Z33__device_stub__Z11mergethreadPjijPjij:
.LFB3701:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11mergethreadPjij(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3701:
.size _Z33__device_stub__Z11mergethreadPjijPjij, .-_Z33__device_stub__Z11mergethreadPjijPjij
.globl _Z11mergethreadPjij
.type _Z11mergethreadPjij, @function
_Z11mergethreadPjij:
.LFB3702:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z11mergethreadPjijPjij
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3702:
.size _Z11mergethreadPjij, .-_Z11mergethreadPjij
.globl _Z32__device_stub__Z10mergeblockPjijPjij
.type _Z32__device_stub__Z10mergeblockPjijPjij, @function
_Z32__device_stub__Z10mergeblockPjijPjij:
.LFB3703:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z10mergeblockPjij(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3703:
.size _Z32__device_stub__Z10mergeblockPjijPjij, .-_Z32__device_stub__Z10mergeblockPjijPjij
.globl _Z10mergeblockPjij
.type _Z10mergeblockPjij, @function
_Z10mergeblockPjij:
.LFB3704:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z10mergeblockPjijPjij
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3704:
.size _Z10mergeblockPjij, .-_Z10mergeblockPjij
.section .rodata.str1.1
.LC3:
.string "data preparation done"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "/home/ubuntu/Datasets/stackv2/train-structured/Haoping-Xiao/Parallel-Programming/main/prefixsum/prefixsum.cu"
.align 8
.LC5:
.string "cudaMalloc((void**)&d_in,n*sizeof(unsigned int))"
.align 8
.LC6:
.string "cudaMalloc((void**)&d_sum,n*sizeof(unsigned int))"
.align 8
.LC7:
.string "cudaMemset(d_sum,0,n*sizeof(unsigned int))"
.align 8
.LC8:
.string "cudaMemcpy(d_in,data,n * sizeof(unsigned int), cudaMemcpyHostToDevice)"
.section .rodata.str1.1
.LC9:
.string "cudaGetLastError()"
.section .rodata.str1.8
.align 8
.LC10:
.string "cudaMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)"
.section .rodata.str1.1
.LC11:
.string "i: "
.LC12:
.string "error!"
.LC13:
.string "vs"
.LC14:
.string "pass here 1"
.section .rodata.str1.8
.align 8
.LC15:
.string "cudaMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), cudaMemcpyHostToDevice)"
.section .rodata.str1.1
.LC16:
.string "pass here"
.section .rodata.str1.8
.align 8
.LC17:
.string "cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)"
.section .rodata.str1.1
.LC18:
.string "cudaFree(d_in)"
.LC19:
.string "cudaFree(d_sum)"
.LC20:
.string "correct"
.text
.globl main
.type main, @function
main:
.LFB3672:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
leaq -1597440(%rsp), %r11
.cfi_def_cfa 11, 1597488
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $2624, %rsp
.cfi_def_cfa_offset 1600112
movq %fs:40, %rax
movq %rax, 1600056(%rsp)
xorl %eax, %eax
movl $400000, %edi
call _Znam@PLT
movq %rax, %rbx
movl $0, %eax
.L46:
movl %eax, 48(%rsp,%rax,4)
addq $1, %rax
cmpq $100000, %rax
jne .L46
movl $0, %eax
.L47:
movl %eax, (%rbx,%rax,4)
addq $1, %rax
cmpq $100000, %rax
jne .L47
movl $0, %eax
.L48:
leaq -1(%rax), %rcx
imulq %rax, %rcx
movq %rcx, %rdx
shrq $63, %rdx
addq %rcx, %rdx
sarq %rdx
movl %edx, 400048(%rsp,%rax,4)
addq $1, %rax
cmpq $100000, %rax
jne .L48
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq $0, 8(%rsp)
leaq 8(%rsp), %rdi
movl $400000, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $145, %ecx
leaq .LC4(%rip), %rbp
movq %rbp, %rdx
leaq .LC5(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movq $0, 16(%rsp)
leaq 16(%rsp), %rdi
movl $400000, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $147, %ecx
movq %rbp, %rdx
leaq .LC6(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movl $400000, %edx
movl $0, %esi
movq 16(%rsp), %rdi
call cudaMemset@PLT
movl %eax, %edi
movl $148, %ecx
movq %rbp, %rdx
leaq .LC7(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $400000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $149, %ecx
movq %rbp, %rdx
leaq .LC8(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movl $64, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $2, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L49:
call cudaGetLastError@PLT
movl %eax, %edi
movl $170, %ecx
leaq .LC4(%rip), %rdx
leaq .LC9(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movl $128000, %eax
.L50:
leaq -127999(%rax), %rcx
imulq %rax, %rcx
movq %rcx, %rdx
shrq $63, %rdx
addq %rcx, %rdx
sarq %rdx
movl %edx, 944052(%rsp,%rax,4)
addq $1, %rax
cmpq $163999, %rax
jne .L50
movl $1, %eax
.L51:
leaq -1(%rax), %rcx
imulq %rax, %rcx
movq %rcx, %rdx
shrq $63, %rdx
addq %rcx, %rdx
sarq %rdx
movl %edx, 1200048(%rsp,%rax,4)
addq $1, %rax
cmpq $64001, %rax
jne .L51
movl $64, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L75
.L52:
call cudaGetLastError@PLT
movl %eax, %edi
movl $210, %ecx
leaq .LC4(%rip), %rdx
leaq .LC9(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movl $64, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $2, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L76
.L53:
call cudaGetLastError@PLT
movl %eax, %edi
movl $240, %ecx
leaq .LC4(%rip), %rbp
movq %rbp, %rdx
leaq .LC9(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
leaq 800048(%rsp), %rdi
movl $2, %ecx
movl $400000, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl %eax, %edi
movl $245, %ecx
movq %rbp, %rdx
leaq .LC10(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movl $64001, %ebp
leaq 800048(%rsp), %rax
.L56:
movl 1200048(%rsp,%rbp,4), %r12d
cmpl (%rax,%rbp,4), %r12d
jne .L77
addq $1, %rbp
cmpq $100000, %rbp
jne .L56
jmp .L55
.L74:
movl $100000, %ecx
movl $1000, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z32__device_stub__Z9prefixsumPjS_ijPjS_ij
jmp .L49
.L75:
movl $100000, %edx
movl $1000, %esi
movq 16(%rsp), %rdi
call _Z45__device_stub__Z23serialsum_accrossthreadPjijPjij
jmp .L52
.L76:
movl $100000, %edx
movl $1000, %esi
movq 16(%rsp), %rdi
call _Z33__device_stub__Z11mergethreadPjijPjij
jmp .L53
.L77:
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl %r12d, %esi
movq %r13, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC13(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movslq %ebp, %rbp
movl 800048(%rsp,%rbp,4), %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
.L55:
leaq .LC14(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq 800048(%rsp), %rbp
movl $64, %ecx
movl $100000, %edx
movl $1000, %esi
movq %rbp, %rdi
call _Z22serialsum_accrossblockPjiji
movl $1, %ecx
movl $400000, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $257, %ecx
leaq .LC4(%rip), %rdx
leaq .LC15(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movl $1, %ebp
leaq 800048(%rsp), %rax
.L59:
movl 1200048(%rsp,%rbp,4), %r12d
cmpl (%rax,%rbp,4), %r12d
jne .L78
addq $1, %rbp
cmpq $100000, %rbp
jne .L59
jmp .L58
.L78:
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl %r12d, %esi
movq %r13, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC13(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movslq %ebp, %rbp
movl 800048(%rsp,%rbp,4), %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
.L58:
leaq .LC16(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $64, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L79
.L60:
call cudaGetLastError@PLT
movl %eax, %edi
movl $288, %ecx
leaq .LC4(%rip), %rbp
movq %rbp, %rdx
leaq .LC9(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movl $2, %ecx
movl $400000, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $293, %ecx
movq %rbp, %rdx
leaq .LC17(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movq 8(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $294, %ecx
movq %rbp, %rdx
leaq .LC18(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movq 16(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $295, %ecx
movq %rbp, %rdx
leaq .LC19(%rip), %rsi
call _Z5checkI9cudaErrorEvT_PKcS3_i
movl $0, %ebp
.L63:
movl 400048(%rsp,%rbp,4), %r12d
cmpl (%rbx), %r12d
jne .L80
addq $1, %rbp
addq $4, %rbx
cmpq $100000, %rbp
jne .L63
leaq .LC20(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
jmp .L62
.L79:
movl $100000, %edx
movl $1000, %esi
movq 16(%rsp), %rdi
call _Z32__device_stub__Z10mergeblockPjijPjij
jmp .L60
.L80:
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbp, %rsi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl %r12d, %esi
movq %r13, %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC13(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl (%rbx), %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
.L62:
movq 1600056(%rsp), %rax
subq %fs:40, %rax
jne .L81
movl $0, %eax
addq $1600064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L81:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3672:
.size main, .-main
.section .rodata.str1.1
.LC21:
.string "_Z10mergeblockPjij"
.LC22:
.string "_Z11mergethreadPjij"
.section .rodata.str1.8
.align 8
.LC23:
.string "_Z23serialsum_accrossthreadPjij"
.section .rodata.str1.1
.LC24:
.string "_Z9prefixsumPjS_ij"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3706:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC21(%rip), %rdx
movq %rdx, %rcx
leaq _Z10mergeblockPjij(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC22(%rip), %rdx
movq %rdx, %rcx
leaq _Z11mergethreadPjij(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC23(%rip), %rdx
movq %rdx, %rcx
leaq _Z23serialsum_accrossthreadPjij(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC24(%rip), %rdx
movq %rdx, %rcx
leaq _Z9prefixsumPjS_ij(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3706:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
template<typename T>
static inline void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
#define CHECK(x) check(x, #x, __FILE__, __LINE__)
// template <class T>
// void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
// CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
// }
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
// printf("checing len: %p",len);
// printf("checing n: %p",n);
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int start=index*len+1;//exclusive
if (start>n) return; //exclusive, could equal to n
int end=start+step;
output[start]=mask[start-1];
for(unsigned int i=start+1;i<end&&i<n;i++){
output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
}
}
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
// int offset=2*step-1;
int offset=2*step;
unsigned int start=step*blockDim.x*index+offset;
unsigned int end=step*blockDim.x*(index+1)+1;
for(unsigned int i=start;i<end && i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
if (threadIdx.x==0) return;
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int start=index*step+1;//exclusive
unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
unsigned int base=sum[start-1];
for(unsigned int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
// __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// int step=len*blockDim.x;
// // int offset=2*step-1;
// int offset=2*step;
// unsigned int start= blockDim.x*step*index+offset;
// unsigned int end= blockDim.x*step*(index+1);
// for(unsigned int i=start; i<end && i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
int step=len*block_size;//each block has step number
int start=2*step;
for(unsigned int i=start; i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index==0) return; //the first block is not needed to merge
int step=len*blockDim.x;
int start=index*step+1; //exclusive
// int end=start+step;
int end=start+step-1;// -1 is important, this position has been added in serial sum
// int base=sum[blockIdx.x*len*blockDim.x-1];//last element at last block
int base=sum[start-1];//last element at last block
for(int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
int main(){
const unsigned int n=100000; //100000 number
unsigned int data[n];
unsigned int result[n];
unsigned int inter_sum[n];
unsigned int inter_result[n];
unsigned int *cal_result=new unsigned int [n];
for (unsigned int i=0; i<n; i++){
data[i]=i;
}
for (unsigned int i=0; i<n; i++){
cal_result[i]=i;
}
for (long long i=0; i<n; i++){
result[i]=(i-1)*i/2;
}
std::cout<< "data preparation done"<<std::endl;
const int block_size=64;//64 threads per block;
const int len=1000; // add 1000 prefix sum per thread;
unsigned int *d_in=NULL;
CHECK(cudaMalloc((void**)&d_in,n*sizeof(unsigned int)));
unsigned int *d_sum=NULL;
CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int)));
CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int)));
CHECK(cudaMemcpy(d_in,data,n * sizeof(unsigned int), cudaMemcpyHostToDevice));
// cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice);
// std::cout<< divup(n,block_size*len) <<std::endl;
// for (long long i=64001; i<65001; i++){
// inter_result[i]=(64000+i-1)*(i-64000)/2;
// }
prefixsum<<<divup(n,block_size*len),block_size>>>(d_in,d_sum,len,n);
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=64001; i<65001; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass here"<<std::endl;
CHECK(cudaGetLastError());
long long start=64001;
// int end=start+1000;
unsigned int end=100000;
// std::cout<< end*end<<std::endl;
for (unsigned int i=start; i<end; i++){
// int index=i-64000;
inter_result[i]=((start-1+i-1)*(i-start+1))/2;
}
start=1;
end=start+64000;
for (long long i=start; i<end; i++){
// int index=i-64000;
inter_result[i]=(i-1)*(i-start+1)/2;
}
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=65000; i<66000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass"<<std::endl;
// for (unsigned int i=65000; i<66000; i++){
// inter_result[i]=(65000+i)*(i-65000+1)/2;
// }
// for (long long i=64001; i<65001; i++){
// inter_result[i]=(64000+i-1)*(i-64000)/2;
// }
// for (unsigned int i=1001; i<2001; i++){
// inter_result[i]=(1000+i-1)*(i-1000)/2;
// }
// inter_result[2000]+=result[1000];
serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for(int i=1001; i<2001; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"first: i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass"<<std::endl;
// std::cout << "pass first one"<<std::endl;
// for (int i=64000; i<65000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// inter_result[65999]+=inter_result[64999];
// for (int i=65000; i<66000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(inter_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//serial sum
CHECK(cudaMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (int i=64001; i<100000; i++){
if(inter_result[i]!=inter_sum[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
break;
// return 0;
}
}
std::cout<<"pass here 1"<<std::endl;
serialsum_accrossblock(inter_sum, len, n, block_size);
CHECK(cudaMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), cudaMemcpyHostToDevice));
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (int i=1; i<100000; i++){
if(inter_result[i]!=inter_sum[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
break;
// return 0;
}
}
std::cout<<"pass here"<<std::endl;
// serialsum_accrossblock<<<divup(n,block_size*len*block_size*block_size) ,block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=0; i<100000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
// break;
// // return 0;
// }
// }
// for (unsigned int i=64000; i<100000; i++){
// inter_result[i]+=inter_result[63999];
// }
// std::cout<< divup(n,block_size*len) << std::endl;
mergeblock<<<divup(n,block_size*len*block_size) ,block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
// cuda_memcpy(cal_result, d_sum, n, cudaMemcpyDeviceToHost);
CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_sum));
//compare
unsigned int i;
for (i=0; i<n; i++){
if(result[i]!=cal_result[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<<result[i]<<"vs"<<cal_result[i]<<std::endl;
break;
}
}
if(i==n){
std::cout<<"correct"<<std::endl;
}
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
template<typename T>
static inline void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
#define CHECK(x) check(x, #x, __FILE__, __LINE__)
// template <class T>
// void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
// CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
// }
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
// printf("checing len: %p",len);
// printf("checing n: %p",n);
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int start=index*len+1;//exclusive
if (start>n) return; //exclusive, could equal to n
int end=start+step;
output[start]=mask[start-1];
for(unsigned int i=start+1;i<end&&i<n;i++){
output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
}
}
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
// int offset=2*step-1;
int offset=2*step;
unsigned int start=step*blockDim.x*index+offset;
unsigned int end=step*blockDim.x*(index+1)+1;
for(unsigned int i=start;i<end && i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
if (threadIdx.x==0) return;
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int start=index*step+1;//exclusive
unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
unsigned int base=sum[start-1];
for(unsigned int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
// __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// int step=len*blockDim.x;
// // int offset=2*step-1;
// int offset=2*step;
// unsigned int start= blockDim.x*step*index+offset;
// unsigned int end= blockDim.x*step*(index+1);
// for(unsigned int i=start; i<end && i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
int step=len*block_size;//each block has step number
int start=2*step;
for(unsigned int i=start; i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index==0) return; //the first block is not needed to merge
int step=len*blockDim.x;
int start=index*step+1; //exclusive
// int end=start+step;
int end=start+step-1;// -1 is important, this position has been added in serial sum
// int base=sum[blockIdx.x*len*blockDim.x-1];//last element at last block
int base=sum[start-1];//last element at last block
for(int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
int main(){
const unsigned int n=100000; //100000 number
unsigned int data[n];
unsigned int result[n];
unsigned int inter_sum[n];
unsigned int inter_result[n];
unsigned int *cal_result=new unsigned int [n];
for (unsigned int i=0; i<n; i++){
data[i]=i;
}
for (unsigned int i=0; i<n; i++){
cal_result[i]=i;
}
for (long long i=0; i<n; i++){
result[i]=(i-1)*i/2;
}
std::cout<< "data preparation done"<<std::endl;
const int block_size=64;//64 threads per block;
const int len=1000; // add 1000 prefix sum per thread;
unsigned int *d_in=NULL;
CHECK(hipMalloc((void**)&d_in,n*sizeof(unsigned int)));
unsigned int *d_sum=NULL;
CHECK(hipMalloc((void**)&d_sum,n*sizeof(unsigned int)));
CHECK(hipMemset(d_sum,0,n*sizeof(unsigned int)));
CHECK(hipMemcpy(d_in,data,n * sizeof(unsigned int), hipMemcpyHostToDevice));
// cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice);
// std::cout<< divup(n,block_size*len) <<std::endl;
// for (long long i=64001; i<65001; i++){
// inter_result[i]=(64000+i-1)*(i-64000)/2;
// }
prefixsum<<<divup(n,block_size*len),block_size>>>(d_in,d_sum,len,n);
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=64001; i<65001; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass here"<<std::endl;
CHECK(hipGetLastError());
long long start=64001;
// int end=start+1000;
unsigned int end=100000;
// std::cout<< end*end<<std::endl;
for (unsigned int i=start; i<end; i++){
// int index=i-64000;
inter_result[i]=((start-1+i-1)*(i-start+1))/2;
}
start=1;
end=start+64000;
for (long long i=start; i<end; i++){
// int index=i-64000;
inter_result[i]=(i-1)*(i-start+1)/2;
}
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=65000; i<66000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass"<<std::endl;
// for (unsigned int i=65000; i<66000; i++){
// inter_result[i]=(65000+i)*(i-65000+1)/2;
// }
// for (long long i=64001; i<65001; i++){
// inter_result[i]=(64000+i-1)*(i-64000)/2;
// }
// for (unsigned int i=1001; i<2001; i++){
// inter_result[i]=(1000+i-1)*(i-1000)/2;
// }
// inter_result[2000]+=result[1000];
serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for(int i=1001; i<2001; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"first: i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass"<<std::endl;
// std::cout << "pass first one"<<std::endl;
// for (int i=64000; i<65000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// inter_result[65999]+=inter_result[64999];
// for (int i=65000; i<66000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
// CHECK(cudaMemcpy(inter_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//serial sum
CHECK(hipMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost));
for (int i=64001; i<100000; i++){
if(inter_result[i]!=inter_sum[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
break;
// return 0;
}
}
std::cout<<"pass here 1"<<std::endl;
serialsum_accrossblock(inter_sum, len, n, block_size);
CHECK(hipMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), hipMemcpyHostToDevice));
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (int i=1; i<100000; i++){
if(inter_result[i]!=inter_sum[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
break;
// return 0;
}
}
std::cout<<"pass here"<<std::endl;
// serialsum_accrossblock<<<divup(n,block_size*len*block_size*block_size) ,block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=0; i<100000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
// break;
// // return 0;
// }
// }
// for (unsigned int i=64000; i<100000; i++){
// inter_result[i]+=inter_result[63999];
// }
// std::cout<< divup(n,block_size*len) << std::endl;
mergeblock<<<divup(n,block_size*len*block_size) ,block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
// cuda_memcpy(cal_result, d_sum, n, cudaMemcpyDeviceToHost);
CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost));
CHECK(hipFree(d_in));
CHECK(hipFree(d_sum));
//compare
unsigned int i;
for (i=0; i<n; i++){
if(result[i]!=cal_result[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<<result[i]<<"vs"<<cal_result[i]<<std::endl;
break;
}
}
if(i==n){
std::cout<<"correct"<<std::endl;
}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
template<typename T>
static inline void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
#define CHECK(x) check(x, #x, __FILE__, __LINE__)
// template <class T>
// void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
// CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
// }
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
// printf("checing len: %p",len);
// printf("checing n: %p",n);
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int start=index*len+1;//exclusive
if (start>n) return; //exclusive, could equal to n
int end=start+step;
output[start]=mask[start-1];
for(unsigned int i=start+1;i<end&&i<n;i++){
output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
}
}
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
// int offset=2*step-1;
int offset=2*step;
unsigned int start=step*blockDim.x*index+offset;
unsigned int end=step*blockDim.x*(index+1)+1;
for(unsigned int i=start;i<end && i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
if (threadIdx.x==0) return;
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int start=index*step+1;//exclusive
unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
unsigned int base=sum[start-1];
for(unsigned int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
// __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// int step=len*blockDim.x;
// // int offset=2*step-1;
// int offset=2*step;
// unsigned int start= blockDim.x*step*index+offset;
// unsigned int end= blockDim.x*step*(index+1);
// for(unsigned int i=start; i<end && i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
int step=len*block_size;//each block has step number
int start=2*step;
for(unsigned int i=start; i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index==0) return; //the first block is not needed to merge
int step=len*blockDim.x;
int start=index*step+1; //exclusive
// int end=start+step;
int end=start+step-1;// -1 is important, this position has been added in serial sum
// int base=sum[blockIdx.x*len*blockDim.x-1];//last element at last block
int base=sum[start-1];//last element at last block
for(int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
int main(){
const unsigned int n=100000; //100000 number
unsigned int data[n];
unsigned int result[n];
unsigned int inter_sum[n];
unsigned int inter_result[n];
unsigned int *cal_result=new unsigned int [n];
for (unsigned int i=0; i<n; i++){
data[i]=i;
}
for (unsigned int i=0; i<n; i++){
cal_result[i]=i;
}
for (long long i=0; i<n; i++){
result[i]=(i-1)*i/2;
}
std::cout<< "data preparation done"<<std::endl;
const int block_size=64;//64 threads per block;
const int len=1000; // add 1000 prefix sum per thread;
unsigned int *d_in=NULL;
CHECK(hipMalloc((void**)&d_in,n*sizeof(unsigned int)));
unsigned int *d_sum=NULL;
CHECK(hipMalloc((void**)&d_sum,n*sizeof(unsigned int)));
CHECK(hipMemset(d_sum,0,n*sizeof(unsigned int)));
CHECK(hipMemcpy(d_in,data,n * sizeof(unsigned int), hipMemcpyHostToDevice));
// cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice);
// std::cout<< divup(n,block_size*len) <<std::endl;
// for (long long i=64001; i<65001; i++){
// inter_result[i]=(64000+i-1)*(i-64000)/2;
// }
prefixsum<<<divup(n,block_size*len),block_size>>>(d_in,d_sum,len,n);
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=64001; i<65001; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass here"<<std::endl;
CHECK(hipGetLastError());
long long start=64001;
// int end=start+1000;
unsigned int end=100000;
// std::cout<< end*end<<std::endl;
for (unsigned int i=start; i<end; i++){
// int index=i-64000;
inter_result[i]=((start-1+i-1)*(i-start+1))/2;
}
start=1;
end=start+64000;
for (long long i=start; i<end; i++){
// int index=i-64000;
inter_result[i]=(i-1)*(i-start+1)/2;
}
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=65000; i<66000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass"<<std::endl;
// for (unsigned int i=65000; i<66000; i++){
// inter_result[i]=(65000+i)*(i-65000+1)/2;
// }
// for (long long i=64001; i<65001; i++){
// inter_result[i]=(64000+i-1)*(i-64000)/2;
// }
// for (unsigned int i=1001; i<2001; i++){
// inter_result[i]=(1000+i-1)*(i-1000)/2;
// }
// inter_result[2000]+=result[1000];
serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for(int i=1001; i<2001; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"first: i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// std::cout<<"pass"<<std::endl;
// std::cout << "pass first one"<<std::endl;
// for (int i=64000; i<65000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
// inter_result[65999]+=inter_result[64999];
// for (int i=65000; i<66000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i << " " << cal_result[i] <<"error!"<<std::endl;
// break;
// // return 0;
// }
// }
mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
// CHECK(cudaMemcpy(inter_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
//serial sum
CHECK(hipMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost));
for (int i=64001; i<100000; i++){
if(inter_result[i]!=inter_sum[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
break;
// return 0;
}
}
std::cout<<"pass here 1"<<std::endl;
serialsum_accrossblock(inter_sum, len, n, block_size);
CHECK(hipMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), hipMemcpyHostToDevice));
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (int i=1; i<100000; i++){
if(inter_result[i]!=inter_sum[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
break;
// return 0;
}
}
std::cout<<"pass here"<<std::endl;
// serialsum_accrossblock<<<divup(n,block_size*len*block_size*block_size) ,block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(cal_result, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for (int i=0; i<100000; i++){
// if(inter_result[i]!=cal_result[i]){
// std::cout<<"i: "<< i <<"error!"<<std::endl;
// std::cout<< inter_result[i] << "vs" << cal_result[i] <<std::endl;
// break;
// // return 0;
// }
// }
// for (unsigned int i=64000; i<100000; i++){
// inter_result[i]+=inter_result[63999];
// }
// std::cout<< divup(n,block_size*len) << std::endl;
mergeblock<<<divup(n,block_size*len*block_size) ,block_size>>>(d_sum,len,n);
CHECK(hipGetLastError());
// cuda_memcpy(cal_result, d_sum, n, cudaMemcpyDeviceToHost);
CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost));
CHECK(hipFree(d_in));
CHECK(hipFree(d_sum));
//compare
unsigned int i;
for (i=0; i<n; i++){
if(result[i]!=cal_result[i]){
std::cout<<"i: "<< i <<"error!"<<std::endl;
std::cout<<result[i]<<"vs"<<cal_result[i]<<std::endl;
break;
}
}
if(i==n){
std::cout<<"correct"<<std::endl;
}
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; -----------------------------------------------------------------------------
; Kernel: prefixsum(unsigned int* mask, unsigned int* output, int len, unsigned n)
; gfx1100 (wave32). Each lane scans its own `len`-element slice of `mask` into
; `output` (exclusive prefix sum; `output` is assumed pre-zeroed by the host).
; Kernarg layout: [0x00]=mask ptr, [0x08]=output ptr, [0x10]=len, [0x14]=n,
; hidden args follow (group size at 0x24 is used for the flat thread id).
; NOTE(review): compiler-generated; s_delay_alu scheduling is exact — do not
; reorder instructions by hand.
; -----------------------------------------------------------------------------
.protected _Z9prefixsumPjS_ij
.globl _Z9prefixsumPjS_ij
.p2align 8
.type _Z9prefixsumPjS_ij,@function
_Z9prefixsumPjS_ij:
; v1 = flat thread index = workgroup_id(s15) * group_size + tid(v0)
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_mov_b32 s4, exec_lo
v_mul_lo_u32 v0, v1, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, 1, v0
; guard: start = index*len + 1 must satisfy start <= n, else lane exits
v_cmpx_ge_u32_e64 s3, v2
s_cbranch_execz .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
v_add_nc_u32_e32 v5, s2, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_nc_u32_e32 v0, 2, v0
v_min_u32_e32 v5, s3, v5
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
global_load_b32 v4, v[3:4], off
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[2:3]
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
v_cmp_lt_u32_e32 vcc_lo, v0, v5
s_waitcnt vmcnt(0)
; first element of the slice: output[start] = mask[start-1]
global_store_b32 v[1:2], v4, off
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_4
v_mov_b32_e32 v2, 0
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mov_b32_e32 v1, v2
v_lshlrev_b64 v[3:4], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
.p2align 6
; scan loop: output[i] += output[i-1] + mask[i-1] for i in (start, min(end, n))
.LBB0_3:
v_add_nc_u32_e32 v1, -1, v0
global_load_b32 v10, v[3:4], off
v_add_nc_u32_e32 v0, 1, v0
v_lshlrev_b64 v[6:7], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s4, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
v_cmp_ge_u32_e32 vcc_lo, v0, v5
global_load_b32 v1, v[8:9], off
global_load_b32 v6, v[6:7], off
s_or_b32 s1, vcc_lo, s1
s_waitcnt vmcnt(0)
v_add3_u32 v1, v6, v1, v10
global_store_b32 v[3:4], v1, off
v_add_co_u32 v3, s0, v3, 4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, 0, v4, s0
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_3
; common exit: all lanes masked off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9prefixsumPjS_ij
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9prefixsumPjS_ij, .Lfunc_end0-_Z9prefixsumPjS_ij
.section .AMDGPU.csdata,"",@progbits
.text
; -----------------------------------------------------------------------------
; Kernel: serialsum_accrossthread(unsigned int* sum, int len, unsigned n)
; gfx1100 (wave32). Each lane chains the per-thread scan results inside its
; blockDim.x*len region: sum[i] += sum[i-len] stepping by `len`.
; Kernarg layout: [0x00]=sum ptr, [0x08]=len, [0x0c]=n; group size at 0x1c.
; NOTE(review): compiler-generated; do not hand-edit the instruction order.
; -----------------------------------------------------------------------------
.protected _Z23serialsum_accrossthreadPjij
.globl _Z23serialsum_accrossthreadPjij
.p2align 8
.type _Z23serialsum_accrossthreadPjij,@function
_Z23serialsum_accrossthreadPjij:
; v1 = flat thread index; s5 = blockDim.x * len (region size per lane)
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_mul_i32 s5, s4, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v0, v1, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add3_u32 v3, v0, s5, 1
v_lshl_add_u32 v2, s2, 1, v0
v_min_u32_e32 v3, s3, v3
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
; guard: start(=2*len + region base) must be below min(end, n)
v_cmpx_lt_u32_e64 v2, v3
s_cbranch_execz .LBB1_3
s_load_b64 s[0:1], s[0:1], 0x0
v_mul_lo_u32 v4, v1, s4
s_mov_b32 s4, 0
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s2, v4, s[2:3]
s_mov_b32 s3, 0
.p2align 6
; chain loop: sum[i] += sum[i - len], i advancing by len
.LBB1_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_dual_mov_b32 v5, 0 :: v_dual_add_nc_u32 v4, s4, v0
v_add_nc_u32_e32 v6, s4, v2
s_add_i32 s4, s4, s2
v_mov_b32_e32 v7, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_lshlrev_b64 v[6:7], 2, v[6:7]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v7, vcc_lo
s_clause 0x1
global_load_b32 v1, v[4:5], off
global_load_b32 v4, v[6:7], off
v_add_nc_u32_e32 v5, s4, v2
s_delay_alu instid0(VALU_DEP_1)
v_cmp_ge_u32_e32 vcc_lo, v5, v3
s_or_b32 s3, vcc_lo, s3
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v4, v1
global_store_b32 v[6:7], v1, off
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB1_2
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23serialsum_accrossthreadPjij
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z23serialsum_accrossthreadPjij, .Lfunc_end1-_Z23serialsum_accrossthreadPjij
.section .AMDGPU.csdata,"",@progbits
.text
; -----------------------------------------------------------------------------
; Kernel: mergethread(unsigned int* sum, int len, unsigned n)
; gfx1100 (wave32). Lanes with threadIdx.x != 0 add the chained base value
; sum[start-1] to every element of their own len-slice (last element excluded,
; it was already folded in by serialsum_accrossthread).
; Kernarg layout: [0x00]=sum ptr, [0x08]=len, [0x0c]=n; group size at 0x1c.
; NOTE(review): compiler-generated; scheduling is exact.
; -----------------------------------------------------------------------------
.protected _Z11mergethreadPjij
.globl _Z11mergethreadPjij
.p2align 8
.type _Z11mergethreadPjij,@function
_Z11mergethreadPjij:
; early-out for threadIdx.x == 0 (v0 holds the workitem id)
s_mov_b32 s2, exec_lo
v_cmpx_ne_u32_e32 0, v0
s_cbranch_execz .LBB2_4
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mul_lo_u32 v1, v1, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, s2, v1
v_min_u32_e32 v3, s3, v0
v_add_nc_u32_e32 v0, 1, v1
s_delay_alu instid0(VALU_DEP_1)
; guard: start < min(start+len-1, n); otherwise nothing to merge
v_cmp_lt_u32_e32 vcc_lo, v0, v3
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB2_4
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[1:2]
v_mov_b32_e32 v1, v2
v_lshlrev_b64 v[1:2], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
; v4 = base = sum[start-1], loaded once before the loop
global_load_b32 v4, v[4:5], off
s_mov_b32 s1, 0
; merge loop: sum[i] += base
.LBB2_3:
global_load_b32 v5, v[1:2], off
v_add_nc_u32_e32 v0, 1, v0
s_delay_alu instid0(VALU_DEP_1)
v_cmp_ge_u32_e32 vcc_lo, v0, v3
s_or_b32 s1, vcc_lo, s1
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v5, v5, v4
global_store_b32 v[1:2], v5, off
v_add_co_u32 v1, s0, v1, 4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v2, s0, 0, v2, s0
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB2_3
.LBB2_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11mergethreadPjij
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z11mergethreadPjij, .Lfunc_end2-_Z11mergethreadPjij
.section .AMDGPU.csdata,"",@progbits
.text
; -----------------------------------------------------------------------------
; Kernel: mergeblock(unsigned int* sum, int len, unsigned n)
; gfx1100 (wave32). Every lane with global index != 0 adds sum[start-1] (the
; carried block total) into its len*blockDim.x region, last element excluded.
; Unlike the other kernels the loop bounds here are SIGNED (matches the `int`
; loop variable in the HIP source), hence v_cmp_*_i32 below.
; Kernarg layout: [0x00]=sum ptr, [0x08]=len, [0x0c]=n; group size at 0x1c.
; NOTE(review): compiler-generated; scheduling is exact.
; -----------------------------------------------------------------------------
.protected _Z10mergeblockPjij
.globl _Z10mergeblockPjij
.p2align 8
.type _Z10mergeblockPjij,@function
_Z10mergeblockPjij:
; v1 = flat thread index; lane 0 exits (first block needs no merge)
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s2, 0xffff
s_mov_b32 s2, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ne_u32_e32 0, v1
s_cbranch_execz .LBB3_4
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_mul_i32 s4, s4, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v1, s4
v_add_nc_u32_e32 v4, s4, v0
v_add_nc_u32_e32 v2, 1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; guard: start < end (signed) AND start < n (unsigned)
v_cmp_lt_i32_e32 vcc_lo, v2, v4
v_cmp_gt_u32_e64 s2, s3, v2
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB3_4
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v5, vcc_lo, s0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo
; v5 = base = sum[start-1], loaded once before the loop
global_load_b32 v5, v[5:6], off
v_lshlrev_b64 v[6:7], 2, v[2:3]
v_add_nc_u32_e32 v2, 2, v0
v_add_co_u32 v0, vcc_lo, s0, v6
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v7, vcc_lo
s_mov_b32 s1, 0
.p2align 6
; merge loop: sum[i] += base (signed i vs end, unsigned i vs n)
.LBB3_3:
global_load_b32 v3, v[0:1], off
v_cmp_ge_i32_e32 vcc_lo, v2, v4
v_cmp_le_u32_e64 s0, s3, v2
v_add_nc_u32_e32 v2, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s0, vcc_lo, s0
s_and_b32 s0, exec_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s1, s0, s1
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v3, v3, v5
global_store_b32 v[0:1], v3, off
v_add_co_u32 v0, vcc_lo, v0, 4
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB3_3
.LBB3_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10mergeblockPjij
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z10mergeblockPjij, .Lfunc_end3-_Z10mergeblockPjij
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
; One-byte HIP compilation-unit id object (zero-initialized, .bss); emitted by
; the HIP toolchain so host and device halves of this TU can be matched up.
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9prefixsumPjS_ij
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9prefixsumPjS_ij.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23serialsum_accrossthreadPjij
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23serialsum_accrossthreadPjij.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11mergethreadPjij
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11mergethreadPjij.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10mergeblockPjij
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10mergeblockPjij.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
template<typename T>
// Abort the program with context when a HIP runtime call fails.
// `err`  - value returned by the HIP API call (compared against hipSuccess)
// `func` - stringified call site (supplied by the CHECK macro)
// `file`/`line` - source location of the call site
static inline void check(T err, const char* const func, const char* const file, const int line) {
    if (err != hipSuccess) {
        // Fixed: this is a HIP program; the message previously said "CUDA error".
        std::cerr << "HIP error at: " << file << ":" << line << std::endl;
        std::cerr << hipGetErrorString(err) << " " << func << std::endl;
        exit(1);
    }
}
// Wraps a HIP API call: forwards its result plus the call text and source
// location to check() so failures are reported with full context.
#define CHECK(x) check(x, #x, __FILE__, __LINE__)
// template <class T>
// void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
// CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
// }
// Stage 1: per-thread exclusive scan. Each thread writes prefix sums for its
// own `len`-element slice of `mask` into `output`.
// Relies on `output` being zero-initialized by the host (hipMemset in main):
// the `+=` in the loop folds the running value into the existing zero.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
    // printf("checing len: %p",len);
    // printf("checing n: %p",n);
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
    int step=len;
    int start=index*len+1;//exclusive
    // NOTE(review): only start>n exits; if start==n the store below would hit
    // output[n], one past the end. With n=100000 and len=1000 start is always
    // ≡1 (mod len) so this cannot trigger here — confirm before reusing with
    // other n/len combinations, or tighten to start>=n.
    if (start>n) return; //exclusive, could equal to n
    int end=start+step;
    output[start]=mask[start-1];
    for(unsigned int i=start+1;i<end&&i<n;i++){
        output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
    }
}
// Stage 2: serially chain per-thread scan tails within one block-sized region.
// Thread `index` owns region [step*blockDim.x*index, step*blockDim.x*(index+1))
// and adds each slice's closing value into the next slice's closing value,
// stepping by `len`, so every slice end holds the running region total.
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
    int step=len;
    // int offset=2*step-1;
    int offset=2*step;  // first position to update: end of the 2nd slice
    unsigned int start=step*blockDim.x*index+offset;
    unsigned int end=step*blockDim.x*(index+1)+1;
    for(unsigned int i=start;i<end && i<n; i+=step){
        sum[i]+=sum[i-step];
    }
}
// Stage 3: broadcast the chained totals back into each thread's slice.
// Adds base = sum[start-1] (the accumulated total up to this slice) to every
// element of the slice except the last, which stage 2 already finalized.
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
    if (threadIdx.x==0) return;  // slice 0 of each block already holds final values
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
    int step=len;
    unsigned int start=index*step+1;//exclusive
    unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
    unsigned int base=sum[start-1];
    for(unsigned int i=start; i<end && i<n; i++){
        sum[i]+=base;
    }
}
// __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// int step=len*blockDim.x;
// // int offset=2*step-1;
// int offset=2*step;
// unsigned int start= blockDim.x*step*index+offset;
// unsigned int end= blockDim.x*step*(index+1);
// for(unsigned int i=start; i<end && i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
// Stage 4 (host side): chain the per-block totals serially.
// Each block owns `len * block_size` consecutive elements; the closing value
// of every block is folded into the closing value of the following block so
// block boundaries carry the global running total.
void serialsum_accrossblock(unsigned int* sum, const int len, const unsigned int n, const int block_size){
    const int stride = len * block_size;  // elements per block
    // The first block needs no correction, so begin at the end of block 2.
    for (unsigned int pos = 2 * stride; pos < n; pos += stride) {
        sum[pos] += sum[pos - stride];
    }
}
// Stage 5: broadcast the chained block totals into each block's interior.
// Thread `index` (global index 0 skipped) adds base = sum[start-1] to its
// len*blockDim.x region, excluding the last element which stage 4 finalized.
// Note: this kernel's loop variable is signed `int`, unlike the others.
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index==0) return; //the first block is not needed to merge
    int step=len*blockDim.x;
    int start=index*step+1; //exclusive
    // int end=start+step;
    int end=start+step-1;// -1 is important, this position has been added in serial sum
    // int base=sum[blockIdx.x*len*blockDim.x-1];//last element at last block
    int base=sum[start-1];//last element at last block
    for(int i=start; i<end && i<n; i++){
        sum[i]+=base;
    }
}
// Ceiling division for positive operands: smallest k such that k * b >= a.
static inline int divup(int a, int b) {
    const int biased = a + b - 1;  // push a up to the next multiple of b
    return biased / b;
}
// End-to-end exclusive prefix sum of 0..n-1, computed on the GPU in stages:
//   prefixsum (per-thread scan) -> serialsum_accrossthread (per-block chain)
//   -> mergethread -> serialsum_accrossblock (host) -> mergeblock,
// validated at checkpoints against the closed form (i-1)*i/2.
// Returns 0; prints "correct" when the final comparison passes.
int main(){
    const unsigned int n=100000; //100000 number
    // NOTE(review): ~1.6 MB of stack arrays below — fine for the usual 8 MB
    // stack, but move to heap storage if n grows.
    unsigned int data[n];          // input values 0..n-1
    unsigned int result[n];        // closed-form final reference
    unsigned int inter_sum[n];     // host staging of device partial sums
    unsigned int inter_result[n];  // closed-form intermediate-stage reference
    unsigned int *cal_result=new unsigned int [n];  // final device result
    for (unsigned int i=0; i<n; i++){
        data[i]=i;
    }
    for (unsigned int i=0; i<n; i++){
        cal_result[i]=i;
    }
    // Exclusive scan of 0..i-1 is (i-1)*i/2; compute in 64-bit before the
    // truncating store to avoid intermediate overflow.
    for (long long i=0; i<n; i++){
        result[i]=(i-1)*i/2;
    }
    std::cout<< "data preparation done"<<std::endl;
    const int block_size=64;//64 threads per block;
    const int len=1000; // add 1000 prefix sum per thread;
    unsigned int *d_in=NULL;
    CHECK(hipMalloc((void**)&d_in,n*sizeof(unsigned int)));
    unsigned int *d_sum=NULL;
    CHECK(hipMalloc((void**)&d_sum,n*sizeof(unsigned int)));
    // prefixsum accumulates with += and therefore needs a zeroed output.
    CHECK(hipMemset(d_sum,0,n*sizeof(unsigned int)));
    CHECK(hipMemcpy(d_in,data,n * sizeof(unsigned int), hipMemcpyHostToDevice));
    prefixsum<<<divup(n,block_size*len),block_size>>>(d_in,d_sum,len,n);
    CHECK(hipGetLastError());
    // Build the expected post-mergethread values. Elements handled by the
    // second grid's region (from 64001 on) restart their partial sums there.
    long long start=64001;
    unsigned int end=100000;
    for (unsigned int i=start; i<end; i++){
        inter_result[i]=((start-1+i-1)*(i-start+1))/2;
    }
    start=1;
    end=start+64000;
    for (long long i=start; i<end; i++){
        inter_result[i]=(i-1)*(i-start+1)/2;
    }
    serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
    CHECK(hipGetLastError());
    mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
    CHECK(hipGetLastError());
    //serial sum across blocks is done on the host: copy back, chain, copy up.
    CHECK(hipMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost));
    // Checkpoint: region owned by the second grid launch.
    for (int i=64001; i<100000; i++){
        if(inter_result[i]!=inter_sum[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
            break;
        }
    }
    std::cout<<"pass here 1"<<std::endl;
    serialsum_accrossblock(inter_sum, len, n, block_size);
    CHECK(hipMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), hipMemcpyHostToDevice));
    // Checkpoint: whole range after the host-side block chaining.
    for (int i=1; i<100000; i++){
        if(inter_result[i]!=inter_sum[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<< inter_result[i] << "vs" << inter_sum[i] <<std::endl;
            break;
        }
    }
    std::cout<<"pass here"<<std::endl;
    mergeblock<<<divup(n,block_size*len*block_size) ,block_size>>>(d_sum,len,n);
    CHECK(hipGetLastError());
    CHECK(hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost));
    CHECK(hipFree(d_in));
    CHECK(hipFree(d_sum));
    //compare the full device result against the closed-form reference
    unsigned int i;
    for (i=0; i<n; i++){
        if(result[i]!=cal_result[i]){
            std::cout<<"i: "<< i <<"error!"<<std::endl;
            std::cout<<result[i]<<"vs"<<cal_result[i]<<std::endl;
            break;
        }
    }
    if(i==n){
        std::cout<<"correct"<<std::endl;
    }
    delete[] cal_result;  // fixed: was leaked
    return 0;
}
.file "prefixsum.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
# -----------------------------------------------------------------------------
# Host launch stub for HIP kernel prefixsum(unsigned*, unsigned*, int, unsigned).
# SysV AMD64: rdi/rsi = the two pointers, edx/ecx = the two scalars.
# Spills all four arguments to stack slots, builds the kernel-argument pointer
# array at 80(%rsp) expected by hipLaunchKernel, pops the launch configuration
# previously pushed by __hipPushCallConfiguration, then launches the kernel.
# -----------------------------------------------------------------------------
.globl _Z24__device_stub__prefixsumPjS_ij # -- Begin function _Z24__device_stub__prefixsumPjS_ij
.p2align 4, 0x90
.type _Z24__device_stub__prefixsumPjS_ij,@function
_Z24__device_stub__prefixsumPjS_ij: # @_Z24__device_stub__prefixsumPjS_ij
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
# Spill the four kernel arguments into home slots.
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
# args[] = { &arg0, &arg1, &arg2, &arg3 } at 80(%rsp).
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream.
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9prefixsumPjS_ij, %edi
# Stack args 7/8 of hipLaunchKernel: shared-mem bytes and stream.
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__prefixsumPjS_ij, .Lfunc_end0-_Z24__device_stub__prefixsumPjS_ij
.cfi_endproc
# -- End function
# -----------------------------------------------------------------------------
# Host launch stub for HIP kernel serialsum_accrossthread(unsigned*, int, unsigned).
# rdi = pointer, esi/edx = scalars.  Same pattern as the other stubs: spill
# args, build the argument-pointer array at 64(%rsp), pop the pushed launch
# configuration and forward everything to hipLaunchKernel.
# -----------------------------------------------------------------------------
.globl _Z38__device_stub__serialsum_accrossthreadPjij # -- Begin function _Z38__device_stub__serialsum_accrossthreadPjij
.p2align 4, 0x90
.type _Z38__device_stub__serialsum_accrossthreadPjij,@function
_Z38__device_stub__serialsum_accrossthreadPjij: # @_Z38__device_stub__serialsum_accrossthreadPjij
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
# Spill the three kernel arguments.
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
# args[] = { &arg0, &arg1, &arg2 } at 64(%rsp).
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream.
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z23serialsum_accrossthreadPjij, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z38__device_stub__serialsum_accrossthreadPjij, .Lfunc_end1-_Z38__device_stub__serialsum_accrossthreadPjij
.cfi_endproc
# -- End function
# -----------------------------------------------------------------------------
# Host launch stub for HIP kernel mergethread(unsigned*, int, unsigned).
# Identical layout to the serialsum_accrossthread stub above: spill the three
# arguments, build args[] at 64(%rsp), pop the launch config, launch.
# -----------------------------------------------------------------------------
.globl _Z26__device_stub__mergethreadPjij # -- Begin function _Z26__device_stub__mergethreadPjij
.p2align 4, 0x90
.type _Z26__device_stub__mergethreadPjij,@function
_Z26__device_stub__mergethreadPjij: # @_Z26__device_stub__mergethreadPjij
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11mergethreadPjij, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z26__device_stub__mergethreadPjij, .Lfunc_end2-_Z26__device_stub__mergethreadPjij
.cfi_endproc
# -- End function
# -----------------------------------------------------------------------------
# Host function serialsum_accrossblock(unsigned *arr, int len, unsigned n, int block_size).
# stride = len * block_size.  Starting at index 2*stride, performs the serial
# carry across block boundaries:  arr[k] += arr[k - stride]  for
# k = 2*stride, 3*stride, ... while k < n (unsigned compare, jae/jb).
# Leaf function: no stack frame; only volatile registers used.
# -----------------------------------------------------------------------------
.globl _Z22serialsum_accrossblockPjiji # -- Begin function _Z22serialsum_accrossblockPjiji
.p2align 4, 0x90
.type _Z22serialsum_accrossblockPjiji,@function
_Z22serialsum_accrossblockPjiji: # @_Z22serialsum_accrossblockPjiji
.cfi_startproc
# %bb.0:
# kill: def $esi killed $esi def $rsi
imull %ecx, %esi                        # esi = stride = len * block_size
leal (%rsi,%rsi), %eax                  # eax = first destination index, 2*stride
cmpl %edx, %eax
jae .LBB3_3                             # nothing to do when 2*stride >= n
# %bb.1:
movl %esi, %ecx                         # ecx = source index, trails by one stride
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %ecx, %r8d
movl (%rdi,%r8,4), %r8d                 # r8d = arr[src]
movl %eax, %r9d
addl %r8d, (%rdi,%r9,4)                 # arr[dst] += arr[src]
addl %esi, %eax                         # both indices advance by stride
addl %esi, %ecx
cmpl %edx, %eax
jb .LBB3_2
.LBB3_3: # %._crit_edge
retq
.Lfunc_end3:
.size _Z22serialsum_accrossblockPjiji, .Lfunc_end3-_Z22serialsum_accrossblockPjiji
.cfi_endproc
# -- End function
# -----------------------------------------------------------------------------
# Host launch stub for HIP kernel mergeblock(unsigned*, int, unsigned).
# Same three-argument pattern as the mergethread stub: spill, build args[] at
# 64(%rsp), pop launch configuration, forward to hipLaunchKernel.
# -----------------------------------------------------------------------------
.globl _Z25__device_stub__mergeblockPjij # -- Begin function _Z25__device_stub__mergeblockPjij
.p2align 4, 0x90
.type _Z25__device_stub__mergeblockPjij,@function
_Z25__device_stub__mergeblockPjij: # @_Z25__device_stub__mergeblockPjij
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10mergeblockPjij, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end4:
.size _Z25__device_stub__mergeblockPjij, .Lfunc_end4-_Z25__device_stub__mergeblockPjij
.cfi_endproc
# -- End function
# -----------------------------------------------------------------------------
# main() of the prefix-sum test driver (see .L.str.* for source-level context).
# Stack-frame layout (offsets from %rsp after the 1600120-byte allocation):
#   1200112(%rsp)  input array data[100000]   (filled with i)
#    800112(%rsp)  expected results result[i] = i*(i-1)/2
#    400112(%rsp)  host reference array inter_result[100000]
#       112(%rsp)  inter_sum[100000], copied back from the device
#   %rbx           heap buffer from new[] (cal_result), 400000 bytes
# Launch configs are packed dim3 values: %r14 = 0x100000040 ({x=64,y=1}) is
# the block dim; grid dims are derived from it ({x=2} or {x=1}) via lea.
# Flow: prepare data -> d_in/d_sum alloc+copy -> prefixsum kernel -> host
# reference computation -> serialsum_accrossthread -> mergethread -> compare
# -> mergeblock -> final compare -> print "correct" and return 0.
# -----------------------------------------------------------------------------
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
push %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1600120, %rsp # imm = 0x186A78
.cfi_def_cfa_offset 1600176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# cal_result = new unsigned int[100000]  (400000 bytes), kept in %rbx.
movl $400000, %edi # imm = 0x61A80
callq _Znam
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
# data[i] = i
.LBB5_1: # =>This Inner Loop Header: Depth=1
movl %eax, 1200112(%rsp,%rax,4)
incq %rax
cmpq $100000, %rax # imm = 0x186A0
jne .LBB5_1
# %bb.2: # %.preheader171.preheader
xorl %eax, %eax
.p2align 4, 0x90
# cal_result[i] = i (initial fill of the heap buffer)
.LBB5_3: # %.preheader171
# =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
incq %rax
cmpq $100000, %rax # imm = 0x186A0
jne .LBB5_3
# %bb.4: # %.preheader170.preheader
xorl %eax, %eax
.p2align 4, 0x90
# result[i] = i*(i-1)/2  -- expected exclusive prefix sum of data[]
.LBB5_5: # %.preheader170
# =>This Inner Loop Header: Depth=1
leaq -1(%rax), %rcx
imulq %rax, %rcx
movq %rcx, %rdx
shrq $63, %rdx
addq %rcx, %rdx
shrq %rdx
movl %edx, 800112(%rsp,%rax,4)
incq %rax
cmpq $100000, %rax # imm = 0x186A0
jne .LBB5_5
# %bb.6:
# std::cout << "data preparation done" << std::endl;
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $21, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB5_72
# %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB5_9
# %bb.8:
movzbl 67(%r14), %eax
jmp .LBB5_10
.LBB5_9:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# %r14 doubles from here on as the packed block dim {x=64, y=1}.
movabsq $4294967360, %r14 # imm = 0x100000040
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
# hipMalloc(&d_in, 400000); CHECK at source line 147.
movq $0, 72(%rsp)
leaq 72(%rsp), %rdi
movl $400000, %esi # imm = 0x61A80
callq hipMalloc
movl $.L.str.1, %esi
movl %eax, %edi
movl $147, %edx
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# hipMalloc(&d_sum, 400000); CHECK at line 149.
movq $0, 8(%rsp)
leaq 8(%rsp), %rdi
movl $400000, %esi # imm = 0x61A80
callq hipMalloc
movl $.L.str.3, %esi
movl %eax, %edi
movl $149, %edx
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# hipMemset(d_sum, 0, 400000); CHECK at line 150.
movq 8(%rsp), %rdi
movl $400000, %edx # imm = 0x61A80
xorl %esi, %esi
callq hipMemset
movl $.L.str.4, %esi
movl %eax, %edi
movl $150, %edx
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# hipMemcpy(d_in, data, 400000, hipMemcpyHostToDevice); CHECK at line 151.
movq 72(%rsp), %rdi
leaq 1200112(%rsp), %rsi
movl $400000, %edx # imm = 0x61A80
movl $1, %ecx
callq hipMemcpy
movl $.L.str.5, %esi
movl %eax, %edi
movl $151, %edx
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# prefixsum<<<{2,1}, {64,1}>>>(d_in, d_sum, 1000, 100000)
# r12 = r14 - 62 = 0x100000002, i.e. packed grid {x=2, y=1}.
leaq -62(%r14), %r12
movq %r12, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_12
# %bb.11:
# Inlined launch stub for prefixsum (same pattern as the standalone stubs).
movq 72(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 40(%rsp)
movq %rcx, 56(%rsp)
movl $1000, 16(%rsp) # imm = 0x3E8
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 400112(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 400112(%rsp), %rsi
movl 400120(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z9prefixsumPjS_ij, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_12:
# CHECK(hipGetLastError()) at line 172.
callq hipGetLastError
movl $.L.str.6, %esi
movl %eax, %edi
movl $172, %edx
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# Host reference values for inter_result[64001..99999] (inlined/strength-
# reduced host computation; index i = rax - 63999).
movl $128000, %eax # imm = 0x1F400
.p2align 4, 0x90
.LBB5_13: # =>This Inner Loop Header: Depth=1
leaq -127999(%rax), %rcx
imulq %rax, %rcx
movq %rcx, %rdx
shrq $63, %rdx
addq %rcx, %rdx
shrq %rdx
movl %edx, 144116(%rsp,%rax,4)
incq %rax
cmpq $163999, %rax # imm = 0x2809F
jne .LBB5_13
# %bb.14: # %.preheader.preheader
movl $1, %eax
.p2align 4, 0x90
# inter_result[i] = i*(i-1)/2 for i = 1..64000.
.LBB5_15: # %.preheader
# =>This Inner Loop Header: Depth=1
leaq -1(%rax), %rcx
imulq %rax, %rcx
movq %rcx, %rdx
shrq $63, %rdx
addq %rcx, %rdx
shrq %rdx
movl %edx, 400112(%rsp,%rax,4)
incq %rax
cmpq $64001, %rax # imm = 0xFA01
jne .LBB5_15
# %bb.16:
# serialsum_accrossthread<<<{1,1}, {64,1}>>>(d_sum, 1000, 100000)
# r15 = r14 - 63 = 0x100000001, packed grid {x=1, y=1}.
leaq -63(%r14), %r15
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_18
# %bb.17:
movq 8(%rsp), %rax
movq %rax, 56(%rsp)
movl $1000, 16(%rsp) # imm = 0x3E8
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z23serialsum_accrossthreadPjij, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_18:
# CHECK(hipGetLastError()) at line 212.
callq hipGetLastError
movl $.L.str.6, %esi
movl %eax, %edi
movl $212, %edx
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# mergethread<<<{2,1}, {64,1}>>>(d_sum, 1000, 100000)
movq %r12, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_20
# %bb.19:
movq 8(%rsp), %rax
movq %rax, 56(%rsp)
movl $1000, 16(%rsp) # imm = 0x3E8
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11mergethreadPjij, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_20:
# CHECK(hipGetLastError()) at line 242.
callq hipGetLastError
movl $.L.str.6, %esi
movl %eax, %edi
movl $242, %edx
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# hipMemcpy(inter_sum, d_sum, 400000, hipMemcpyDeviceToHost); line 247.
movq 8(%rsp), %rsi
leaq 112(%rsp), %rdi
movl $400000, %edx # imm = 0x61A80
movl $2, %ecx
callq hipMemcpy
movl $.L.str.7, %esi
movl %eax, %edi
movl $247, %edx
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# Compare inter_result[i] vs inter_sum[i] for i = 64001..99999.
movl $64001, %r12d # imm = 0xFA01
.p2align 4, 0x90
.LBB5_21: # =>This Inner Loop Header: Depth=1
movl 400112(%rsp,%r12,4), %r13d
cmpl 112(%rsp,%r12,4), %r13d
jne .LBB5_22
# %bb.31: # in Loop: Header=BB5_21 Depth=1
incq %r12
cmpq $100000, %r12 # imm = 0x186A0
jne .LBB5_21
jmp .LBB5_32
.LBB5_22:
# Mismatch: print "i: " << i << "error!" << endl << a << "vs" << b << endl.
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $3, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r12d, %esi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.9, %esi
movl $6, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r14
testq %r14, %r14
je .LBB5_72
# %bb.23: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i103
cmpb $0, 56(%r14)
je .LBB5_25
# %bb.24:
movzbl 67(%r14), %eax
jmp .LBB5_26
.LBB5_25:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_26: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit106
movsbl %al, %esi
movq %rbp, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movq %r13, %rsi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r14
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 112(%rsp,%r12,4), %esi
movq %r14, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB5_72
# %bb.27: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i108
cmpb $0, 56(%r14)
je .LBB5_29
# %bb.28:
movzbl 67(%r14), %ecx
jmp .LBB5_30
.LBB5_29:
movq %r14, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB5_30: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit111
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
.LBB5_32: # %.loopexit169
# std::cout << "pass here 1" << std::endl;
movl $_ZSt4cout, %edi
movl $.L.str.11, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB5_72
# %bb.33: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i113
cmpb $0, 56(%r14)
je .LBB5_35
# %bb.34:
movzbl 67(%r14), %eax
jmp .LBB5_36
.LBB5_35:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_36: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit116
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
# hipMemcpy(d_sum, inter_sum, 400000, hipMemcpyHostToDevice); line 259.
# NOTE(review): the source calls serialsum_accrossblock(inter_sum, ...) first;
# its effect appears to have been inlined/hoisted by the optimizer — confirm.
movq 8(%rsp), %rdi
leaq 112(%rsp), %rsi
movl $400000, %edx # imm = 0x61A80
movl $1, %ecx
callq hipMemcpy
movl $.L.str.12, %esi
movl %eax, %edi
movl $259, %edx # imm = 0x103
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# Compare inter_result[i] vs inter_sum[i] for i = 1..99999.
movl $1, %r12d
.p2align 4, 0x90
.LBB5_37: # =>This Inner Loop Header: Depth=1
movl 400112(%rsp,%r12,4), %r13d
cmpl 112(%rsp,%r12,4), %r13d
jne .LBB5_38
# %bb.47: # in Loop: Header=BB5_37 Depth=1
incq %r12
cmpq $100000, %r12 # imm = 0x186A0
jne .LBB5_37
jmp .LBB5_48
.LBB5_38:
# Mismatch diagnostics (same shape as .LBB5_22 above).
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $3, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r12d, %esi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.9, %esi
movl $6, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r14
testq %r14, %r14
je .LBB5_72
# %bb.39: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i118
cmpb $0, 56(%r14)
je .LBB5_41
# %bb.40:
movzbl 67(%r14), %eax
jmp .LBB5_42
.LBB5_41:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_42: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit121
movsbl %al, %esi
movq %rbp, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movq %r13, %rsi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r14
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 112(%rsp,%r12,4), %esi
movq %r14, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB5_72
# %bb.43: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i123
cmpb $0, 56(%r14)
je .LBB5_45
# %bb.44:
movzbl 67(%r14), %ecx
jmp .LBB5_46
.LBB5_45:
movq %r14, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB5_46: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit126
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
.LBB5_48: # %.loopexit168
# std::cout << "pass here" << std::endl;
movl $_ZSt4cout, %edi
movl $.L.str.13, %esi
movl $9, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB5_72
# %bb.49: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i128
cmpb $0, 56(%r14)
movabsq $4294967360, %r12 # imm = 0x100000040
je .LBB5_51
# %bb.50:
movzbl 67(%r14), %eax
jmp .LBB5_52
.LBB5_51:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_52: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit131
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
# mergeblock<<<{1,1}, {64,1}>>>(d_sum, 1000, 100000); r12 re-holds the block dim.
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_54
# %bb.53:
movq 8(%rsp), %rax
movq %rax, 32(%rsp)
movl $1000, 4(%rsp) # imm = 0x3E8
movl $100000, 108(%rsp) # imm = 0x186A0
leaq 32(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
leaq 108(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10mergeblockPjij, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_54:
# CHECK(hipGetLastError()) at line 290.
callq hipGetLastError
movl $.L.str.6, %esi
movl %eax, %edi
movl $290, %edx # imm = 0x122
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# hipMemcpy(cal_result, d_sum, 400000, hipMemcpyDeviceToHost); line 295.
movq 8(%rsp), %rsi
movl $400000, %edx # imm = 0x61A80
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.L.str.14, %esi
movl %eax, %edi
movl $295, %edx # imm = 0x127
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# hipFree(d_in) / hipFree(d_sum); lines 296-297.
movq 72(%rsp), %rdi
callq hipFree
movl $.L.str.15, %esi
movl %eax, %edi
movl $296, %edx # imm = 0x128
callq _ZL5checkI10hipError_tEvT_PKcS3_i
movq 8(%rsp), %rdi
callq hipFree
movl $.L.str.16, %esi
movl %eax, %edi
movl $297, %edx # imm = 0x129
callq _ZL5checkI10hipError_tEvT_PKcS3_i
# Final comparison: result[i] vs cal_result[i] for i = 0..99999.
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_55: # =>This Inner Loop Header: Depth=1
movl 800112(%rsp,%r14,4), %r15d
cmpl (%rbx,%r14,4), %r15d
jne .LBB5_56
# %bb.65: # in Loop: Header=BB5_55 Depth=1
incq %r14
cmpq $100000, %r14 # imm = 0x186A0
jne .LBB5_55
jmp .LBB5_66
.LBB5_56:
# Mismatch diagnostics for the final comparison.
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $3, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movq %r14, %rsi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r12
movl $.L.str.9, %esi
movl $6, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %r13
testq %r13, %r13
je .LBB5_72
# %bb.57: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i133
cmpb $0, 56(%r13)
je .LBB5_59
# %bb.58:
movzbl 67(%r13), %eax
jmp .LBB5_60
.LBB5_59:
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_60: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit136
movsbl %al, %esi
movq %r12, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movq %r15, %rsi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r15
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%rbx,%r14,4), %esi
movq %r15, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_72
# %bb.61: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i138
cmpb $0, 56(%rbx)
je .LBB5_63
# %bb.62:
movzbl 67(%rbx), %ecx
jmp .LBB5_64
.LBB5_63:
movq %rbx, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB5_64: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit141
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
cmpq $100000, %r14 # imm = 0x186A0
jne .LBB5_71
.LBB5_66: # %.critedge
# All entries matched: std::cout << "correct" << std::endl;
movl $_ZSt4cout, %edi
movl $.L.str.17, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB5_72
# %bb.67: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i143
cmpb $0, 56(%rbx)
je .LBB5_69
# %bb.68:
movzbl 67(%rbx), %eax
jmp .LBB5_70
.LBB5_69:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_70: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit146
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
.LBB5_71:
# return 0.
xorl %eax, %eax
addq $1600120, %rsp # imm = 0x186A78
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB5_72:
# Null ctype facet: the iostream machinery throws std::bad_cast.
.cfi_def_cfa_offset 1600176
callq _ZSt16__throw_bad_castv
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
# -----------------------------------------------------------------------------
# static void check<hipError_t>(hipError_t err, const char *expr, const char *file, int line)
# edi = error code, rsi = stringified expression, rdx = source line number
# (the file string is loaded from .L.str.2 inside the error path).
# Fast path: err == hipSuccess -> plain return, no frame built.
# Error path: prints "CUDA error at: <file>:<line>\n<message> <expr>\n" to
# std::cerr and terminates the process via exit(1) (no return).
# -----------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function _ZL5checkI10hipError_tEvT_PKcS3_i
.type _ZL5checkI10hipError_tEvT_PKcS3_i,@function
_ZL5checkI10hipError_tEvT_PKcS3_i: # @_ZL5checkI10hipError_tEvT_PKcS3_i
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB6_2
# %bb.1:
retq
.LBB6_2:
# Save err/expr/line across the stream calls in callee-saved regs.
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movl %edi, %ebp                         # ebp = error code
movl $_ZSt4cerr, %edi
movq %rsi, %rbx                         # rbx = expression string
movl $.L.str.18, %esi
movl %edx, %r14d                        # r14d = line number
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl $.L.str.2, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl $.L.str.19, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movl %r14d, %esi
callq _ZNSolsEi
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
# hipGetErrorString(err) -> human-readable message.
movl %ebp, %edi
callq hipGetErrorString
movl $_ZSt4cerr, %edi
movq %rax, %rsi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl $.L.str.20, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movq %rbx, %rsi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
movl $1, %edi
callq exit                              # noreturn; no epilogue needed
.Lfunc_end6:
.size _ZL5checkI10hipError_tEvT_PKcS3_i, .Lfunc_end6-_ZL5checkI10hipError_tEvT_PKcS3_i
.cfi_endproc
# -- End function
# -----------------------------------------------------------------------------
# Module constructor (run from .init_array before main).  Registers the
# embedded GPU fat binary exactly once, then registers each of the four
# kernel handles with the runtime and arranges for __hip_module_dtor to run
# at process exit via atexit (tail call).
# The 32 bytes of zeroed stack passed via the stack are the optional
# dim/size out-parameters of __hipRegisterFunction.
# -----------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
# Register the fat binary only on the first call.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
# Register prefixsum.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9prefixsumPjS_ij, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register serialsum_accrossthread.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23serialsum_accrossthreadPjij, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register mergethread.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11mergethreadPjij, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Register mergeblock.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10mergeblockPjij, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
# -----------------------------------------------------------------------------
# Module destructor (registered via atexit in __hip_module_ctor).
# Unregisters the fat binary if it was registered and clears the cached
# handle so a second invocation is a no-op.  The pushq %rax exists only to
# realign the stack to 16 bytes for the call.
# -----------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
# =============================================================================
# Data sections.
# Kernel "handles": each public kernel symbol is an 8-byte object holding the
# address of its host launch stub; taking the kernel's address in C++ yields
# this handle, which hipLaunchKernel resolves to the device code.
# =============================================================================
.type _Z9prefixsumPjS_ij,@object # @_Z9prefixsumPjS_ij
.section .rodata,"a",@progbits
.globl _Z9prefixsumPjS_ij
.p2align 3, 0x0
_Z9prefixsumPjS_ij:
.quad _Z24__device_stub__prefixsumPjS_ij
.size _Z9prefixsumPjS_ij, 8
.type _Z23serialsum_accrossthreadPjij,@object # @_Z23serialsum_accrossthreadPjij
.globl _Z23serialsum_accrossthreadPjij
.p2align 3, 0x0
_Z23serialsum_accrossthreadPjij:
.quad _Z38__device_stub__serialsum_accrossthreadPjij
.size _Z23serialsum_accrossthreadPjij, 8
.type _Z11mergethreadPjij,@object # @_Z11mergethreadPjij
.globl _Z11mergethreadPjij
.p2align 3, 0x0
_Z11mergethreadPjij:
.quad _Z26__device_stub__mergethreadPjij
.size _Z11mergethreadPjij, 8
.type _Z10mergeblockPjij,@object # @_Z10mergeblockPjij
.globl _Z10mergeblockPjij
.p2align 3, 0x0
_Z10mergeblockPjij:
.quad _Z25__device_stub__mergeblockPjij
.size _Z10mergeblockPjij, 8
# String constants: progress/diagnostic messages and the stringified CHECK()
# expressions passed to check<hipError_t>().
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "data preparation done"
.size .L.str, 22
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "hipMalloc((void**)&d_in,n*sizeof(unsigned int))"
.size .L.str.1, 48
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/Haoping-Xiao/Parallel-Programming/main/prefixsum/prefixsum.hip"
.size .L.str.2, 120
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "hipMalloc((void**)&d_sum,n*sizeof(unsigned int))"
.size .L.str.3, 49
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "hipMemset(d_sum,0,n*sizeof(unsigned int))"
.size .L.str.4, 42
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "hipMemcpy(d_in,data,n * sizeof(unsigned int), hipMemcpyHostToDevice)"
.size .L.str.5, 69
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "hipGetLastError()"
.size .L.str.6, 18
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "hipMemcpy(inter_sum, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)"
.size .L.str.7, 77
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "i: "
.size .L.str.8, 4
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "error!"
.size .L.str.9, 7
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "vs"
.size .L.str.10, 3
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "pass here 1"
.size .L.str.11, 12
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "hipMemcpy(d_sum, inter_sum,n * sizeof(unsigned int), hipMemcpyHostToDevice)"
.size .L.str.12, 76
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "pass here"
.size .L.str.13, 10
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "hipMemcpy(cal_result, d_sum, n * sizeof(unsigned int), hipMemcpyDeviceToHost)"
.size .L.str.14, 78
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "hipFree(d_in)"
.size .L.str.15, 14
.type .L.str.16,@object # @.str.16
.L.str.16:
.asciz "hipFree(d_sum)"
.size .L.str.16, 15
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz "correct"
.size .L.str.17, 8
.type .L.str.18,@object # @.str.18
.L.str.18:
.asciz "CUDA error at: "
.size .L.str.18, 16
.type .L.str.19,@object # @.str.19
.L.str.19:
.asciz ":"
.size .L.str.19, 2
.type .L.str.20,@object # @.str.20
.L.str.20:
.asciz " "
.size .L.str.20, 2
# Mangled kernel names passed to __hipRegisterFunction by the module ctor.
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9prefixsumPjS_ij"
.size .L__unnamed_1, 19
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z23serialsum_accrossthreadPjij"
.size .L__unnamed_2, 32
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z11mergethreadPjij"
.size .L__unnamed_3, 20
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z10mergeblockPjij"
.size .L__unnamed_4, 19
# Fat-binary wrapper: magic 0x48495046 ("HIPF"), version 1, pointer to the
# embedded device code blob (__hip_fatbin, defined elsewhere).
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Cached fat-binary registration handle, guarded in ctor/dtor above.
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__prefixsumPjS_ij
.addrsig_sym _Z38__device_stub__serialsum_accrossthreadPjij
.addrsig_sym _Z26__device_stub__mergethreadPjij
.addrsig_sym _Z25__device_stub__mergeblockPjij
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9prefixsumPjS_ij
.addrsig_sym _Z23serialsum_accrossthreadPjij
.addrsig_sym _Z11mergethreadPjij
.addrsig_sym _Z10mergeblockPjij
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt4cerr
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
#include<algorithm>
#include<iomanip>
#include<time.h>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
#include <thrust/iterator/permutation_iterator.h>
#define N (8<<27)
#define M N/10
// Unary functor callable from both host and device code: maps x to x + 1.
// (Declared but not used by main below; kept for API compatibility.)
template<class T>
class plusOne{
public:
__device__ __host__ T operator() (T value){
T incremented = value + 1;
return incremented;
}
};
// Returns a pseudo-random index in [0, 1000).
//
// Bug fix: the original re-seeded the generator with srand(time(NULL)) on
// EVERY call.  time() has one-second resolution, so all calls made within
// the same second reset rand() to the same state and returned the exact
// same value — std::generate(map.begin(), map.end(), f) therefore filled
// the whole map with one repeated index.  main() already seeds once at
// startup; here we only draw from the generator.
int f()
{
return rand() % 1000;
}
// Gather-reduce benchmark: builds an N-element random vector on the device,
// an M-element index map, then sums the mapped elements with thrust::reduce
// over a permutation iterator, timing the reduction with CUDA events.
// NOTE(review): f() re-seeds per call with 1-second resolution, so map is
// likely filled with a single repeated index — confirm against f()'s fix.
int main(){
printf("size %d \n",N);
srand(time(NULL)); // seed once for the std::generate calls below
thrust::host_vector<int> source(N);
std::generate(source.begin(),source.end(),rand);
thrust::device_vector<int> dsource=source; // host -> device copy
thrust::host_vector<int> map(M);
/*
map[0] = 3;
map[1] = 1;
map[2] = 0;
map[3] = 5;
*/
std::generate(map.begin(),map.end(),f); // indices in [0, 1000)
thrust::device_vector<int> dmap=map;
cudaEvent_t start,stop;
float elapsed;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// Sum source[map[i]] for i in [0, M) without materializing the gather.
int sum = thrust::reduce(thrust::make_permutation_iterator(dsource.begin(), dmap.begin()), thrust::make_permutation_iterator(dsource.begin(), dmap.end()));
std::cout << "sum :" << sum << std::endl;
cudaEventRecord(stop,0);
cudaEventSynchronize(stop); // wait so the elapsed time is valid
cudaEventElapsedTime(&elapsed,start,stop);
std::cout<<"gpu :"<<elapsed<<"ms"<<std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
#include<iostream>
#include<algorithm>
#include<iomanip>
#include<time.h>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
#include <thrust/iterator/permutation_iterator.h>
#define N (8<<27)
#define M N/10
// Generic "add one" functor, callable from both host and device code.
// Same contract as before: operator() yields its argument incremented by 1.
template<class T>
class plusOne{
public:
__device__ __host__ T operator() (T value){
T incremented = value + 1;
return incremented;
}
};
// Returns a pseudo-random gather index in [0, 1000).
//
// Fix: the original reseeded with srand(time(NULL)) on *every* call. Because
// time(NULL) has one-second resolution, every call made within the same
// second restarted the generator at the same seed and returned the same
// value -- the whole index map degenerated to a single repeated index.
// main() already seeds the generator once, so the per-call reseed is removed.
int f()
{
return rand() % 1000;
}
// Benchmark: gather-reduce over a random index map with
// thrust::permutation_iterator, timed with HIP events.
int main(){
printf("size %d \n",N);
srand(time(NULL));
// N random source values, mirrored onto the device.
thrust::host_vector<int> source(N);
std::generate(source.begin(),source.end(),rand);
thrust::device_vector<int> dsource=source;
thrust::host_vector<int> map(M);
/*
map[0] = 3;
map[1] = 1;
map[2] = 0;
map[3] = 5;
*/
// M gather indices produced by f(), i.e. values in [0, 1000).
std::generate(map.begin(),map.end(),f);
thrust::device_vector<int> dmap=map;
hipEvent_t start,stop;
float elapsed;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// Sum of dsource[dmap[i]] over all i; the pair of permutation iterators
// spans exactly the M mapped elements.
int sum = thrust::reduce(thrust::make_permutation_iterator(dsource.begin(), dmap.begin()), thrust::make_permutation_iterator(dsource.begin(), dmap.end()));
std::cout << "sum :" << sum << std::endl;
// NOTE(review): the std::cout above sits inside the timed region, so the
// reported "gpu" time includes host printing -- confirm that is intended.
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed,start,stop);
std::cout<<"gpu :"<<elapsed<<"ms"<<std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <fstream>
#include <iterator>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <string>
#include <sstream>
#include <iomanip>
#include <math.h>
#include <stdio.h>
//Define the Z direction size as a global variable
#define z 400
#define blockSize 128
#define energySize 64000000
//define macro for error checking
#define cudaCheckError(){ \
cudaError_t err = cudaGetLastError(); \
if(err != cudaSuccess){ \
std::cout << "Error in " << __FILE__ << " at line " << __LINE__ << " : " << cudaGetErrorString(err) << std::endl; \
exit(EXIT_FAILURE); \
} \
}
// Magnitude of a signed int for use inside kernels: negate negative inputs,
// pass non-negative inputs through unchanged.
__inline__ __device__ int absolute(int value){
    if (value < 0) {
        return -value;
    }
    return value;
}
/***********************************************************************************
BeamShift takes in energy array from the host loop and moves the coordinates
as specified to move the beam grid, given by the starting value 400*y + x, which
corresponds to the beginning of the offset to write the energy value into
corresponding to the "move". Alternatively, think of the "move" in x-y direction as
having to be mapped back into a location in the 64M element energy array, since the
energy array contains all information about how the field is distributed and paraview
does the coordinate assignation.
************************************************************************************/
// Kernel: accumulates the field shifted by (move_x, move_y) onto the original
// field, writing the result to `temp`. Each thread handles 8 elements spaced
// blockSize apart (8-way thread-level parallelism).
// NOTE(review): only the base index `id` is checked against `size`; the loop
// still touches id+blockSize*7 and tid+blockSize*7, which can run past `size`
// for threads near the end -- confirm the launch configuration (gridSize/8
// blocks in the caller) guarantees this cannot happen.
__global__ void beamShift(double* energy, double *temp, int move_x, int move_y, const int size){
// Base element index for this thread.
int tid = blockIdx.x*blockDim.x*8 + threadIdx.x;
// Source index after the move; 400 is the grid width in x, so move_y steps
// whole rows. absolute() folds negative offsets back to a valid index
// rather than clamping them.
int id = absolute(400*move_y + move_x + tid);
if(id < size){
#pragma unroll
for(int i = 0; i < 8; i++){
temp[tid+blockSize*i] = energy[id+blockSize*i]+energy[tid+blockSize*i];
}
}
}
/***********************************************************************************
Sum takes in each array after the move and sums them back together to get the correct
energy values for the field. This function exploits a thread level parallelism of 32
************************************************************************************/
// Kernel: element-wise accumulation temp[i] += energy[i], each thread
// handling 32 elements spaced blockSize apart.
// NOTE(review): only the base index `tid` is checked against `size`; the loop
// still touches tid+31*blockSize -- confirm the gridSize/32 launch in the
// caller keeps every access in range. Also note the caller passes a
// cudaMallocHost (pinned host) pointer as `temp` -- presumably relying on
// zero-copy access; verify this is intentional.
__global__ void sum(double* temp, double* energy, const int size){
int tid = blockIdx.x*blockDim.x*32 + threadIdx.x;
if(tid < size){
#pragma unroll
for(int i = 0; i < 32; i++){
temp[tid + i*blockSize] += energy[tid + i*blockSize];
}
}
}
//Read in energy source file data
// Parses "IMPT_source.dat": a measurement date, a group count, an 11-token
// header line, then one row of 11 doubles per beamlet group.
// Only eNomVec, xVec, yVec, nxVec, nyVec and weightVec are populated; the
// rangeVec/sigmaXVec/sigmaYVec/eMeanVec (and sigmaEVec) outputs are accepted
// but left untouched -- their push_backs are commented out below.
// NOTE(review): if the file cannot be opened this only prints an error and
// returns, leaving all output vectors empty and numGroups uninitialized;
// callers must not rely on numGroups in that case.
void getSourceFile(std::vector<double>& eNomVec, std::vector<double>& rangeVec,
std::vector<double>& sigmaXVec,std::vector<double>& sigmaYVec,
std::vector<double>& eMeanVec, std::vector<double>& sigmaEVec,
std::vector<double>& xVec, std::vector<double>& yVec,
std::vector<double>& nxVec,std::vector<double>& nyVec,
std::vector<double>& weightVec, int& numGroups)
{
int dateOfMeasurement;
long int numberOfGroups;
double eNom, range, sigmaX, sigmaY, eMean, sigmaE, xcoord, ycoord, weight, nx, ny;
std::string line;
//declare and open file
std::ifstream ifile("IMPT_source.dat", std::ios::in);
if(!ifile){
std::cout << "Error, IMPT_source not found" << std::endl;
}else{
//read in date of measurement
ifile >> dateOfMeasurement;
//read in number of groups
ifile >> numberOfGroups;
numGroups = numberOfGroups;
//skip over the 11-column header line, one token at a time
std::string e, r, x, y, m, s, nx1, ny1, x1, y1, w;
ifile >> e;
ifile >> r;
ifile >> x;
ifile >> y;
ifile >> m;
ifile >> s;
ifile >> x1;
ifile >> y1;
ifile >> nx1;
ifile >> ny1;
ifile >> w;
//initialize memory for faster read in
xVec.reserve(numberOfGroups);
yVec.reserve(numberOfGroups);
nxVec.reserve(numberOfGroups);
nyVec.reserve(numberOfGroups);
weightVec.reserve(numberOfGroups);
eNomVec.reserve(numberOfGroups);
//read in data to vectors; column order must match the file exactly
for(int i = 0; i < numberOfGroups; i++){
ifile >> eNom;
ifile >> range;
ifile >> sigmaX;
ifile >> sigmaY;
ifile >> eMean;
ifile >> sigmaE;
ifile >> xcoord;
ifile >> ycoord;
ifile >> nx;
ifile >> ny;
ifile >> weight;
eNomVec.push_back(eNom);
// rangeVec.push_back(range);
// sigmaXVec.push_back(sigmaX);
// sigmaYVec.push_back(sigmaY);
// eMeanVec.push_back(eMean);
xVec.push_back(xcoord);
yVec.push_back(ycoord);
nxVec.push_back(nx);
nyVec.push_back(ny);
weightVec.push_back(weight);
}
}
}
// Driver: for each beamlet index in [argv[1], argv[2]] (1-based), read the
// beamlet dose grid from its .bin file, replicate it across the spot grid
// via the beamShift/sum kernels, correct for the extra copies, and write the
// summed field back out as a .bin file. argv[3] selects the CUDA device.
int main(int argc, char** argv){
//get command line arguments for the elements to loop over and error check
if(argc < 2){
std::cout << "Too few arguments, need two for range of beam values" << std::endl;
exit(EXIT_FAILURE);
}else if(argc > 5 ){
std::cout << "Too many arguments, need two for range of beam values" << std::endl;
exit(EXIT_FAILURE);
}else if(atoi(argv[1]) <= 0 || atoi(argv[1]) > 94 || atoi(argv[2]) <= 0 || atoi(argv[2]) > 94){
std::cout << "Arguments out of range, must be in range [1,94]" << std::endl;
exit(EXIT_FAILURE);
}
//declare stuff for source file read
int numberOfGroups;
std::vector<double> eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight;
getSourceFile(eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight, numberOfGroups);
//initialize device for faster update of values using all 16 GPUs as defined by the run.sh script
//NOTE(review): argv[3] is read without first checking argc >= 4.
cudaSetDevice(atoi(argv[3]));
for(int master = atoi(argv[1])-1; master < atoi(argv[2]); master++){
//declare stream size variables and open file/check for errors
std::streampos bufferSize;
//create fileName to read in data (zero-padded index below 10)
std::ostringstream fName;
if(master < 9){
fName << std::fixed << "GyPerMU3D_beamlet_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV.bin";
}else{
fName << std::fixed << "GyPerMU3D_beamlet_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV.bin";
}
std::string fileName = fName.str();
std::ifstream ifile(fileName.c_str(), std::ios::in | std::ios::binary);
if(!ifile){
std::cout << "Error, no file found" << std::endl;
exit(1);
}
//get file size by seeking to the end, then rewind
ifile.seekg(0, std::ios::end);
bufferSize = ifile.tellg();
ifile.seekg(0, std::ios::beg);
//declare buffer
std::vector<double> buffer(bufferSize/sizeof(double));
//read in data
ifile.read(reinterpret_cast<char*>(buffer.data()), bufferSize);
//declare size of data for later malloc's
int size = bufferSize/(sizeof(double)*z);
//fill new array of 400x400 field correctly with zeroes in each layer for extra points out of range
//(the measured 200x200 grid is embedded into the [100,300) x [100,300) window)
std::vector<double> key;
for(int k = 0; k < 400; k++){
for(int i = 0; i < 400; i++){
for(int j = 0; j < 400; j++){
if(j < 100 && i < 100){
key.push_back(0);
}else if(j >= 100 && j < 300 && i >= 100 && i < 300){
key.push_back(buffer[(200*(i-100) + (j-100))+(k*40000)]);
}else{
key.push_back(0);
}
}
}
}
//copy memory from buffer to energy
//NOTE(review): energy is malloc'd on every master iteration and there is no
//matching free() in this loop -- leaks energySize*8 bytes per beamlet.
double *energy;
energy = (double*)malloc(energySize*sizeof(double));
std::copy(key.begin(), key.end(), energy);
//free memory from buffer
std::vector<double>().swap(buffer);
///create #spots variable;
int spots = nx[master]*ny[master];
//declare move arrays for grid distribution
std::vector<float> move1, move2;
move1.reserve(spots);
move2.reserve(spots);
//NOTE(review): variable-length arrays are a compiler extension, not standard C++.
int moveX[spots], moveY[spots];
double fieldSize = -1*(xCoord[master]+yCoord[master])/(nx[master]-1);
double field = -1*(xCoord[master]+yCoord[master]);
//declare spacings
int spaceX = ceil(field*10/(nx[master]));
int spaceY = ceil(field*10/(ny[master]));
for(int i = 0, x = xCoord[master]*10; i < nx[master]; i++, x += spaceX){
for(int j = 0, y = yCoord[master]*10; j < ny[master]; j++, y+= spaceY){
move1.push_back(x);
move2.push_back(y);
}
}
//copy to arrays for cuda since vectors are more annoying to work with as they are heap objects
std::copy(move1.begin(), move1.end(), moveX);
std::copy(move2.begin(), move2.end(), moveY);
/***********************************************************************************************************/
//Declare gridSize for cuda kernels
int gridSize = (energySize+blockSize-1)/blockSize;
//declare host arrays
double *temp_energy, *h_energy;
//allocate an array to hold the sum of each movement on the device
//NOTE(review): temp_energy is pinned *host* memory, is handed straight to
//the sum kernel below, and is never zero-initialized before the kernel
//accumulates into it -- confirm zero-copy access is intended and that
//stale contents cannot leak into the result.
cudaMallocHost((void**)&temp_energy, energySize*sizeof(double));
cudaCheckError();
double *d_energy;
cudaMalloc((void**)&d_energy, energySize*sizeof(double));
cudaCheckError();
cudaMemcpy(d_energy, energy, energySize*sizeof(double), cudaMemcpyHostToDevice);
cudaCheckError();
//loop to perform all the moves.
for(int i = 0; i < spots; i++){
double *d_temp;
cudaMalloc((void**)&d_temp, energySize*sizeof(double));
cudaCheckError();
//kernel to perform all the moves for the grid
//NOTE(review): kernel launches are not followed by cudaCheckError().
beamShift<<<gridSize/8, blockSize>>>(d_energy, d_temp, moveX[i], moveY[i], energySize);
sum<<<gridSize/32, blockSize>>>(temp_energy, d_temp, energySize);
cudaFree(d_temp);
}
//read off the temp_energy vector from the device
cudaMallocHost((void**)&h_energy, energySize*sizeof(double));
cudaCheckError();
//NOTE(review): this async copy is not synchronized before the CPU loop below
//reads h_energy, and the source is a host pointer despite the DeviceToHost
//kind -- verify ordering and direction.
cudaMemcpyAsync(h_energy, temp_energy, energySize*sizeof(double), cudaMemcpyDeviceToHost);
cudaCheckError();
//final host loop to subtract off the extra copies of the unshifted field
for(int i = 0; i < energySize; i++){
h_energy[i] -= energy[i]*(spots);
//h_energy[i] *= weight[master]; TODO: Uncomment for weighted run
if(h_energy[i] < 0){
h_energy[i] = 0;
}
}
//read out to 94 files individually
std::ostringstream OName;
if(master < 9){
OName << std::fixed << "GyPerMU3D_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by"
<< std::setprecision(0) << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin";
}else{
OName << std::fixed << "GyPerMU3D_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by"
<< std::setprecision(0) << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin";
}
std::string fileNameOut = OName.str();
std::ofstream ofile(fileNameOut.c_str() , std::ios::out | std::ios::binary);
ofile.write(reinterpret_cast<char*>(h_energy), energySize*sizeof(double));
cudaDeviceReset();
}//end of master loop
}//end of main | code for sm_80
Function : _Z3sumPdS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ USHF.L.U32 UR4, UR4, 0x5, URZ ; /* 0x0000000504047899 */
/* 0x000fe2000800063f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e2a0000002100 */
/*0050*/ IMAD R4, R4, UR4, R3 ; /* 0x0000000404047c24 */
/* 0x001fca000f8e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x000fe200000001ff */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00a0*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fc800078e0205 */
/*00b0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe200078e0205 */
/*00c0*/ LDG.E.64 R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea8000c1e1b00 */
/*00d0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea4000c1e1b00 */
/*00e0*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x0040640000000008 */
/*00f0*/ LDG.E.64 R8, [R2.64+0x400] ; /* 0x0004000402087981 */
/* 0x001eaa000c1e1b00 */
/*0100*/ STG.E.64 [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x0021e8000c101b04 */
/*0110*/ LDG.E.64 R10, [R4.64+0x400] ; /* 0x00040004040a7981 */
/* 0x000ea8000c1e1b00 */
/*0120*/ LDG.E.64 R6, [R2.64+0x1000] ; /* 0x0010000402067981 */
/* 0x001ee2000c1e1b00 */
/*0130*/ DADD R8, R8, R10 ; /* 0x0000000008087229 */
/* 0x004046000000000a */
/*0140*/ LDG.E.64 R10, [R2.64+0x800] ; /* 0x00080004020a7981 */
/* 0x001ea8000c1e1b00 */
/*0150*/ STG.E.64 [R2.64+0x400], R8 ; /* 0x0004000802007986 */
/* 0x0021e8000c101b04 */
/*0160*/ LDG.E.64 R12, [R4.64+0x800] ; /* 0x00080004040c7981 */
/* 0x000ea8000c1e1b00 */
/*0170*/ LDG.E.64 R8, [R2.64+0x1400] ; /* 0x0014000402087981 */
/* 0x001f22000c1e1b00 */
/*0180*/ DADD R10, R10, R12 ; /* 0x000000000a0a7229 */
/* 0x004046000000000c */
/*0190*/ LDG.E.64 R12, [R2.64+0xc00] ; /* 0x000c0004020c7981 */
/* 0x001ea8000c1e1b00 */
/*01a0*/ STG.E.64 [R2.64+0x800], R10 ; /* 0x0008000a02007986 */
/* 0x0021e8000c101b04 */
/*01b0*/ LDG.E.64 R14, [R4.64+0xc00] ; /* 0x000c0004040e7981 */
/* 0x000ea8000c1e1b00 */
/*01c0*/ LDG.E.64 R10, [R2.64+0x1800] ; /* 0x00180004020a7981 */
/* 0x001f62000c1e1b00 */
/*01d0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e0e000000000e */
/*01e0*/ STG.E.64 [R2.64+0xc00], R12 ; /* 0x000c000c02007986 */
/* 0x0011e8000c101b04 */
/*01f0*/ LDG.E.64 R14, [R4.64+0x1000] ; /* 0x00100004040e7981 */
/* 0x000ee8000c1e1b00 */
/*0200*/ LDG.E.64 R12, [R2.64+0x1c00] ; /* 0x001c0004020c7981 */
/* 0x001ea2000c1e1b00 */
/*0210*/ DADD R6, R6, R14 ; /* 0x0000000006067229 */
/* 0x008e0e000000000e */
/*0220*/ STG.E.64 [R2.64+0x1000], R6 ; /* 0x0010000602007986 */
/* 0x0011e8000c101b04 */
/*0230*/ LDG.E.64 R14, [R4.64+0x1400] ; /* 0x00140004040e7981 */
/* 0x000f28000c1e1b00 */
/*0240*/ LDG.E.64 R6, [R2.64+0x2000] ; /* 0x0020000402067981 */
/* 0x001ee2000c1e1b00 */
/*0250*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */
/* 0x010e0e000000000e */
/*0260*/ STG.E.64 [R2.64+0x1400], R8 ; /* 0x0014000802007986 */
/* 0x0011e8000c101b04 */
/*0270*/ LDG.E.64 R14, [R4.64+0x1800] ; /* 0x00180004040e7981 */
/* 0x000f68000c1e1b00 */
/*0280*/ LDG.E.64 R8, [R2.64+0x2400] ; /* 0x0024000402087981 */
/* 0x001f22000c1e1b00 */
/*0290*/ DADD R10, R10, R14 ; /* 0x000000000a0a7229 */
/* 0x020e0e000000000e */
/*02a0*/ STG.E.64 [R2.64+0x1800], R10 ; /* 0x0018000a02007986 */
/* 0x0011e8000c101b04 */
/*02b0*/ LDG.E.64 R14, [R4.64+0x1c00] ; /* 0x001c0004040e7981 */
/* 0x000ea8000c1e1b00 */
/*02c0*/ LDG.E.64 R10, [R2.64+0x2800] ; /* 0x00280004020a7981 */
/* 0x001f62000c1e1b00 */
/*02d0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e0e000000000e */
/*02e0*/ STG.E.64 [R2.64+0x1c00], R12 ; /* 0x001c000c02007986 */
/* 0x0011e8000c101b04 */
/*02f0*/ LDG.E.64 R14, [R4.64+0x2000] ; /* 0x00200004040e7981 */
/* 0x000ee8000c1e1b00 */
/*0300*/ LDG.E.64 R12, [R2.64+0x2c00] ; /* 0x002c0004020c7981 */
/* 0x001ea2000c1e1b00 */
/*0310*/ DADD R6, R6, R14 ; /* 0x0000000006067229 */
/* 0x008e0e000000000e */
/*0320*/ STG.E.64 [R2.64+0x2000], R6 ; /* 0x0020000602007986 */
/* 0x0011e8000c101b04 */
/*0330*/ LDG.E.64 R14, [R4.64+0x2400] ; /* 0x00240004040e7981 */
/* 0x000f28000c1e1b00 */
/*0340*/ LDG.E.64 R6, [R2.64+0x3000] ; /* 0x0030000402067981 */
/* 0x001ee2000c1e1b00 */
/*0350*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */
/* 0x010e0e000000000e */
/*0360*/ STG.E.64 [R2.64+0x2400], R8 ; /* 0x0024000802007986 */
/* 0x0011e8000c101b04 */
/*0370*/ LDG.E.64 R14, [R4.64+0x2800] ; /* 0x00280004040e7981 */
/* 0x000f68000c1e1b00 */
/*0380*/ LDG.E.64 R8, [R2.64+0x3400] ; /* 0x0034000402087981 */
/* 0x001f22000c1e1b00 */
/*0390*/ DADD R10, R10, R14 ; /* 0x000000000a0a7229 */
/* 0x020e0e000000000e */
/*03a0*/ STG.E.64 [R2.64+0x2800], R10 ; /* 0x0028000a02007986 */
/* 0x0011e8000c101b04 */
/*03b0*/ LDG.E.64 R14, [R4.64+0x2c00] ; /* 0x002c0004040e7981 */
/* 0x000ea8000c1e1b00 */
/*03c0*/ LDG.E.64 R10, [R2.64+0x3800] ; /* 0x00380004020a7981 */
/* 0x001f62000c1e1b00 */
/*03d0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e0e000000000e */
/*03e0*/ STG.E.64 [R2.64+0x2c00], R12 ; /* 0x002c000c02007986 */
/* 0x0011e8000c101b04 */
/*03f0*/ LDG.E.64 R14, [R4.64+0x3000] ; /* 0x00300004040e7981 */
/* 0x000ee8000c1e1b00 */
/*0400*/ LDG.E.64 R12, [R2.64+0x3c00] ; /* 0x003c0004020c7981 */
/* 0x001ea2000c1e1b00 */
/*0410*/ DADD R6, R6, R14 ; /* 0x0000000006067229 */
/* 0x008e0e000000000e */
/*0420*/ STG.E.64 [R2.64+0x3000], R6 ; /* 0x0030000602007986 */
/* 0x0011e8000c101b04 */
/*0430*/ LDG.E.64 R14, [R4.64+0x3400] ; /* 0x00340004040e7981 */
/* 0x000f28000c1e1b00 */
/*0440*/ LDG.E.64 R6, [R2.64+0x4000] ; /* 0x0040000402067981 */
/* 0x001ee2000c1e1b00 */
/*0450*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */
/* 0x010e0e000000000e */
/*0460*/ STG.E.64 [R2.64+0x3400], R8 ; /* 0x0034000802007986 */
/* 0x0011e8000c101b04 */
/*0470*/ LDG.E.64 R14, [R4.64+0x3800] ; /* 0x00380004040e7981 */
/* 0x000f68000c1e1b00 */
/*0480*/ LDG.E.64 R8, [R2.64+0x4400] ; /* 0x0044000402087981 */
/* 0x001f22000c1e1b00 */
/*0490*/ DADD R10, R10, R14 ; /* 0x000000000a0a7229 */
/* 0x020e0e000000000e */
/*04a0*/ STG.E.64 [R2.64+0x3800], R10 ; /* 0x0038000a02007986 */
/* 0x0011e8000c101b04 */
/*04b0*/ LDG.E.64 R14, [R4.64+0x3c00] ; /* 0x003c0004040e7981 */
/* 0x000ea8000c1e1b00 */
/*04c0*/ LDG.E.64 R10, [R2.64+0x4800] ; /* 0x00480004020a7981 */
/* 0x001f62000c1e1b00 */
/*04d0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e0e000000000e */
/*04e0*/ STG.E.64 [R2.64+0x3c00], R12 ; /* 0x003c000c02007986 */
/* 0x0011e8000c101b04 */
/*04f0*/ LDG.E.64 R14, [R4.64+0x4000] ; /* 0x00400004040e7981 */
/* 0x000ee8000c1e1b00 */
/*0500*/ LDG.E.64 R12, [R2.64+0x4c00] ; /* 0x004c0004020c7981 */
/* 0x001ea2000c1e1b00 */
/*0510*/ DADD R6, R6, R14 ; /* 0x0000000006067229 */
/* 0x008e0e000000000e */
/*0520*/ STG.E.64 [R2.64+0x4000], R6 ; /* 0x0040000602007986 */
/* 0x0011e8000c101b04 */
/*0530*/ LDG.E.64 R14, [R4.64+0x4400] ; /* 0x00440004040e7981 */
/* 0x000f28000c1e1b00 */
/*0540*/ LDG.E.64 R6, [R2.64+0x5000] ; /* 0x0050000402067981 */
/* 0x001ee2000c1e1b00 */
/*0550*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */
/* 0x010e0e000000000e */
/*0560*/ STG.E.64 [R2.64+0x4400], R8 ; /* 0x0044000802007986 */
/* 0x0011e8000c101b04 */
/*0570*/ LDG.E.64 R14, [R4.64+0x4800] ; /* 0x00480004040e7981 */
/* 0x000f68000c1e1b00 */
/*0580*/ LDG.E.64 R8, [R2.64+0x5400] ; /* 0x0054000402087981 */
/* 0x001f22000c1e1b00 */
/*0590*/ DADD R10, R10, R14 ; /* 0x000000000a0a7229 */
/* 0x020e0e000000000e */
/*05a0*/ STG.E.64 [R2.64+0x4800], R10 ; /* 0x0048000a02007986 */
/* 0x0011e8000c101b04 */
/*05b0*/ LDG.E.64 R14, [R4.64+0x4c00] ; /* 0x004c0004040e7981 */
/* 0x000ea8000c1e1b00 */
/*05c0*/ LDG.E.64 R10, [R2.64+0x5800] ; /* 0x00580004020a7981 */
/* 0x001f62000c1e1b00 */
/*05d0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e0e000000000e */
/*05e0*/ STG.E.64 [R2.64+0x4c00], R12 ; /* 0x004c000c02007986 */
/* 0x0011e8000c101b04 */
/*05f0*/ LDG.E.64 R14, [R4.64+0x5000] ; /* 0x00500004040e7981 */
/* 0x000ee8000c1e1b00 */
/*0600*/ LDG.E.64 R12, [R2.64+0x5c00] ; /* 0x005c0004020c7981 */
/* 0x001ea2000c1e1b00 */
/*0610*/ DADD R6, R6, R14 ; /* 0x0000000006067229 */
/* 0x008e0e000000000e */
/*0620*/ STG.E.64 [R2.64+0x5000], R6 ; /* 0x0050000602007986 */
/* 0x0011e8000c101b04 */
/*0630*/ LDG.E.64 R14, [R4.64+0x5400] ; /* 0x00540004040e7981 */
/* 0x000f28000c1e1b00 */
/*0640*/ LDG.E.64 R6, [R2.64+0x6000] ; /* 0x0060000402067981 */
/* 0x001ee2000c1e1b00 */
/*0650*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */
/* 0x010e0e000000000e */
/*0660*/ STG.E.64 [R2.64+0x5400], R8 ; /* 0x0054000802007986 */
/* 0x0011e8000c101b04 */
/*0670*/ LDG.E.64 R14, [R4.64+0x5800] ; /* 0x00580004040e7981 */
/* 0x000f68000c1e1b00 */
/*0680*/ LDG.E.64 R8, [R2.64+0x6400] ; /* 0x0064000402087981 */
/* 0x001f22000c1e1b00 */
/*0690*/ DADD R10, R10, R14 ; /* 0x000000000a0a7229 */
/* 0x020e0e000000000e */
/*06a0*/ STG.E.64 [R2.64+0x5800], R10 ; /* 0x0058000a02007986 */
/* 0x0011e8000c101b04 */
/*06b0*/ LDG.E.64 R14, [R4.64+0x5c00] ; /* 0x005c0004040e7981 */
/* 0x000ea8000c1e1b00 */
/*06c0*/ LDG.E.64 R10, [R2.64+0x6800] ; /* 0x00680004020a7981 */
/* 0x001f62000c1e1b00 */
/*06d0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e0e000000000e */
/*06e0*/ STG.E.64 [R2.64+0x5c00], R12 ; /* 0x005c000c02007986 */
/* 0x0011e8000c101b04 */
/*06f0*/ LDG.E.64 R14, [R4.64+0x6000] ; /* 0x00600004040e7981 */
/* 0x000ee8000c1e1b00 */
/*0700*/ LDG.E.64 R12, [R2.64+0x6c00] ; /* 0x006c0004020c7981 */
/* 0x001ea2000c1e1b00 */
/*0710*/ DADD R6, R6, R14 ; /* 0x0000000006067229 */
/* 0x008e0e000000000e */
/*0720*/ STG.E.64 [R2.64+0x6000], R6 ; /* 0x0060000602007986 */
/* 0x0011e8000c101b04 */
/*0730*/ LDG.E.64 R14, [R4.64+0x6400] ; /* 0x00640004040e7981 */
/* 0x000f28000c1e1b00 */
/*0740*/ LDG.E.64 R6, [R2.64+0x7000] ; /* 0x0070000402067981 */
/* 0x001ee2000c1e1b00 */
/*0750*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */
/* 0x010e0e000000000e */
/*0760*/ STG.E.64 [R2.64+0x6400], R8 ; /* 0x0064000802007986 */
/* 0x0011e8000c101b04 */
/*0770*/ LDG.E.64 R14, [R4.64+0x6800] ; /* 0x00680004040e7981 */
/* 0x000f68000c1e1b00 */
/*0780*/ LDG.E.64 R8, [R2.64+0x7400] ; /* 0x0074000402087981 */
/* 0x001f22000c1e1b00 */
/*0790*/ DADD R10, R10, R14 ; /* 0x000000000a0a7229 */
/* 0x020e0e000000000e */
/*07a0*/ STG.E.64 [R2.64+0x6800], R10 ; /* 0x0068000a02007986 */
/* 0x0011e8000c101b04 */
/*07b0*/ LDG.E.64 R14, [R4.64+0x6c00] ; /* 0x006c0004040e7981 */
/* 0x000ea8000c1e1b00 */
/*07c0*/ LDG.E.64 R10, [R2.64+0x7800] ; /* 0x00780004020a7981 */
/* 0x001f62000c1e1b00 */
/*07d0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e0e000000000e */
/*07e0*/ STG.E.64 [R2.64+0x6c00], R12 ; /* 0x006c000c02007986 */
/* 0x0011e8000c101b04 */
/*07f0*/ LDG.E.64 R14, [R4.64+0x7000] ; /* 0x00700004040e7981 */
/* 0x000ee8000c1e1b00 */
/*0800*/ LDG.E.64 R12, [R2.64+0x7c00] ; /* 0x007c0004020c7981 */
/* 0x001ea2000c1e1b00 */
/*0810*/ DADD R6, R6, R14 ; /* 0x0000000006067229 */
/* 0x008e0e000000000e */
/*0820*/ STG.E.64 [R2.64+0x7000], R6 ; /* 0x0070000602007986 */
/* 0x001fe8000c101b04 */
/*0830*/ LDG.E.64 R14, [R4.64+0x7400] ; /* 0x00740004040e7981 */
/* 0x000f24000c1e1b00 */
/*0840*/ DADD R8, R8, R14 ; /* 0x0000000008087229 */
/* 0x010e0e000000000e */
/*0850*/ STG.E.64 [R2.64+0x7400], R8 ; /* 0x0074000802007986 */
/* 0x001fe8000c101b04 */
/*0860*/ LDG.E.64 R14, [R4.64+0x7800] ; /* 0x00780004040e7981 */
/* 0x000f64000c1e1b00 */
/*0870*/ DADD R10, R10, R14 ; /* 0x000000000a0a7229 */
/* 0x020e0e000000000e */
/*0880*/ STG.E.64 [R2.64+0x7800], R10 ; /* 0x0078000a02007986 */
/* 0x001fe8000c101b04 */
/*0890*/ LDG.E.64 R14, [R4.64+0x7c00] ; /* 0x007c0004040e7981 */
/* 0x000ea4000c1e1b00 */
/*08a0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e0e000000000e */
/*08b0*/ STG.E.64 [R2.64+0x7c00], R12 ; /* 0x007c000c02007986 */
/* 0x001fe2000c101b04 */
/*08c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*08d0*/ BRA 0x8d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z9beamShiftPdS_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ USHF.L.U32 UR4, UR4, 0x3, URZ ; /* 0x0000000304047899 */
/* 0x000fe2000800063f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e2a0000002100 */
/*0050*/ IMAD R0, R0, UR4, R3 ; /* 0x0000000400007c24 */
/* 0x001fe2000f8e0203 */
/*0060*/ MOV R3, c[0x0][0x174] ; /* 0x00005d0000037a02 */
/* 0x000fc80000000f00 */
/*0070*/ IADD3 R2, R0, c[0x0][0x170], RZ ; /* 0x00005c0000027a10 */
/* 0x000fca0007ffe0ff */
/*0080*/ IMAD R2, R3, 0x190, R2 ; /* 0x0000019003027824 */
/* 0x000fca00078e0202 */
/*0090*/ IABS R2, R2 ; /* 0x0000000200027213 */
/* 0x000fc80000000000 */
/*00a0*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */
/* 0x000fda0003f06270 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ HFMA2.MMA R11, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0b7435 */
/* 0x000fe200000001ff */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00e0*/ IMAD.WIDE R2, R2, R11, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e020b */
/*00f0*/ IMAD.WIDE R4, R0.reuse, R11.reuse, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x0c0fe200078e020b */
/*0100*/ LDG.E.64 R8, [R2.64] ; /* 0x0000000402087981 */
/* 0x000ea8000c1e1b00 */
/*0110*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea4000c1e1b00 */
/*0120*/ DADD R8, R6, R8 ; /* 0x0000000006087229 */
/* 0x0040640000000008 */
/*0130*/ IMAD.WIDE R6, R0, R11, c[0x0][0x168] ; /* 0x00005a0000067625 */
/* 0x001fca00078e020b */
/*0140*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x0021e8000c101b04 */
/*0150*/ LDG.E.64 R10, [R4.64+0x400] ; /* 0x00040004040a7981 */
/* 0x000ea8000c1e1b00 */
/*0160*/ LDG.E.64 R12, [R2.64+0x400] ; /* 0x00040004020c7981 */
/* 0x000ea4000c1e1b00 */
/*0170*/ DADD R10, R10, R12 ; /* 0x000000000a0a7229 */
/* 0x004e4e000000000c */
/*0180*/ STG.E.64 [R6.64+0x400], R10 ; /* 0x0004000a06007986 */
/* 0x0023e8000c101b04 */
/*0190*/ LDG.E.64 R12, [R4.64+0x800] ; /* 0x00080004040c7981 */
/* 0x000ea8000c1e1b00 */
/*01a0*/ LDG.E.64 R14, [R2.64+0x800] ; /* 0x00080004020e7981 */
/* 0x000ea4000c1e1b00 */
/*01b0*/ DADD R12, R12, R14 ; /* 0x000000000c0c7229 */
/* 0x004e8e000000000e */
/*01c0*/ STG.E.64 [R6.64+0x800], R12 ; /* 0x0008000c06007986 */
/* 0x0045e8000c101b04 */
/*01d0*/ LDG.E.64 R14, [R4.64+0xc00] ; /* 0x000c0004040e7981 */
/* 0x000ee8000c1e1b00 */
/*01e0*/ LDG.E.64 R16, [R2.64+0xc00] ; /* 0x000c000402107981 */
/* 0x000ee4000c1e1b00 */
/*01f0*/ DADD R14, R14, R16 ; /* 0x000000000e0e7229 */
/* 0x008ece0000000010 */
/*0200*/ STG.E.64 [R6.64+0xc00], R14 ; /* 0x000c000e06007986 */
/* 0x0087e8000c101b04 */
/*0210*/ LDG.E.64 R8, [R4.64+0x1000] ; /* 0x0010000404087981 */
/* 0x001f28000c1e1b00 */
/*0220*/ LDG.E.64 R16, [R2.64+0x1000] ; /* 0x0010000402107981 */
/* 0x000f24000c1e1b00 */
/*0230*/ DADD R8, R8, R16 ; /* 0x0000000008087229 */
/* 0x010e0e0000000010 */
/*0240*/ STG.E.64 [R6.64+0x1000], R8 ; /* 0x0010000806007986 */
/* 0x001fe8000c101b04 */
/*0250*/ LDG.E.64 R10, [R4.64+0x1400] ; /* 0x00140004040a7981 */
/* 0x002f28000c1e1b00 */
/*0260*/ LDG.E.64 R16, [R2.64+0x1400] ; /* 0x0014000402107981 */
/* 0x000f24000c1e1b00 */
/*0270*/ DADD R10, R10, R16 ; /* 0x000000000a0a7229 */
/* 0x010e0e0000000010 */
/*0280*/ STG.E.64 [R6.64+0x1400], R10 ; /* 0x0014000a06007986 */
/* 0x001fe8000c101b04 */
/*0290*/ LDG.E.64 R12, [R4.64+0x1800] ; /* 0x00180004040c7981 */
/* 0x004ea8000c1e1b00 */
/*02a0*/ LDG.E.64 R16, [R2.64+0x1800] ; /* 0x0018000402107981 */
/* 0x000ea4000c1e1b00 */
/*02b0*/ DADD R12, R12, R16 ; /* 0x000000000c0c7229 */
/* 0x004e0e0000000010 */
/*02c0*/ STG.E.64 [R6.64+0x1800], R12 ; /* 0x0018000c06007986 */
/* 0x001fe8000c101b04 */
/*02d0*/ LDG.E.64 R14, [R4.64+0x1c00] ; /* 0x001c0004040e7981 */
/* 0x008ea8000c1e1b00 */
/*02e0*/ LDG.E.64 R16, [R2.64+0x1c00] ; /* 0x001c000402107981 */
/* 0x000ea4000c1e1b00 */
/*02f0*/ DADD R14, R14, R16 ; /* 0x000000000e0e7229 */
/* 0x004e0e0000000010 */
/*0300*/ STG.E.64 [R6.64+0x1c00], R14 ; /* 0x001c000e06007986 */
/* 0x001fe2000c101b04 */
/*0310*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0320*/ BRA 0x320; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <fstream>
#include <iterator>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <string>
#include <sstream>
#include <iomanip>
#include <math.h>
#include <stdio.h>
//Define the Z direction size as a global variable
#define z 400
#define blockSize 128
#define energySize 64000000
//define macro for error checking
#define cudaCheckError(){ \
cudaError_t err = cudaGetLastError(); \
if(err != cudaSuccess){ \
std::cout << "Error in " << __FILE__ << " at line " << __LINE__ << " : " << cudaGetErrorString(err) << std::endl; \
exit(EXIT_FAILURE); \
} \
}
// Magnitude of a signed int for use inside kernels: negate negative inputs,
// pass non-negative inputs through unchanged.
__inline__ __device__ int absolute(int value){
    if (value < 0) {
        return -value;
    }
    return value;
}
/***********************************************************************************
BeamShift takes in energy array from the host loop and moves the coordinates
as specified to move the beam grid, given by the starting value 400*y + x, which
corresponds to the beginning of the offset to write the energy value into
corresponding to the "move". Alternatively, think of the "move" in x-y direction as
having to be mapped back into a location in the 64M element energy array, since the
energy array contains all information about how the field is distributed and paraview
does the coordinate assignation.
************************************************************************************/
// Kernel: accumulates the field shifted by (move_x, move_y) onto the original
// field, writing the result to `temp`. Each thread handles 8 elements spaced
// blockSize apart (8-way thread-level parallelism).
// NOTE(review): only the base index `id` is checked against `size`; the loop
// still touches id+blockSize*7 and tid+blockSize*7, which can run past `size`
// for threads near the end -- confirm the launch configuration guarantees
// this cannot happen.
__global__ void beamShift(double* energy, double *temp, int move_x, int move_y, const int size){
// Base element index for this thread.
int tid = blockIdx.x*blockDim.x*8 + threadIdx.x;
// Source index after the move; 400 is the grid width in x, so move_y steps
// whole rows. absolute() folds negative offsets back to a valid index
// rather than clamping them.
int id = absolute(400*move_y + move_x + tid);
if(id < size){
#pragma unroll
for(int i = 0; i < 8; i++){
temp[tid+blockSize*i] = energy[id+blockSize*i]+energy[tid+blockSize*i];
}
}
}
/***********************************************************************************
Sum takes in each array after the move and sums them back together to get the correct
energy values for the field. This function exploits a thread level parallelism of 32
************************************************************************************/
// Kernel: element-wise accumulation temp[i] += energy[i], each thread
// handling 32 elements spaced blockSize apart.
// NOTE(review): only the base index `tid` is checked against `size`; the loop
// still touches tid+31*blockSize -- confirm the launch configuration keeps
// every access in range.
__global__ void sum(double* temp, double* energy, const int size){
int tid = blockIdx.x*blockDim.x*32 + threadIdx.x;
if(tid < size){
#pragma unroll
for(int i = 0; i < 32; i++){
temp[tid + i*blockSize] += energy[tid + i*blockSize];
}
}
}
//Read in energy source file data
void getSourceFile(std::vector<double>& eNomVec, std::vector<double>& rangeVec,
std::vector<double>& sigmaXVec,std::vector<double>& sigmaYVec,
std::vector<double>& eMeanVec, std::vector<double>& sigmaEVec,
std::vector<double>& xVec, std::vector<double>& yVec,
std::vector<double>& nxVec,std::vector<double>& nyVec,
std::vector<double>& weightVec, int& numGroups)
{
int dateOfMeasurement;
long int numberOfGroups;
double eNom, range, sigmaX, sigmaY, eMean, sigmaE, xcoord, ycoord, weight, nx, ny;
std::string line;
//declare and open file
std::ifstream ifile("IMPT_source.dat", std::ios::in);
if(!ifile){
std::cout << "Error, IMPT_source not found" << std::endl;
}else{
//read in date of measurement
ifile >> dateOfMeasurement;
//read in number of groups
ifile >> numberOfGroups;
numGroups = numberOfGroups;
//skip over header line
std::string e, r, x, y, m, s, nx1, ny1, x1, y1, w;
ifile >> e;
ifile >> r;
ifile >> x;
ifile >> y;
ifile >> m;
ifile >> s;
ifile >> x1;
ifile >> y1;
ifile >> nx1;
ifile >> ny1;
ifile >> w;
//intialize memory for faster read in
xVec.reserve(numberOfGroups);
yVec.reserve(numberOfGroups);
nxVec.reserve(numberOfGroups);
nyVec.reserve(numberOfGroups);
weightVec.reserve(numberOfGroups);
eNomVec.reserve(numberOfGroups);
//read in data to vectorsG
for(int i = 0; i < numberOfGroups; i++){
ifile >> eNom;
ifile >> range;
ifile >> sigmaX;
ifile >> sigmaY;
ifile >> eMean;
ifile >> sigmaE;
ifile >> xcoord;
ifile >> ycoord;
ifile >> nx;
ifile >> ny;
ifile >> weight;
eNomVec.push_back(eNom);
// rangeVec.push_back(range);
// sigmaXVec.push_back(sigmaX);
// sigmaYVec.push_back(sigmaY);
// eMeanVec.push_back(eMean);
xVec.push_back(xcoord);
yVec.push_back(ycoord);
nxVec.push_back(nx);
nyVec.push_back(ny);
weightVec.push_back(weight);
}
}
}
int main(int argc, char** argv){
//get command line arguments for the elements to loop over and error check
if(argc < 2){
std::cout << "Too few arguments, need two for range of beam values" << std::endl;
exit(EXIT_FAILURE);
}else if(argc > 5 ){
std::cout << "Too many arguments, need two for range of beam values" << std::endl;
exit(EXIT_FAILURE);
}else if(atoi(argv[1]) <= 0 || atoi(argv[1]) > 94 || atoi(argv[2]) <= 0 || atoi(argv[2]) > 94){
std::cout << "Arguments out of range, must be in range [1,94]" << std::endl;
exit(EXIT_FAILURE);
}
//declare stuff for source file read
int numberOfGroups;
std::vector<double> eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight;
getSourceFile(eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight, numberOfGroups);
//intialize device for faster update of values using all 16 GPUs as defined by the run.sh script
cudaSetDevice(atoi(argv[3]));
for(int master = atoi(argv[1])-1; master < atoi(argv[2]); master++){
//declare stream size variables and open file/check for errors
std::streampos bufferSize;
//create fileName to read in data
std::ostringstream fName;
if(master < 9){
fName << std::fixed << "GyPerMU3D_beamlet_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV.bin";
}else{
fName << std::fixed << "GyPerMU3D_beamlet_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV.bin";
}
std::string fileName = fName.str();
std::ifstream ifile(fileName.c_str(), std::ios::in | std::ios::binary);
if(!ifile){
std::cout << "Error, no file found" << std::endl;
exit(1);
}
//get file size
ifile.seekg(0, std::ios::end);
bufferSize = ifile.tellg();
ifile.seekg(0, std::ios::beg);
//declare buffer
std::vector<double> buffer(bufferSize/sizeof(double));
//read in data
ifile.read(reinterpret_cast<char*>(buffer.data()), bufferSize);
//declare size of data for later malloc's
int size = bufferSize/(sizeof(double)*z);
//fill new array of 400x400 field correctly with zeroes in each layer for extra points out of range
std::vector<double> key;
for(int k = 0; k < 400; k++){
for(int i = 0; i < 400; i++){
for(int j = 0; j < 400; j++){
if(j < 100 && i < 100){
key.push_back(0);
}else if(j >= 100 && j < 300 && i >= 100 && i < 300){
key.push_back(buffer[(200*(i-100) + (j-100))+(k*40000)]);
}else{
key.push_back(0);
}
}
}
}
//copy memory from buffer to energy
double *energy;
energy = (double*)malloc(energySize*sizeof(double));
std::copy(key.begin(), key.end(), energy);
//free memory from buffer
std::vector<double>().swap(buffer);
///create #spots variable;
int spots = nx[master]*ny[master];
//declare move arrays for grid distribution
std::vector<float> move1, move2;
move1.reserve(spots);
move2.reserve(spots);
int moveX[spots], moveY[spots];
double fieldSize = -1*(xCoord[master]+yCoord[master])/(nx[master]-1);
double field = -1*(xCoord[master]+yCoord[master]);
//declare spacings
int spaceX = ceil(field*10/(nx[master]));
int spaceY = ceil(field*10/(ny[master]));
for(int i = 0, x = xCoord[master]*10; i < nx[master]; i++, x += spaceX){
for(int j = 0, y = yCoord[master]*10; j < ny[master]; j++, y+= spaceY){
move1.push_back(x);
move2.push_back(y);
}
}
//copy to arrays for cuda since vectors are more annoying to work with as they are heap objects
std::copy(move1.begin(), move1.end(), moveX);
std::copy(move2.begin(), move2.end(), moveY);
/***********************************************************************************************************/
//Declare gridSize for cuda kernels
int gridSize = (energySize+blockSize-1)/blockSize;
//declare host arrays
double *temp_energy, *h_energy;
//allocate an array to hold the sum of each movement on the device
cudaMallocHost((void**)&temp_energy, energySize*sizeof(double));
cudaCheckError();
double *d_energy;
cudaMalloc((void**)&d_energy, energySize*sizeof(double));
cudaCheckError();
cudaMemcpy(d_energy, energy, energySize*sizeof(double), cudaMemcpyHostToDevice);
cudaCheckError();
//loop to perform all the moves.
for(int i = 0; i < spots; i++){
double *d_temp;
cudaMalloc((void**)&d_temp, energySize*sizeof(double));
cudaCheckError();
//kernel to perform all the moves for the grid
beamShift<<<gridSize/8, blockSize>>>(d_energy, d_temp, moveX[i], moveY[i], energySize);
sum<<<gridSize/32, blockSize>>>(temp_energy, d_temp, energySize);
cudaFree(d_temp);
}
//read off the temp_energy vector from the device
cudaMallocHost((void**)&h_energy, energySize*sizeof(double));
cudaCheckError();
cudaMemcpyAsync(h_energy, temp_energy, energySize*sizeof(double), cudaMemcpyDeviceToHost);
cudaCheckError();
//final kernel to subtract off extra copies of energy
for(int i = 0; i < energySize; i++){
h_energy[i] -= energy[i]*(spots);
//h_energy[i] *= weight[master]; TODO: Uncomment for weighted run
if(h_energy[i] < 0){
h_energy[i] = 0;
}
}
//read out to 94 files individually
std::ostringstream OName;
if(master < 9){
OName << std::fixed << "GyPerMU3D_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by"
<< std::setprecision(0) << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin";
}else{
OName << std::fixed << "GyPerMU3D_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by"
<< std::setprecision(0) << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin";
}
std::string fileNameOut = OName.str();
std::ofstream ofile(fileNameOut.c_str() , std::ios::out | std::ios::binary);
ofile.write(reinterpret_cast<char*>(h_energy), energySize*sizeof(double));
cudaDeviceReset();
}//end of master loop
}//end of main | #include <hip/hip_runtime.h>
#include <fstream>
#include <iterator>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <string>
#include <sstream>
#include <iomanip>
#include <math.h>
#include <stdio.h>
//Define the Z direction size as a global variable
#define z 400
#define blockSize 128
#define energySize 64000000
//define macro for error checking
#define cudaCheckError(){ \
hipError_t err = hipGetLastError(); \
if(err != hipSuccess){ \
std::cout << "Error in " << __FILE__ << " at line " << __LINE__ << " : " << hipGetErrorString(err) << std::endl; \
exit(EXIT_FAILURE); \
} \
}
__inline__ __device__ int absolute(int value){
return value < 0 ? value*-1 : value;
}
/***********************************************************************************
BeamShift takes in energy array from the host loop and moves the coordinates
as specified to move the beam grid, given by the starting value 400*y + x, which
corresponds to the beginning of the offset to write the energy value into
corresponding to the "move". Alternatively, think of the "move" in x-y direction as
having to be mapped back into a location in the 64M element energy array, since the
energy array contains all information about how the field is distributed and paraview
does the coordinate assignation.
************************************************************************************/
__global__ void beamShift(double* energy, double *temp, int move_x, int move_y, const int size){
int tid = blockIdx.x*blockDim.x*8 + threadIdx.x;
int id = absolute(400*move_y + move_x + tid);
if(id < size){
#pragma unroll
for(int i = 0; i < 8; i++){
temp[tid+blockSize*i] = energy[id+blockSize*i]+energy[tid+blockSize*i];
}
}
}
/***********************************************************************************
Sum takes in each array after the move and sums them back together to get the correct
energy values for the field. This function exploits a thread level parallelism of 32
************************************************************************************/
__global__ void sum(double* temp, double* energy, const int size){
int tid = blockIdx.x*blockDim.x*32 + threadIdx.x;
if(tid < size){
#pragma unroll
for(int i = 0; i < 32; i++){
temp[tid + i*blockSize] += energy[tid + i*blockSize];
}
}
}
//Read in energy source file data
void getSourceFile(std::vector<double>& eNomVec, std::vector<double>& rangeVec,
std::vector<double>& sigmaXVec,std::vector<double>& sigmaYVec,
std::vector<double>& eMeanVec, std::vector<double>& sigmaEVec,
std::vector<double>& xVec, std::vector<double>& yVec,
std::vector<double>& nxVec,std::vector<double>& nyVec,
std::vector<double>& weightVec, int& numGroups)
{
int dateOfMeasurement;
long int numberOfGroups;
double eNom, range, sigmaX, sigmaY, eMean, sigmaE, xcoord, ycoord, weight, nx, ny;
std::string line;
//declare and open file
std::ifstream ifile("IMPT_source.dat", std::ios::in);
if(!ifile){
std::cout << "Error, IMPT_source not found" << std::endl;
}else{
//read in date of measurement
ifile >> dateOfMeasurement;
//read in number of groups
ifile >> numberOfGroups;
numGroups = numberOfGroups;
//skip over header line
std::string e, r, x, y, m, s, nx1, ny1, x1, y1, w;
ifile >> e;
ifile >> r;
ifile >> x;
ifile >> y;
ifile >> m;
ifile >> s;
ifile >> x1;
ifile >> y1;
ifile >> nx1;
ifile >> ny1;
ifile >> w;
//intialize memory for faster read in
xVec.reserve(numberOfGroups);
yVec.reserve(numberOfGroups);
nxVec.reserve(numberOfGroups);
nyVec.reserve(numberOfGroups);
weightVec.reserve(numberOfGroups);
eNomVec.reserve(numberOfGroups);
//read in data to vectorsG
for(int i = 0; i < numberOfGroups; i++){
ifile >> eNom;
ifile >> range;
ifile >> sigmaX;
ifile >> sigmaY;
ifile >> eMean;
ifile >> sigmaE;
ifile >> xcoord;
ifile >> ycoord;
ifile >> nx;
ifile >> ny;
ifile >> weight;
eNomVec.push_back(eNom);
// rangeVec.push_back(range);
// sigmaXVec.push_back(sigmaX);
// sigmaYVec.push_back(sigmaY);
// eMeanVec.push_back(eMean);
xVec.push_back(xcoord);
yVec.push_back(ycoord);
nxVec.push_back(nx);
nyVec.push_back(ny);
weightVec.push_back(weight);
}
}
}
int main(int argc, char** argv){
//get command line arguments for the elements to loop over and error check
if(argc < 2){
std::cout << "Too few arguments, need two for range of beam values" << std::endl;
exit(EXIT_FAILURE);
}else if(argc > 5 ){
std::cout << "Too many arguments, need two for range of beam values" << std::endl;
exit(EXIT_FAILURE);
}else if(atoi(argv[1]) <= 0 || atoi(argv[1]) > 94 || atoi(argv[2]) <= 0 || atoi(argv[2]) > 94){
std::cout << "Arguments out of range, must be in range [1,94]" << std::endl;
exit(EXIT_FAILURE);
}
//declare stuff for source file read
int numberOfGroups;
std::vector<double> eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight;
getSourceFile(eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight, numberOfGroups);
//intialize device for faster update of values using all 16 GPUs as defined by the run.sh script
hipSetDevice(atoi(argv[3]));
for(int master = atoi(argv[1])-1; master < atoi(argv[2]); master++){
//declare stream size variables and open file/check for errors
std::streampos bufferSize;
//create fileName to read in data
std::ostringstream fName;
if(master < 9){
fName << std::fixed << "GyPerMU3D_beamlet_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV.bin";
}else{
fName << std::fixed << "GyPerMU3D_beamlet_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV.bin";
}
std::string fileName = fName.str();
std::ifstream ifile(fileName.c_str(), std::ios::in | std::ios::binary);
if(!ifile){
std::cout << "Error, no file found" << std::endl;
exit(1);
}
//get file size
ifile.seekg(0, std::ios::end);
bufferSize = ifile.tellg();
ifile.seekg(0, std::ios::beg);
//declare buffer
std::vector<double> buffer(bufferSize/sizeof(double));
//read in data
ifile.read(reinterpret_cast<char*>(buffer.data()), bufferSize);
//declare size of data for later malloc's
int size = bufferSize/(sizeof(double)*z);
//fill new array of 400x400 field correctly with zeroes in each layer for extra points out of range
std::vector<double> key;
for(int k = 0; k < 400; k++){
for(int i = 0; i < 400; i++){
for(int j = 0; j < 400; j++){
if(j < 100 && i < 100){
key.push_back(0);
}else if(j >= 100 && j < 300 && i >= 100 && i < 300){
key.push_back(buffer[(200*(i-100) + (j-100))+(k*40000)]);
}else{
key.push_back(0);
}
}
}
}
//copy memory from buffer to energy
double *energy;
energy = (double*)malloc(energySize*sizeof(double));
std::copy(key.begin(), key.end(), energy);
//free memory from buffer
std::vector<double>().swap(buffer);
///create #spots variable;
int spots = nx[master]*ny[master];
//declare move arrays for grid distribution
std::vector<float> move1, move2;
move1.reserve(spots);
move2.reserve(spots);
int moveX[spots], moveY[spots];
double fieldSize = -1*(xCoord[master]+yCoord[master])/(nx[master]-1);
double field = -1*(xCoord[master]+yCoord[master]);
//declare spacings
int spaceX = ceil(field*10/(nx[master]));
int spaceY = ceil(field*10/(ny[master]));
for(int i = 0, x = xCoord[master]*10; i < nx[master]; i++, x += spaceX){
for(int j = 0, y = yCoord[master]*10; j < ny[master]; j++, y+= spaceY){
move1.push_back(x);
move2.push_back(y);
}
}
//copy to arrays for cuda since vectors are more annoying to work with as they are heap objects
std::copy(move1.begin(), move1.end(), moveX);
std::copy(move2.begin(), move2.end(), moveY);
/***********************************************************************************************************/
//Declare gridSize for cuda kernels
int gridSize = (energySize+blockSize-1)/blockSize;
//declare host arrays
double *temp_energy, *h_energy;
//allocate an array to hold the sum of each movement on the device
hipHostMalloc((void**)&temp_energy, energySize*sizeof(double), hipHostMallocDefault);
cudaCheckError();
double *d_energy;
hipMalloc((void**)&d_energy, energySize*sizeof(double));
cudaCheckError();
hipMemcpy(d_energy, energy, energySize*sizeof(double), hipMemcpyHostToDevice);
cudaCheckError();
//loop to perform all the moves.
for(int i = 0; i < spots; i++){
double *d_temp;
hipMalloc((void**)&d_temp, energySize*sizeof(double));
cudaCheckError();
//kernel to perform all the moves for the grid
beamShift<<<gridSize/8, blockSize>>>(d_energy, d_temp, moveX[i], moveY[i], energySize);
sum<<<gridSize/32, blockSize>>>(temp_energy, d_temp, energySize);
hipFree(d_temp);
}
//read off the temp_energy vector from the device
hipHostMalloc((void**)&h_energy, energySize*sizeof(double), hipHostMallocDefault);
cudaCheckError();
hipMemcpyAsync(h_energy, temp_energy, energySize*sizeof(double), hipMemcpyDeviceToHost);
cudaCheckError();
//final kernel to subtract off extra copies of energy
for(int i = 0; i < energySize; i++){
h_energy[i] -= energy[i]*(spots);
//h_energy[i] *= weight[master]; TODO: Uncomment for weighted run
if(h_energy[i] < 0){
h_energy[i] = 0;
}
}
//read out to 94 files individually
std::ostringstream OName;
if(master < 9){
OName << std::fixed << "GyPerMU3D_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by"
<< std::setprecision(0) << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin";
}else{
OName << std::fixed << "GyPerMU3D_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by"
<< std::setprecision(0) << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin";
}
std::string fileNameOut = OName.str();
std::ofstream ofile(fileNameOut.c_str() , std::ios::out | std::ios::binary);
ofile.write(reinterpret_cast<char*>(h_energy), energySize*sizeof(double));
hipDeviceReset();
}//end of master loop
}//end of main |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <fstream>
#include <iterator>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <string>
#include <sstream>
#include <iomanip>
#include <math.h>
#include <stdio.h>
//Define the Z direction size as a global variable
#define z 400
#define blockSize 128
#define energySize 64000000
//define macro for error checking
#define cudaCheckError(){ \
hipError_t err = hipGetLastError(); \
if(err != hipSuccess){ \
std::cout << "Error in " << __FILE__ << " at line " << __LINE__ << " : " << hipGetErrorString(err) << std::endl; \
exit(EXIT_FAILURE); \
} \
}
__inline__ __device__ int absolute(int value){
return value < 0 ? value*-1 : value;
}
/***********************************************************************************
BeamShift takes in energy array from the host loop and moves the coordinates
as specified to move the beam grid, given by the starting value 400*y + x, which
corresponds to the beginning of the offset to write the energy value into
corresponding to the "move". Alternatively, think of the "move" in x-y direction as
having to be mapped back into a location in the 64M element energy array, since the
energy array contains all information about how the field is distributed and paraview
does the coordinate assignation.
************************************************************************************/
__global__ void beamShift(double* energy, double *temp, int move_x, int move_y, const int size){
int tid = blockIdx.x*blockDim.x*8 + threadIdx.x;
int id = absolute(400*move_y + move_x + tid);
if(id < size){
#pragma unroll
for(int i = 0; i < 8; i++){
temp[tid+blockSize*i] = energy[id+blockSize*i]+energy[tid+blockSize*i];
}
}
}
/***********************************************************************************
Sum takes in each array after the move and sums them back together to get the correct
energy values for the field. This function exploits a thread level parallelism of 32
************************************************************************************/
__global__ void sum(double* temp, double* energy, const int size){
int tid = blockIdx.x*blockDim.x*32 + threadIdx.x;
if(tid < size){
#pragma unroll
for(int i = 0; i < 32; i++){
temp[tid + i*blockSize] += energy[tid + i*blockSize];
}
}
}
//Read in energy source file data
void getSourceFile(std::vector<double>& eNomVec, std::vector<double>& rangeVec,
std::vector<double>& sigmaXVec,std::vector<double>& sigmaYVec,
std::vector<double>& eMeanVec, std::vector<double>& sigmaEVec,
std::vector<double>& xVec, std::vector<double>& yVec,
std::vector<double>& nxVec,std::vector<double>& nyVec,
std::vector<double>& weightVec, int& numGroups)
{
int dateOfMeasurement;
long int numberOfGroups;
double eNom, range, sigmaX, sigmaY, eMean, sigmaE, xcoord, ycoord, weight, nx, ny;
std::string line;
//declare and open file
std::ifstream ifile("IMPT_source.dat", std::ios::in);
if(!ifile){
std::cout << "Error, IMPT_source not found" << std::endl;
}else{
//read in date of measurement
ifile >> dateOfMeasurement;
//read in number of groups
ifile >> numberOfGroups;
numGroups = numberOfGroups;
//skip over header line
std::string e, r, x, y, m, s, nx1, ny1, x1, y1, w;
ifile >> e;
ifile >> r;
ifile >> x;
ifile >> y;
ifile >> m;
ifile >> s;
ifile >> x1;
ifile >> y1;
ifile >> nx1;
ifile >> ny1;
ifile >> w;
//intialize memory for faster read in
xVec.reserve(numberOfGroups);
yVec.reserve(numberOfGroups);
nxVec.reserve(numberOfGroups);
nyVec.reserve(numberOfGroups);
weightVec.reserve(numberOfGroups);
eNomVec.reserve(numberOfGroups);
//read in data to vectorsG
for(int i = 0; i < numberOfGroups; i++){
ifile >> eNom;
ifile >> range;
ifile >> sigmaX;
ifile >> sigmaY;
ifile >> eMean;
ifile >> sigmaE;
ifile >> xcoord;
ifile >> ycoord;
ifile >> nx;
ifile >> ny;
ifile >> weight;
eNomVec.push_back(eNom);
// rangeVec.push_back(range);
// sigmaXVec.push_back(sigmaX);
// sigmaYVec.push_back(sigmaY);
// eMeanVec.push_back(eMean);
xVec.push_back(xcoord);
yVec.push_back(ycoord);
nxVec.push_back(nx);
nyVec.push_back(ny);
weightVec.push_back(weight);
}
}
}
int main(int argc, char** argv){
//get command line arguments for the elements to loop over and error check
if(argc < 2){
std::cout << "Too few arguments, need two for range of beam values" << std::endl;
exit(EXIT_FAILURE);
}else if(argc > 5 ){
std::cout << "Too many arguments, need two for range of beam values" << std::endl;
exit(EXIT_FAILURE);
}else if(atoi(argv[1]) <= 0 || atoi(argv[1]) > 94 || atoi(argv[2]) <= 0 || atoi(argv[2]) > 94){
std::cout << "Arguments out of range, must be in range [1,94]" << std::endl;
exit(EXIT_FAILURE);
}
//declare stuff for source file read
int numberOfGroups;
std::vector<double> eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight;
getSourceFile(eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight, numberOfGroups);
//intialize device for faster update of values using all 16 GPUs as defined by the run.sh script
hipSetDevice(atoi(argv[3]));
for(int master = atoi(argv[1])-1; master < atoi(argv[2]); master++){
//declare stream size variables and open file/check for errors
std::streampos bufferSize;
//create fileName to read in data
std::ostringstream fName;
if(master < 9){
fName << std::fixed << "GyPerMU3D_beamlet_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV.bin";
}else{
fName << std::fixed << "GyPerMU3D_beamlet_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV.bin";
}
std::string fileName = fName.str();
std::ifstream ifile(fileName.c_str(), std::ios::in | std::ios::binary);
if(!ifile){
std::cout << "Error, no file found" << std::endl;
exit(1);
}
//get file size
ifile.seekg(0, std::ios::end);
bufferSize = ifile.tellg();
ifile.seekg(0, std::ios::beg);
//declare buffer
std::vector<double> buffer(bufferSize/sizeof(double));
//read in data
ifile.read(reinterpret_cast<char*>(buffer.data()), bufferSize);
//declare size of data for later malloc's
int size = bufferSize/(sizeof(double)*z);
//fill new array of 400x400 field correctly with zeroes in each layer for extra points out of range
std::vector<double> key;
for(int k = 0; k < 400; k++){
for(int i = 0; i < 400; i++){
for(int j = 0; j < 400; j++){
if(j < 100 && i < 100){
key.push_back(0);
}else if(j >= 100 && j < 300 && i >= 100 && i < 300){
key.push_back(buffer[(200*(i-100) + (j-100))+(k*40000)]);
}else{
key.push_back(0);
}
}
}
}
//copy memory from buffer to energy
double *energy;
energy = (double*)malloc(energySize*sizeof(double));
std::copy(key.begin(), key.end(), energy);
//free memory from buffer
std::vector<double>().swap(buffer);
///create #spots variable;
int spots = nx[master]*ny[master];
//declare move arrays for grid distribution
std::vector<float> move1, move2;
move1.reserve(spots);
move2.reserve(spots);
int moveX[spots], moveY[spots];
double fieldSize = -1*(xCoord[master]+yCoord[master])/(nx[master]-1);
double field = -1*(xCoord[master]+yCoord[master]);
//declare spacings
int spaceX = ceil(field*10/(nx[master]));
int spaceY = ceil(field*10/(ny[master]));
for(int i = 0, x = xCoord[master]*10; i < nx[master]; i++, x += spaceX){
for(int j = 0, y = yCoord[master]*10; j < ny[master]; j++, y+= spaceY){
move1.push_back(x);
move2.push_back(y);
}
}
//copy to arrays for cuda since vectors are more annoying to work with as they are heap objects
std::copy(move1.begin(), move1.end(), moveX);
std::copy(move2.begin(), move2.end(), moveY);
/***********************************************************************************************************/
//Declare gridSize for cuda kernels
int gridSize = (energySize+blockSize-1)/blockSize;
//declare host arrays
double *temp_energy, *h_energy;
//allocate an array to hold the sum of each movement on the device
hipHostMalloc((void**)&temp_energy, energySize*sizeof(double), hipHostMallocDefault);
cudaCheckError();
double *d_energy;
hipMalloc((void**)&d_energy, energySize*sizeof(double));
cudaCheckError();
hipMemcpy(d_energy, energy, energySize*sizeof(double), hipMemcpyHostToDevice);
cudaCheckError();
//loop to perform all the moves.
for(int i = 0; i < spots; i++){
double *d_temp;
hipMalloc((void**)&d_temp, energySize*sizeof(double));
cudaCheckError();
//kernel to perform all the moves for the grid
beamShift<<<gridSize/8, blockSize>>>(d_energy, d_temp, moveX[i], moveY[i], energySize);
sum<<<gridSize/32, blockSize>>>(temp_energy, d_temp, energySize);
hipFree(d_temp);
}
//read off the temp_energy vector from the device
hipHostMalloc((void**)&h_energy, energySize*sizeof(double), hipHostMallocDefault);
cudaCheckError();
hipMemcpyAsync(h_energy, temp_energy, energySize*sizeof(double), hipMemcpyDeviceToHost);
cudaCheckError();
//final kernel to subtract off extra copies of energy
for(int i = 0; i < energySize; i++){
h_energy[i] -= energy[i]*(spots);
//h_energy[i] *= weight[master]; TODO: Uncomment for weighted run
if(h_energy[i] < 0){
h_energy[i] = 0;
}
}
//read out to 94 files individually
std::ostringstream OName;
if(master < 9){
OName << std::fixed << "GyPerMU3D_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by"
<< std::setprecision(0) << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin";
}else{
OName << std::fixed << "GyPerMU3D_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by"
<< std::setprecision(0) << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin";
}
std::string fileNameOut = OName.str();
std::ofstream ofile(fileNameOut.c_str() , std::ios::out | std::ios::binary);
ofile.write(reinterpret_cast<char*>(h_energy), energySize*sizeof(double));
hipDeviceReset();
}//end of master loop
}//end of main | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9beamShiftPdS_iii
.globl _Z9beamShiftPdS_iii
.p2align 8
.type _Z9beamShiftPdS_iii,@function
_Z9beamShiftPdS_iii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_mulk_i32 s3, 0x190
s_mul_i32 s15, s15, s4
s_load_b32 s4, s[0:1], 0x18
v_lshl_add_u32 v0, s15, 3, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add3_u32 v1, s3, s2, v0
s_mov_b32 s2, exec_lo
v_sub_nc_u32_e32 v2, 0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_max_i32_e32 v2, v1, v2
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s4, v2
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v3, 0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 3, v[2:3]
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v10, vcc_lo, v0, 0x400
s_clause 0x1
global_load_b64 v[6:7], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[6:7], v[4:5]
v_add_co_u32 v6, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v8, vcc_lo, s0, v10
v_add_co_ci_u32_e32 v9, vcc_lo, s1, v11, vcc_lo
v_add_co_u32 v12, vcc_lo, v0, 0x800
v_add_co_ci_u32_e32 v13, vcc_lo, 0, v1, vcc_lo
global_store_b64 v[6:7], v[4:5], off
s_clause 0x1
global_load_b64 v[4:5], v[2:3], off offset:1024
global_load_b64 v[6:7], v[8:9], off
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[4:5], v[6:7]
v_add_co_u32 v6, vcc_lo, s2, v10
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v11, vcc_lo
v_add_co_u32 v8, vcc_lo, s0, v12
v_add_co_ci_u32_e32 v9, vcc_lo, s1, v13, vcc_lo
v_add_co_u32 v10, vcc_lo, v0, 0xc00
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v1, vcc_lo
global_store_b64 v[6:7], v[4:5], off
s_clause 0x1
global_load_b64 v[4:5], v[2:3], off offset:2048
global_load_b64 v[6:7], v[8:9], off
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[4:5], v[6:7]
v_add_co_u32 v6, vcc_lo, s2, v12
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v13, vcc_lo
v_add_co_u32 v8, vcc_lo, s0, v10
v_add_co_ci_u32_e32 v9, vcc_lo, s1, v11, vcc_lo
v_add_co_u32 v12, vcc_lo, v0, 0x1000
v_add_co_ci_u32_e32 v13, vcc_lo, 0, v1, vcc_lo
global_store_b64 v[6:7], v[4:5], off
s_clause 0x1
global_load_b64 v[4:5], v[2:3], off offset:3072
global_load_b64 v[6:7], v[8:9], off
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[4:5], v[6:7]
v_add_co_u32 v6, vcc_lo, s2, v10
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v11, vcc_lo
v_add_co_u32 v2, vcc_lo, 0x1000, v2
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
v_add_co_u32 v8, vcc_lo, s0, v12
v_add_co_ci_u32_e32 v9, vcc_lo, s1, v13, vcc_lo
v_add_co_u32 v10, vcc_lo, v0, 0x1400
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v1, vcc_lo
global_store_b64 v[6:7], v[4:5], off
s_clause 0x1
global_load_b64 v[4:5], v[2:3], off
global_load_b64 v[6:7], v[8:9], off
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[4:5], v[6:7]
v_add_co_u32 v6, vcc_lo, s2, v12
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v13, vcc_lo
v_add_co_u32 v8, vcc_lo, s0, v10
v_add_co_ci_u32_e32 v9, vcc_lo, s1, v11, vcc_lo
v_add_co_u32 v12, vcc_lo, v0, 0x1800
v_add_co_ci_u32_e32 v13, vcc_lo, 0, v1, vcc_lo
global_store_b64 v[6:7], v[4:5], off
s_clause 0x1
global_load_b64 v[4:5], v[2:3], off offset:1024
global_load_b64 v[6:7], v[8:9], off
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[4:5], v[6:7]
v_add_co_u32 v6, vcc_lo, s2, v10
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v11, vcc_lo
v_add_co_u32 v8, vcc_lo, s0, v12
v_add_co_ci_u32_e32 v9, vcc_lo, s1, v13, vcc_lo
global_store_b64 v[6:7], v[4:5], off
s_clause 0x1
global_load_b64 v[4:5], v[2:3], off offset:2048
global_load_b64 v[6:7], v[8:9], off
v_add_co_u32 v8, vcc_lo, v0, 0x1c00
v_add_co_ci_u32_e32 v9, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v12
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v13, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[4:5], v[4:5], v[6:7]
v_add_co_u32 v6, vcc_lo, s0, v8
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v9, vcc_lo
global_store_b64 v[0:1], v[4:5], off
s_clause 0x1
global_load_b64 v[0:1], v[2:3], off offset:3072
global_load_b64 v[2:3], v[6:7], off
s_waitcnt vmcnt(0)
v_add_f64 v[0:1], v[0:1], v[2:3]
v_add_co_u32 v2, vcc_lo, s2, v8
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v9, vcc_lo
global_store_b64 v[2:3], v[0:1], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9beamShiftPdS_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9beamShiftPdS_iii, .Lfunc_end0-_Z9beamShiftPdS_iii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z3sumPdS_i
.globl _Z3sumPdS_i
.p2align 8
.type _Z3sumPdS_i,@function
_Z3sumPdS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mul_i32 s15, s15, s2
s_mov_b32 s2, exec_lo
v_lshl_add_u32 v0, s15, 5, v0
v_cmpx_gt_i32_e64 s3, v0
s_cbranch_execz .LBB1_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v20, vcc_lo, v0, 0x400
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[6:7], v[4:5], off
v_add_co_ci_u32_e32 v21, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v22, vcc_lo, v0, 0x800
v_add_co_ci_u32_e32 v23, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v24, vcc_lo, v0, 0xc00
v_add_co_ci_u32_e32 v25, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v8, vcc_lo, s0, v20
v_add_co_ci_u32_e32 v9, vcc_lo, s1, v21, vcc_lo
v_add_co_u32 v10, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v23, vcc_lo
v_add_co_u32 v12, vcc_lo, s0, v24
v_add_co_ci_u32_e32 v13, vcc_lo, s1, v25, vcc_lo
s_clause 0x2
global_load_b64 v[14:15], v[8:9], off
global_load_b64 v[16:17], v[10:11], off
global_load_b64 v[18:19], v[12:13], off
s_waitcnt vmcnt(3)
v_add_f64 v[2:3], v[2:3], v[6:7]
v_add_co_u32 v6, vcc_lo, s2, v20
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v21, vcc_lo
global_store_b64 v[4:5], v[2:3], off
global_load_b64 v[2:3], v[6:7], off
v_add_co_u32 v4, vcc_lo, s2, v22
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v23, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[14:15]
global_store_b64 v[8:9], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v24
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v25, vcc_lo
v_add_co_u32 v6, vcc_lo, v0, 0x1000
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[16:17]
global_store_b64 v[10:11], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s2, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b64 v[8:9], v[4:5], off
v_add_co_u32 v20, vcc_lo, v0, 0x1400
v_add_co_ci_u32_e32 v21, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v22, vcc_lo, v0, 0x1800
v_add_co_ci_u32_e32 v23, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v24, vcc_lo, v0, 0x1c00
v_add_co_ci_u32_e32 v25, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f64 v[2:3], v[2:3], v[18:19]
global_store_b64 v[12:13], v[2:3], off
global_load_b64 v[2:3], v[6:7], off
v_add_co_u32 v6, vcc_lo, s0, v20
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v21, vcc_lo
v_add_co_u32 v10, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v23, vcc_lo
v_add_co_u32 v12, vcc_lo, s0, v24
v_add_co_ci_u32_e32 v13, vcc_lo, s1, v25, vcc_lo
s_clause 0x2
global_load_b64 v[14:15], v[6:7], off
global_load_b64 v[16:17], v[10:11], off
global_load_b64 v[18:19], v[12:13], off
s_waitcnt vmcnt(3)
v_add_f64 v[2:3], v[2:3], v[8:9]
v_add_co_u32 v8, vcc_lo, s2, v20
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v21, vcc_lo
global_store_b64 v[4:5], v[2:3], off
global_load_b64 v[2:3], v[8:9], off
v_add_co_u32 v4, vcc_lo, s2, v22
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v23, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[14:15]
global_store_b64 v[6:7], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v24
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v25, vcc_lo
v_add_co_u32 v6, vcc_lo, v0, 0x2000
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[16:17]
global_store_b64 v[10:11], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s2, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b64 v[8:9], v[4:5], off
v_add_co_u32 v20, vcc_lo, v0, 0x2400
v_add_co_ci_u32_e32 v21, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v22, vcc_lo, v0, 0x2800
v_add_co_ci_u32_e32 v23, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v24, vcc_lo, v0, 0x2c00
v_add_co_ci_u32_e32 v25, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f64 v[2:3], v[2:3], v[18:19]
global_store_b64 v[12:13], v[2:3], off
global_load_b64 v[2:3], v[6:7], off
v_add_co_u32 v6, vcc_lo, s0, v20
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v21, vcc_lo
v_add_co_u32 v10, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v23, vcc_lo
v_add_co_u32 v12, vcc_lo, s0, v24
v_add_co_ci_u32_e32 v13, vcc_lo, s1, v25, vcc_lo
s_clause 0x2
global_load_b64 v[14:15], v[6:7], off
global_load_b64 v[16:17], v[10:11], off
global_load_b64 v[18:19], v[12:13], off
s_waitcnt vmcnt(3)
v_add_f64 v[2:3], v[2:3], v[8:9]
v_add_co_u32 v8, vcc_lo, s2, v20
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v21, vcc_lo
global_store_b64 v[4:5], v[2:3], off
global_load_b64 v[2:3], v[8:9], off
v_add_co_u32 v4, vcc_lo, s2, v22
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v23, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[14:15]
global_store_b64 v[6:7], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v24
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v25, vcc_lo
v_add_co_u32 v6, vcc_lo, v0, 0x3000
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[16:17]
global_store_b64 v[10:11], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s2, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b64 v[8:9], v[4:5], off
v_add_co_u32 v20, vcc_lo, v0, 0x3400
v_add_co_ci_u32_e32 v21, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v22, vcc_lo, v0, 0x3800
v_add_co_ci_u32_e32 v23, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v24, vcc_lo, v0, 0x3c00
v_add_co_ci_u32_e32 v25, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f64 v[2:3], v[2:3], v[18:19]
global_store_b64 v[12:13], v[2:3], off
global_load_b64 v[2:3], v[6:7], off
v_add_co_u32 v6, vcc_lo, s0, v20
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v21, vcc_lo
v_add_co_u32 v10, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v23, vcc_lo
v_add_co_u32 v12, vcc_lo, s0, v24
v_add_co_ci_u32_e32 v13, vcc_lo, s1, v25, vcc_lo
s_clause 0x2
global_load_b64 v[14:15], v[6:7], off
global_load_b64 v[16:17], v[10:11], off
global_load_b64 v[18:19], v[12:13], off
s_waitcnt vmcnt(3)
v_add_f64 v[2:3], v[2:3], v[8:9]
v_add_co_u32 v8, vcc_lo, s2, v20
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v21, vcc_lo
global_store_b64 v[4:5], v[2:3], off
global_load_b64 v[2:3], v[8:9], off
v_add_co_u32 v4, vcc_lo, s2, v22
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v23, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[14:15]
global_store_b64 v[6:7], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v24
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v25, vcc_lo
v_add_co_u32 v6, vcc_lo, v0, 0x4000
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[16:17]
global_store_b64 v[10:11], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s2, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b64 v[8:9], v[4:5], off
v_add_co_u32 v20, vcc_lo, v0, 0x4400
v_add_co_ci_u32_e32 v21, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v22, vcc_lo, v0, 0x4800
v_add_co_ci_u32_e32 v23, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v24, vcc_lo, v0, 0x4c00
v_add_co_ci_u32_e32 v25, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f64 v[2:3], v[2:3], v[18:19]
global_store_b64 v[12:13], v[2:3], off
global_load_b64 v[2:3], v[6:7], off
v_add_co_u32 v6, vcc_lo, s0, v20
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v21, vcc_lo
v_add_co_u32 v10, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v23, vcc_lo
v_add_co_u32 v12, vcc_lo, s0, v24
v_add_co_ci_u32_e32 v13, vcc_lo, s1, v25, vcc_lo
s_clause 0x2
global_load_b64 v[14:15], v[6:7], off
global_load_b64 v[16:17], v[10:11], off
global_load_b64 v[18:19], v[12:13], off
s_waitcnt vmcnt(3)
v_add_f64 v[2:3], v[2:3], v[8:9]
v_add_co_u32 v8, vcc_lo, s2, v20
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v21, vcc_lo
global_store_b64 v[4:5], v[2:3], off
global_load_b64 v[2:3], v[8:9], off
v_add_co_u32 v4, vcc_lo, s2, v22
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v23, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[14:15]
global_store_b64 v[6:7], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v24
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v25, vcc_lo
v_add_co_u32 v6, vcc_lo, v0, 0x5000
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[16:17]
global_store_b64 v[10:11], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s2, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b64 v[8:9], v[4:5], off
v_add_co_u32 v20, vcc_lo, v0, 0x5400
v_add_co_ci_u32_e32 v21, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v22, vcc_lo, v0, 0x5800
v_add_co_ci_u32_e32 v23, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v24, vcc_lo, v0, 0x5c00
v_add_co_ci_u32_e32 v25, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f64 v[2:3], v[2:3], v[18:19]
global_store_b64 v[12:13], v[2:3], off
global_load_b64 v[2:3], v[6:7], off
v_add_co_u32 v6, vcc_lo, s0, v20
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v21, vcc_lo
v_add_co_u32 v10, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v23, vcc_lo
v_add_co_u32 v12, vcc_lo, s0, v24
v_add_co_ci_u32_e32 v13, vcc_lo, s1, v25, vcc_lo
s_clause 0x2
global_load_b64 v[14:15], v[6:7], off
global_load_b64 v[16:17], v[10:11], off
global_load_b64 v[18:19], v[12:13], off
s_waitcnt vmcnt(3)
v_add_f64 v[2:3], v[2:3], v[8:9]
v_add_co_u32 v8, vcc_lo, s2, v20
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v21, vcc_lo
global_store_b64 v[4:5], v[2:3], off
global_load_b64 v[2:3], v[8:9], off
v_add_co_u32 v4, vcc_lo, s2, v22
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v23, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[14:15]
global_store_b64 v[6:7], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v24
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v25, vcc_lo
v_add_co_u32 v6, vcc_lo, v0, 0x6000
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[16:17]
global_store_b64 v[10:11], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s2, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b64 v[8:9], v[4:5], off
v_add_co_u32 v20, vcc_lo, v0, 0x6400
v_add_co_ci_u32_e32 v21, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v22, vcc_lo, v0, 0x6800
v_add_co_ci_u32_e32 v23, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v24, vcc_lo, v0, 0x6c00
v_add_co_ci_u32_e32 v25, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f64 v[2:3], v[2:3], v[18:19]
global_store_b64 v[12:13], v[2:3], off
global_load_b64 v[2:3], v[6:7], off
v_add_co_u32 v6, vcc_lo, s0, v20
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v21, vcc_lo
v_add_co_u32 v10, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v23, vcc_lo
v_add_co_u32 v12, vcc_lo, s0, v24
v_add_co_ci_u32_e32 v13, vcc_lo, s1, v25, vcc_lo
s_clause 0x2
global_load_b64 v[14:15], v[6:7], off
global_load_b64 v[16:17], v[10:11], off
global_load_b64 v[18:19], v[12:13], off
s_waitcnt vmcnt(3)
v_add_f64 v[2:3], v[2:3], v[8:9]
v_add_co_u32 v8, vcc_lo, s2, v20
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v21, vcc_lo
global_store_b64 v[4:5], v[2:3], off
global_load_b64 v[2:3], v[8:9], off
v_add_co_u32 v4, vcc_lo, s2, v22
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v23, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[14:15]
global_store_b64 v[6:7], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v24
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v25, vcc_lo
v_add_co_u32 v6, vcc_lo, v0, 0x7000
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[16:17]
global_store_b64 v[10:11], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v6
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s0, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v7, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[18:19]
v_add_co_u32 v18, vcc_lo, v0, 0x7400
v_add_co_ci_u32_e32 v19, vcc_lo, 0, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s0, v18
v_add_co_ci_u32_e32 v9, vcc_lo, s1, v19, vcc_lo
v_add_co_u32 v20, vcc_lo, v0, 0x7800
v_add_co_ci_u32_e32 v21, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v22, vcc_lo, v0, 0x7c00
v_add_co_ci_u32_e32 v23, vcc_lo, 0, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s0, v20
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v21, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v10, vcc_lo, s0, v22
v_add_co_ci_u32_e32 v11, vcc_lo, s1, v23, vcc_lo
global_store_b64 v[12:13], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
s_clause 0x3
global_load_b64 v[4:5], v[6:7], off
global_load_b64 v[12:13], v[8:9], off
global_load_b64 v[14:15], v[0:1], off
global_load_b64 v[16:17], v[10:11], off
s_waitcnt vmcnt(3)
v_add_f64 v[2:3], v[2:3], v[4:5]
v_add_co_u32 v4, vcc_lo, s2, v18
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v19, vcc_lo
global_store_b64 v[6:7], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v20
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v21, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[12:13]
global_store_b64 v[8:9], v[2:3], off
global_load_b64 v[2:3], v[4:5], off
v_add_co_u32 v4, vcc_lo, s2, v22
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v23, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[14:15]
global_store_b64 v[0:1], v[2:3], off
global_load_b64 v[0:1], v[4:5], off
s_waitcnt vmcnt(0)
v_add_f64 v[0:1], v[0:1], v[16:17]
global_store_b64 v[10:11], v[0:1], off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3sumPdS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 26
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z3sumPdS_i, .Lfunc_end1-_Z3sumPdS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9beamShiftPdS_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9beamShiftPdS_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3sumPdS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3sumPdS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 26
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
////////////////
// TO-DO #4.2 /////////////////////////////////////////////
// Implement the GPU version of the grayscale conversion //
///////////////////////////////////////////////////////////
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
int offset_out = ((width * y) + x);
int offset = offset_out*3;
float *pixel = &image[offset];
image_out[offset_out] = pixel[0] * 0.0722f + // B
pixel[1] * 0.7152f + // G
pixel[2] * 0.2126f; // R
}
} | code for sm_80
Function : _Z13gpu_grayscaleiiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x164], PT ; /* 0x0000590003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x160], P0 ; /* 0x0000580000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD R0, R3, c[0x0][0x160], R0 ; /* 0x0000580003007a24 */
/* 0x000fe200078e0200 */
/*00b0*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00d0*/ LEA R2, R0, R0, 0x1 ; /* 0x0000000000027211 */
/* 0x000fcc00078e08ff */
/*00e0*/ IMAD.WIDE R2, R2, R9, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0209 */
/*00f0*/ LDG.E R5, [R2.64+0x4] ; /* 0x0000040402057981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ee8000c1e1900 */
/*0110*/ LDG.E R7, [R2.64+0x8] ; /* 0x0000080402077981 */
/* 0x000f22000c1e1900 */
/*0120*/ FMUL R5, R5, 0.71520000696182250977 ; /* 0x3f37175905057820 */
/* 0x004fc80000400000 */
/*0130*/ FFMA R6, R4, 0.072200000286102294922, R5 ; /* 0x3d93dd9804067823 */
/* 0x008fe40000000005 */
/*0140*/ IMAD.WIDE R4, R0, R9, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fc800078e0209 */
/*0150*/ FFMA R7, R7, 0.21259999275207519531, R6 ; /* 0x3e59b3d007077823 */
/* 0x010fca0000000006 */
/*0160*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0170*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0180*/ BRA 0x180; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
////////////////
// TO-DO #4.2 /////////////////////////////////////////////
// Implement the GPU version of the grayscale conversion //
///////////////////////////////////////////////////////////
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
int offset_out = ((width * y) + x);
int offset = offset_out*3;
float *pixel = &image[offset];
image_out[offset_out] = pixel[0] * 0.0722f + // B
pixel[1] * 0.7152f + // G
pixel[2] * 0.2126f; // R
}
} | .file "tmpxft_000f995c_00000000-6_gpu_grayscale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_
.type _Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_, @function
_Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13gpu_grayscaleiiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_, .-_Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_
.globl _Z13gpu_grayscaleiiPfS_
.type _Z13gpu_grayscaleiiPfS_, @function
_Z13gpu_grayscaleiiPfS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13gpu_grayscaleiiPfS_, .-_Z13gpu_grayscaleiiPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13gpu_grayscaleiiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13gpu_grayscaleiiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
////////////////
// TO-DO #4.2 /////////////////////////////////////////////
// Implement the GPU version of the grayscale conversion //
///////////////////////////////////////////////////////////
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
int offset_out = ((width * y) + x);
int offset = offset_out*3;
float *pixel = &image[offset];
image_out[offset_out] = pixel[0] * 0.0722f + // B
pixel[1] * 0.7152f + // G
pixel[2] * 0.2126f; // R
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
////////////////
// TO-DO #4.2 /////////////////////////////////////////////
// Implement the GPU version of the grayscale conversion //
///////////////////////////////////////////////////////////
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
int offset_out = ((width * y) + x);
int offset = offset_out*3;
float *pixel = &image[offset];
image_out[offset_out] = pixel[0] * 0.0722f + // B
pixel[1] * 0.7152f + // G
pixel[2] * 0.2126f; // R
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
////////////////
// TO-DO #4.2 /////////////////////////////////////////////
// Implement the GPU version of the grayscale conversion //
///////////////////////////////////////////////////////////
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
int offset_out = ((width * y) + x);
int offset = offset_out*3;
float *pixel = &image[offset];
image_out[offset_out] = pixel[0] * 0.0722f + // B
pixel[1] * 0.7152f + // G
pixel[2] * 0.2126f; // R
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13gpu_grayscaleiiPfS_
.globl _Z13gpu_grayscaleiiPfS_
.p2align 8
.type _Z13gpu_grayscaleiiPfS_,@function
_Z13gpu_grayscaleiiPfS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x0
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mad_u64_u32 v[3:4], null, v1, s4, v[0:1]
s_load_b128 s[0:3], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshl_add_u32 v0, v3, 1, v3
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b96 v[0:2], v[0:1], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v1, 0x3f371759, v1
v_fmamk_f32 v5, v0, 0x3d93dd98, v1
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fmamk_f32 v2, v2, 0x3e59b3d0, v5
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13gpu_grayscaleiiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13gpu_grayscaleiiPfS_, .Lfunc_end0-_Z13gpu_grayscaleiiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13gpu_grayscaleiiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13gpu_grayscaleiiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
////////////////
// TO-DO #4.2 /////////////////////////////////////////////
// Implement the GPU version of the grayscale conversion //
///////////////////////////////////////////////////////////
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
int offset_out = ((width * y) + x);
int offset = offset_out*3;
float *pixel = &image[offset];
image_out[offset_out] = pixel[0] * 0.0722f + // B
pixel[1] * 0.7152f + // G
pixel[2] * 0.2126f; // R
}
} | .text
.file "gpu_grayscale.hip"
.globl _Z28__device_stub__gpu_grayscaleiiPfS_ # -- Begin function _Z28__device_stub__gpu_grayscaleiiPfS_
.p2align 4, 0x90
.type _Z28__device_stub__gpu_grayscaleiiPfS_,@function
_Z28__device_stub__gpu_grayscaleiiPfS_: # @_Z28__device_stub__gpu_grayscaleiiPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13gpu_grayscaleiiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__gpu_grayscaleiiPfS_, .Lfunc_end0-_Z28__device_stub__gpu_grayscaleiiPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13gpu_grayscaleiiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13gpu_grayscaleiiPfS_,@object # @_Z13gpu_grayscaleiiPfS_
.section .rodata,"a",@progbits
.globl _Z13gpu_grayscaleiiPfS_
.p2align 3, 0x0
_Z13gpu_grayscaleiiPfS_:
.quad _Z28__device_stub__gpu_grayscaleiiPfS_
.size _Z13gpu_grayscaleiiPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13gpu_grayscaleiiPfS_"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__gpu_grayscaleiiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13gpu_grayscaleiiPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13gpu_grayscaleiiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x164], PT ; /* 0x0000590003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x160], P0 ; /* 0x0000580000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD R0, R3, c[0x0][0x160], R0 ; /* 0x0000580003007a24 */
/* 0x000fe200078e0200 */
/*00b0*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00d0*/ LEA R2, R0, R0, 0x1 ; /* 0x0000000000027211 */
/* 0x000fcc00078e08ff */
/*00e0*/ IMAD.WIDE R2, R2, R9, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0209 */
/*00f0*/ LDG.E R5, [R2.64+0x4] ; /* 0x0000040402057981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ee8000c1e1900 */
/*0110*/ LDG.E R7, [R2.64+0x8] ; /* 0x0000080402077981 */
/* 0x000f22000c1e1900 */
/*0120*/ FMUL R5, R5, 0.71520000696182250977 ; /* 0x3f37175905057820 */
/* 0x004fc80000400000 */
/*0130*/ FFMA R6, R4, 0.072200000286102294922, R5 ; /* 0x3d93dd9804067823 */
/* 0x008fe40000000005 */
/*0140*/ IMAD.WIDE R4, R0, R9, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fc800078e0209 */
/*0150*/ FFMA R7, R7, 0.21259999275207519531, R6 ; /* 0x3e59b3d007077823 */
/* 0x010fca0000000006 */
/*0160*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0170*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0180*/ BRA 0x180; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13gpu_grayscaleiiPfS_
.globl _Z13gpu_grayscaleiiPfS_
.p2align 8
.type _Z13gpu_grayscaleiiPfS_,@function
_Z13gpu_grayscaleiiPfS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x0
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mad_u64_u32 v[3:4], null, v1, s4, v[0:1]
s_load_b128 s[0:3], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshl_add_u32 v0, v3, 1, v3
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b96 v[0:2], v[0:1], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v1, 0x3f371759, v1
v_fmamk_f32 v5, v0, 0x3d93dd98, v1
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fmamk_f32 v2, v2, 0x3e59b3d0, v5
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13gpu_grayscaleiiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13gpu_grayscaleiiPfS_, .Lfunc_end0-_Z13gpu_grayscaleiiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13gpu_grayscaleiiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13gpu_grayscaleiiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f995c_00000000-6_gpu_grayscale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_
.type _Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_, @function
_Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13gpu_grayscaleiiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_, .-_Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_
.globl _Z13gpu_grayscaleiiPfS_
.type _Z13gpu_grayscaleiiPfS_, @function
_Z13gpu_grayscaleiiPfS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13gpu_grayscaleiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13gpu_grayscaleiiPfS_, .-_Z13gpu_grayscaleiiPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13gpu_grayscaleiiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13gpu_grayscaleiiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpu_grayscale.hip"
.globl _Z28__device_stub__gpu_grayscaleiiPfS_ # -- Begin function _Z28__device_stub__gpu_grayscaleiiPfS_
.p2align 4, 0x90
.type _Z28__device_stub__gpu_grayscaleiiPfS_,@function
_Z28__device_stub__gpu_grayscaleiiPfS_: # @_Z28__device_stub__gpu_grayscaleiiPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13gpu_grayscaleiiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__gpu_grayscaleiiPfS_, .Lfunc_end0-_Z28__device_stub__gpu_grayscaleiiPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13gpu_grayscaleiiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13gpu_grayscaleiiPfS_,@object # @_Z13gpu_grayscaleiiPfS_
.section .rodata,"a",@progbits
.globl _Z13gpu_grayscaleiiPfS_
.p2align 3, 0x0
_Z13gpu_grayscaleiiPfS_:
.quad _Z28__device_stub__gpu_grayscaleiiPfS_
.size _Z13gpu_grayscaleiiPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13gpu_grayscaleiiPfS_"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__gpu_grayscaleiiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13gpu_grayscaleiiPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
__global__ void kernel(void) {} // __global__ indicates that the function is to be run on device (GPU)
int main(void) {
kernel<<<1,1>>>(); // <<<1,1>>> are the arguments passed to the host, the arguments to device will be as usual inside ().
printf("Hello, World!\n");
return 0;
} | code for sm_80
Function : _Z6kernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
__global__ void kernel(void) {} // __global__ indicates that the function is to be run on device (GPU)
int main(void) {
kernel<<<1,1>>>(); // <<<1,1>>> are the arguments passed to the host, the arguments to device will be as usual inside ().
printf("Hello, World!\n");
return 0;
} | .file "tmpxft_00027ff6_00000000-6_hello-world.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z24__device_stub__Z6kernelvv
.type _Z24__device_stub__Z6kernelvv, @function
_Z24__device_stub__Z6kernelvv:
.LFB3694:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z6kernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z24__device_stub__Z6kernelvv, .-_Z24__device_stub__Z6kernelvv
.globl _Z6kernelv
.type _Z6kernelv, @function
_Z6kernelv:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z6kernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z6kernelv, .-_Z6kernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello, World!\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z24__device_stub__Z6kernelvv
jmp .L12
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6kernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
__global__ void kernel(void) {} // __global__ indicates that the function is to be run on device (GPU)
int main(void) {
kernel<<<1,1>>>(); // <<<1,1>>> are the arguments passed to the host, the arguments to device will be as usual inside ().
printf("Hello, World!\n");
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
__global__ void kernel(void) {} // __global__ indicates that the function is to be run on device (GPU)
int main(void) {
kernel<<<1,1>>>(); // <<<1,1>>> are the arguments passed to the host, the arguments to device will be as usual inside ().
printf("Hello, World!\n");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
__global__ void kernel(void) {} // __global__ indicates that the function is to be run on device (GPU)
int main(void) {
kernel<<<1,1>>>(); // <<<1,1>>> are the arguments passed to the host, the arguments to device will be as usual inside ().
printf("Hello, World!\n");
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelv
.globl _Z6kernelv
.p2align 8
.type _Z6kernelv,@function
_Z6kernelv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelv, .Lfunc_end0-_Z6kernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z6kernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
__global__ void kernel(void) {} // __global__ indicates that the function is to be run on device (GPU)
int main(void) {
kernel<<<1,1>>>(); // <<<1,1>>> are the arguments passed to the host, the arguments to device will be as usual inside ().
printf("Hello, World!\n");
return 0;
} | .text
.file "hello-world.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__kernelv # -- Begin function _Z21__device_stub__kernelv
.p2align 4, 0x90
.type _Z21__device_stub__kernelv,@function
_Z21__device_stub__kernelv: # @_Z21__device_stub__kernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z6kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelv, .Lfunc_end0-_Z21__device_stub__kernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z6kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelv,@object # @_Z6kernelv
.section .rodata,"a",@progbits
.globl _Z6kernelv
.p2align 3, 0x0
_Z6kernelv:
.quad _Z21__device_stub__kernelv
.size _Z6kernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6kernelv"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello, World!"
.size .Lstr, 14
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelv
.globl _Z6kernelv
.p2align 8
.type _Z6kernelv,@function
_Z6kernelv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelv, .Lfunc_end0-_Z6kernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z6kernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00027ff6_00000000-6_hello-world.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z24__device_stub__Z6kernelvv
.type _Z24__device_stub__Z6kernelvv, @function
_Z24__device_stub__Z6kernelvv:
.LFB3694:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z6kernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z24__device_stub__Z6kernelvv, .-_Z24__device_stub__Z6kernelvv
.globl _Z6kernelv
.type _Z6kernelv, @function
_Z6kernelv:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z6kernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z6kernelv, .-_Z6kernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello, World!\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z24__device_stub__Z6kernelvv
jmp .L12
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6kernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hello-world.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__kernelv # -- Begin function _Z21__device_stub__kernelv
.p2align 4, 0x90
.type _Z21__device_stub__kernelv,@function
_Z21__device_stub__kernelv: # @_Z21__device_stub__kernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z6kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelv, .Lfunc_end0-_Z21__device_stub__kernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z6kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelv,@object # @_Z6kernelv
.section .rodata,"a",@progbits
.globl _Z6kernelv
.p2align 3, 0x0
_Z6kernelv:
.quad _Z21__device_stub__kernelv
.size _Z6kernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6kernelv"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello, World!"
.size .Lstr, 14
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == 0)
sizes[idx] = accumulatedSize[0];
else if (idx < size)
{
sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
} | code for sm_80
Function : _Z28unaccumulatedPartSizesKerneliPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0050*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fda0003f05270 */
/*0060*/ @!P0 BRA 0x110 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*0070*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fd400000001ff */
/*00a0*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fca00078e0205 */
/*00b0*/ LDG.E R0, [R2.64+-0x4] ; /* 0xfffffc0402007981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0205 */
/*00e0*/ IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107077824 */
/* 0x004fca00078e0a00 */
/*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */
/* 0x000fe200078e00ff */
/*0120*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */
/* 0x000fca0000000f00 */
/*0130*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0140*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */
/* 0x000fe20000000f00 */
/*0150*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */
/* 0x000fca00078e00ff */
/*0160*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0170*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0180*/ BRA 0x180; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == 0)
sizes[idx] = accumulatedSize[0];
else if (idx < size)
{
sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
} | .file "tmpxft_000528ca_00000000-6_unaccumulatedPartSizesKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_
.type _Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_, @function
_Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z28unaccumulatedPartSizesKerneliPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_, .-_Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_
.globl _Z28unaccumulatedPartSizesKerneliPiS_
.type _Z28unaccumulatedPartSizesKerneliPiS_, @function
_Z28unaccumulatedPartSizesKerneliPiS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z28unaccumulatedPartSizesKerneliPiS_, .-_Z28unaccumulatedPartSizesKerneliPiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z28unaccumulatedPartSizesKerneliPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z28unaccumulatedPartSizesKerneliPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == 0)
sizes[idx] = accumulatedSize[0];
else if (idx < size)
{
sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == 0)
sizes[idx] = accumulatedSize[0];
else if (idx < size)
{
sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == 0)
sizes[idx] = accumulatedSize[0];
else if (idx < size)
{
sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z28unaccumulatedPartSizesKerneliPiS_
.globl _Z28unaccumulatedPartSizesKerneliPiS_
.p2align 8
.type _Z28unaccumulatedPartSizesKerneliPiS_,@function
_Z28unaccumulatedPartSizesKerneliPiS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_ne_u32_e32 0, v1
s_xor_b32 s2, exec_lo, s2
s_cbranch_execnz .LBB0_3
s_and_not1_saveexec_b32 s0, s2
s_cbranch_execnz .LBB0_6
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.LBB0_3:
s_load_b32 s0, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s0, v1
s_and_saveexec_b32 s0, vcc_lo
s_cbranch_execz .LBB0_5
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_clause 0x1
global_load_b32 v4, v[2:3], off
global_load_b32 v2, v[2:3], off offset:-4
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v2, v4, v2
global_store_b32 v[0:1], v2, off
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s0
s_and_not1_saveexec_b32 s0, s2
s_cbranch_execz .LBB0_2
.LBB0_6:
v_mov_b32_e32 v0, 0
global_load_b32 v1, v0, s[4:5]
s_waitcnt vmcnt(0)
global_store_b32 v0, v1, s[6:7]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z28unaccumulatedPartSizesKerneliPiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z28unaccumulatedPartSizesKerneliPiS_, .Lfunc_end0-_Z28unaccumulatedPartSizesKerneliPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z28unaccumulatedPartSizesKerneliPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z28unaccumulatedPartSizesKerneliPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx == 0)
sizes[idx] = accumulatedSize[0];
else if (idx < size)
{
sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
} | .text
.file "unaccumulatedPartSizesKernel.hip"
.globl _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_ # -- Begin function _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.p2align 4, 0x90
.type _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_,@function
_Z43__device_stub__unaccumulatedPartSizesKerneliPiS_: # @_Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z28unaccumulatedPartSizesKerneliPiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_, .Lfunc_end0-_Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z28unaccumulatedPartSizesKerneliPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z28unaccumulatedPartSizesKerneliPiS_,@object # @_Z28unaccumulatedPartSizesKerneliPiS_
.section .rodata,"a",@progbits
.globl _Z28unaccumulatedPartSizesKerneliPiS_
.p2align 3, 0x0
_Z28unaccumulatedPartSizesKerneliPiS_:
.quad _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.size _Z28unaccumulatedPartSizesKerneliPiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z28unaccumulatedPartSizesKerneliPiS_"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z28unaccumulatedPartSizesKerneliPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z28unaccumulatedPartSizesKerneliPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0050*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fda0003f05270 */
/*0060*/ @!P0 BRA 0x110 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*0070*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fd400000001ff */
/*00a0*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fca00078e0205 */
/*00b0*/ LDG.E R0, [R2.64+-0x4] ; /* 0xfffffc0402007981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fc800078e0205 */
/*00e0*/ IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107077824 */
/* 0x004fca00078e0a00 */
/*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */
/* 0x000fe200078e00ff */
/*0120*/ MOV R2, c[0x0][0x168] ; /* 0x00005a0000027a02 */
/* 0x000fca0000000f00 */
/*0130*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0140*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */
/* 0x000fe20000000f00 */
/*0150*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */
/* 0x000fca00078e00ff */
/*0160*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0170*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0180*/ BRA 0x180; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z28unaccumulatedPartSizesKerneliPiS_
.globl _Z28unaccumulatedPartSizesKerneliPiS_
.p2align 8
.type _Z28unaccumulatedPartSizesKerneliPiS_,@function
_Z28unaccumulatedPartSizesKerneliPiS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_ne_u32_e32 0, v1
s_xor_b32 s2, exec_lo, s2
s_cbranch_execnz .LBB0_3
s_and_not1_saveexec_b32 s0, s2
s_cbranch_execnz .LBB0_6
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.LBB0_3:
s_load_b32 s0, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s0, v1
s_and_saveexec_b32 s0, vcc_lo
s_cbranch_execz .LBB0_5
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_clause 0x1
global_load_b32 v4, v[2:3], off
global_load_b32 v2, v[2:3], off offset:-4
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v2, v4, v2
global_store_b32 v[0:1], v2, off
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s0
s_and_not1_saveexec_b32 s0, s2
s_cbranch_execz .LBB0_2
.LBB0_6:
v_mov_b32_e32 v0, 0
global_load_b32 v1, v0, s[4:5]
s_waitcnt vmcnt(0)
global_store_b32 v0, v1, s[6:7]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z28unaccumulatedPartSizesKerneliPiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z28unaccumulatedPartSizesKerneliPiS_, .Lfunc_end0-_Z28unaccumulatedPartSizesKerneliPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z28unaccumulatedPartSizesKerneliPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z28unaccumulatedPartSizesKerneliPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000528ca_00000000-6_unaccumulatedPartSizesKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_
.type _Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_, @function
_Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z28unaccumulatedPartSizesKerneliPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_, .-_Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_
.globl _Z28unaccumulatedPartSizesKerneliPiS_
.type _Z28unaccumulatedPartSizesKerneliPiS_, @function
_Z28unaccumulatedPartSizesKerneliPiS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z28unaccumulatedPartSizesKerneliPiS_iPiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z28unaccumulatedPartSizesKerneliPiS_, .-_Z28unaccumulatedPartSizesKerneliPiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z28unaccumulatedPartSizesKerneliPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z28unaccumulatedPartSizesKerneliPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "unaccumulatedPartSizesKernel.hip"
.globl _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_ # -- Begin function _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.p2align 4, 0x90
.type _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_,@function
_Z43__device_stub__unaccumulatedPartSizesKerneliPiS_: # @_Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z28unaccumulatedPartSizesKerneliPiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_, .Lfunc_end0-_Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z28unaccumulatedPartSizesKerneliPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z28unaccumulatedPartSizesKerneliPiS_,@object # @_Z28unaccumulatedPartSizesKerneliPiS_
.section .rodata,"a",@progbits
.globl _Z28unaccumulatedPartSizesKerneliPiS_
.p2align 3, 0x0
_Z28unaccumulatedPartSizesKerneliPiS_:
.quad _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.size _Z28unaccumulatedPartSizesKerneliPiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z28unaccumulatedPartSizesKerneliPiS_"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z43__device_stub__unaccumulatedPartSizesKerneliPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z28unaccumulatedPartSizesKerneliPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "stdio.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
__constant__ int dim;
__global__ void test(int *gpu_Num){
*gpu_Num = dim;
}
int main(int argc, char* argv[])
{
int num = 25;
cudaMemcpyToSymbol(dim,&num,sizeof(int),0,cudaMemcpyHostToDevice);
int *gpu_Num;
cudaMalloc(&gpu_Num,sizeof(int));
test<<<1,1>>>(gpu_Num);
int hostResult;
cudaMemcpy(&hostResult,gpu_Num,sizeof(int),cudaMemcpyDefault);
printf("Result: %i\n",hostResult);
} | code for sm_80
Function : _Z4testPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x3][0x0] ; /* 0x00c00000ff057624 */
/* 0x000fe200078e00ff */
/*0020*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */
/* 0x000fca0000000f00 */
/*0050*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0060*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0070*/ BRA 0x70; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "stdio.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
__constant__ int dim;
__global__ void test(int *gpu_Num){
*gpu_Num = dim;
}
int main(int argc, char* argv[])
{
int num = 25;
cudaMemcpyToSymbol(dim,&num,sizeof(int),0,cudaMemcpyHostToDevice);
int *gpu_Num;
cudaMalloc(&gpu_Num,sizeof(int));
test<<<1,1>>>(gpu_Num);
int hostResult;
cudaMemcpy(&hostResult,gpu_Num,sizeof(int),cudaMemcpyDefault);
printf("Result: %i\n",hostResult);
} | .file "tmpxft_0016eada_00000000-6_tosymbol.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z4testPiPi
.type _Z23__device_stub__Z4testPiPi, @function
_Z23__device_stub__Z4testPiPi:
.LFB3694:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4testPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z23__device_stub__Z4testPiPi, .-_Z23__device_stub__Z4testPiPi
.globl _Z4testPi
.type _Z4testPi, @function
_Z4testPi:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z4testPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z4testPi, .-_Z4testPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Result: %i\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $25, 4(%rsp)
leaq 4(%rsp), %rsi
movl $1, %r8d
movl $0, %ecx
movl $4, %edx
leaq _ZL3dim(%rip), %rdi
call cudaMemcpyToSymbol@PLT
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 28(%rsp), %rdi
movl $4, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
call cudaMemcpy@PLT
movl 28(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 8(%rsp), %rdi
call _Z23__device_stub__Z4testPiPi
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z4testPi"
.LC2:
.string "dim"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z4testPi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL3dim(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL3dim
.comm _ZL3dim,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "stdio.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
__constant__ int dim;
__global__ void test(int *gpu_Num){
*gpu_Num = dim;
}
int main(int argc, char* argv[])
{
int num = 25;
cudaMemcpyToSymbol(dim,&num,sizeof(int),0,cudaMemcpyHostToDevice);
int *gpu_Num;
cudaMalloc(&gpu_Num,sizeof(int));
test<<<1,1>>>(gpu_Num);
int hostResult;
cudaMemcpy(&hostResult,gpu_Num,sizeof(int),cudaMemcpyDefault);
printf("Result: %i\n",hostResult);
} | #include "stdio.h"
#include <iostream>
#include <hip/hip_runtime.h>
__constant__ int dim;
__global__ void test(int *gpu_Num){
*gpu_Num = dim;
}
int main(int argc, char* argv[])
{
int num = 25;
hipMemcpyToSymbol(HIP_SYMBOL(dim),&num,sizeof(int),0,hipMemcpyHostToDevice);
int *gpu_Num;
hipMalloc(&gpu_Num,sizeof(int));
test<<<1,1>>>(gpu_Num);
int hostResult;
hipMemcpy(&hostResult,gpu_Num,sizeof(int),hipMemcpyDefault);
printf("Result: %i\n",hostResult);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "stdio.h"
#include <iostream>
#include <hip/hip_runtime.h>
__constant__ int dim;
__global__ void test(int *gpu_Num){
*gpu_Num = dim;
}
int main(int argc, char* argv[])
{
int num = 25;
hipMemcpyToSymbol(HIP_SYMBOL(dim),&num,sizeof(int),0,hipMemcpyHostToDevice);
int *gpu_Num;
hipMalloc(&gpu_Num,sizeof(int));
test<<<1,1>>>(gpu_Num);
int hostResult;
hipMemcpy(&hostResult,gpu_Num,sizeof(int),hipMemcpyDefault);
printf("Result: %i\n",hostResult);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4testPi
.globl _Z4testPi
.p2align 8
.type _Z4testPi,@function
_Z4testPi:
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, dim@rel32@lo+4
s_addc_u32 s3, s3, dim@rel32@hi+12
s_load_b32 s2, s[2:3], 0x0
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4testPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4testPi, .Lfunc_end0-_Z4testPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected dim
.type dim,@object
.section .bss,"aw",@nobits
.globl dim
.p2align 2, 0x0
dim:
.long 0
.size dim, 4
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym dim
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4testPi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z4testPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "stdio.h"
#include <iostream>
#include <hip/hip_runtime.h>
__constant__ int dim;
__global__ void test(int *gpu_Num){
*gpu_Num = dim;
}
int main(int argc, char* argv[])
{
int num = 25;
hipMemcpyToSymbol(HIP_SYMBOL(dim),&num,sizeof(int),0,hipMemcpyHostToDevice);
int *gpu_Num;
hipMalloc(&gpu_Num,sizeof(int));
test<<<1,1>>>(gpu_Num);
int hostResult;
hipMemcpy(&hostResult,gpu_Num,sizeof(int),hipMemcpyDefault);
printf("Result: %i\n",hostResult);
} | .text
.file "tosymbol.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z19__device_stub__testPi # -- Begin function _Z19__device_stub__testPi
.p2align 4, 0x90
.type _Z19__device_stub__testPi,@function
_Z19__device_stub__testPi: # @_Z19__device_stub__testPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z4testPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z19__device_stub__testPi, .Lfunc_end0-_Z19__device_stub__testPi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movl $25, 4(%rsp)
leaq 4(%rsp), %rsi
movl $dim, %edi
movl $4, %edx
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 32(%rsp)
leaq 8(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 8(%rsp), %rsi
movl 16(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z4testPi, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 24(%rsp), %rsi
leaq 8(%rsp), %rdi
movl $4, %edx
movl $4, %ecx
callq hipMemcpy
movl 8(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4testPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $dim, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type dim,@object # @dim
.local dim
.comm dim,4,4
.type _Z4testPi,@object # @_Z4testPi
.section .rodata,"a",@progbits
.globl _Z4testPi
.p2align 3, 0x0
_Z4testPi:
.quad _Z19__device_stub__testPi
.size _Z4testPi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Result: %i\n"
.size .L.str, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4testPi"
.size .L__unnamed_1, 10
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "dim"
.size .L__unnamed_2, 4
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__testPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym dim
.addrsig_sym _Z4testPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4testPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x3][0x0] ; /* 0x00c00000ff057624 */
/* 0x000fe200078e00ff */
/*0020*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */
/* 0x000fca0000000f00 */
/*0050*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0060*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0070*/ BRA 0x70; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4testPi
.globl _Z4testPi
.p2align 8
.type _Z4testPi,@function
_Z4testPi:
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, dim@rel32@lo+4
s_addc_u32 s3, s3, dim@rel32@hi+12
s_load_b32 s2, s[2:3], 0x0
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4testPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4testPi, .Lfunc_end0-_Z4testPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected dim
.type dim,@object
.section .bss,"aw",@nobits
.globl dim
.p2align 2, 0x0
dim:
.long 0
.size dim, 4
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym dim
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4testPi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z4testPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016eada_00000000-6_tosymbol.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z4testPiPi
.type _Z23__device_stub__Z4testPiPi, @function
_Z23__device_stub__Z4testPiPi:
.LFB3694:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4testPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z23__device_stub__Z4testPiPi, .-_Z23__device_stub__Z4testPiPi
.globl _Z4testPi
.type _Z4testPi, @function
_Z4testPi:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z4testPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z4testPi, .-_Z4testPi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Result: %i\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $25, 4(%rsp)
leaq 4(%rsp), %rsi
movl $1, %r8d
movl $0, %ecx
movl $4, %edx
leaq _ZL3dim(%rip), %rdi
call cudaMemcpyToSymbol@PLT
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 28(%rsp), %rdi
movl $4, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
call cudaMemcpy@PLT
movl 28(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 8(%rsp), %rdi
call _Z23__device_stub__Z4testPiPi
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z4testPi"
.LC2:
.string "dim"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z4testPi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL3dim(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL3dim
.comm _ZL3dim,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "tosymbol.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z19__device_stub__testPi # -- Begin function _Z19__device_stub__testPi
.p2align 4, 0x90
.type _Z19__device_stub__testPi,@function
_Z19__device_stub__testPi: # @_Z19__device_stub__testPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z4testPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z19__device_stub__testPi, .Lfunc_end0-_Z19__device_stub__testPi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movl $25, 4(%rsp)
leaq 4(%rsp), %rsi
movl $dim, %edi
movl $4, %edx
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 32(%rsp)
leaq 8(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 8(%rsp), %rsi
movl 16(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z4testPi, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 24(%rsp), %rsi
leaq 8(%rsp), %rdi
movl $4, %edx
movl $4, %ecx
callq hipMemcpy
movl 8(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4testPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $dim, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type dim,@object # @dim
.local dim
.comm dim,4,4
.type _Z4testPi,@object # @_Z4testPi
.section .rodata,"a",@progbits
.globl _Z4testPi
.p2align 3, 0x0
_Z4testPi:
.quad _Z19__device_stub__testPi
.size _Z4testPi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Result: %i\n"
.size .L.str, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4testPi"
.size .L__unnamed_1, 10
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "dim"
.size .L__unnamed_2, 4
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__testPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym dim
.addrsig_sym _Z4testPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
//function definition
__global__ void Fun1(int *a, int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun2(int *a, int *b, int *c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun3(int *a, int *b, int *c, int n)
{
int idx = threadIdx.x;
int id = blockIdx.x * blockDim.x;
idx += id;
if(idx < n)
c[idx] = a[idx] + b[idx];
}
//program
int main()
{
int a[20], b[20], c[20], n, i;
printf("Enter n ");
scanf("%d", &n);
printf("\nEnter set 1\n");
for(i = 0; i < n; i++)
scanf("%d", &a[i]);
printf("Enter set 2\n");
for(i = 0; i < n; i++)
scanf("%d", &b[i]);
int *d_a, *d_b, *d_c, *d_d, *d_e;
int size = sizeof(int) * 20;
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
cudaMalloc((void**)&d_d, size);
cudaMalloc((void**)&d_e, size);
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
//func def
Fun1<<<n, 1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("Res 1\n");
for(i = 0; i < n; i++)
printf("%d \n", c[i]);
int d[20];
//func def
Fun2<<<1, n>>>(d_a, d_b, d_d);
cudaMemcpy(&d, d_d, size, cudaMemcpyDeviceToHost);
printf("Res 2\n");
for(i = 0; i < n; i++)
printf("%d \n", d[i]);
//init
int e[20];
int thread = 256;
int xyz = (int)(n / thread);
//func def
Fun3<<<xyz, 256>>>(d_a, d_b, d_e, n);
cudaMemcpy(&e, d_e, size, cudaMemcpyDeviceToHost);
printf("Res 3\n");
for(i = 0; i < n; i++)
printf("%d \n", e[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_d);
return 0;
} | code for sm_80
Function : _Z4Fun3PiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z4Fun2PiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z4Fun1PiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
//function definition
__global__ void Fun1(int *a, int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun2(int *a, int *b, int *c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun3(int *a, int *b, int *c, int n)
{
int idx = threadIdx.x;
int id = blockIdx.x * blockDim.x;
idx += id;
if(idx < n)
c[idx] = a[idx] + b[idx];
}
//program
int main()
{
int a[20], b[20], c[20], n, i;
printf("Enter n ");
scanf("%d", &n);
printf("\nEnter set 1\n");
for(i = 0; i < n; i++)
scanf("%d", &a[i]);
printf("Enter set 2\n");
for(i = 0; i < n; i++)
scanf("%d", &b[i]);
int *d_a, *d_b, *d_c, *d_d, *d_e;
int size = sizeof(int) * 20;
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
cudaMalloc((void**)&d_d, size);
cudaMalloc((void**)&d_e, size);
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
//func def
Fun1<<<n, 1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("Res 1\n");
for(i = 0; i < n; i++)
printf("%d \n", c[i]);
int d[20];
//func def
Fun2<<<1, n>>>(d_a, d_b, d_d);
cudaMemcpy(&d, d_d, size, cudaMemcpyDeviceToHost);
printf("Res 2\n");
for(i = 0; i < n; i++)
printf("%d \n", d[i]);
//init
int e[20];
int thread = 256;
int xyz = (int)(n / thread);
//func def
Fun3<<<xyz, 256>>>(d_a, d_b, d_e, n);
cudaMemcpy(&e, d_e, size, cudaMemcpyDeviceToHost);
printf("Res 3\n");
for(i = 0; i < n; i++)
printf("%d \n", e[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_d);
return 0;
} | .file "tmpxft_00148e36_00000000-6_q1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z4Fun1PiS_S_PiS_S_
.type _Z27__device_stub__Z4Fun1PiS_S_PiS_S_, @function
_Z27__device_stub__Z4Fun1PiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4Fun1PiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z4Fun1PiS_S_PiS_S_, .-_Z27__device_stub__Z4Fun1PiS_S_PiS_S_
.globl _Z4Fun1PiS_S_
.type _Z4Fun1PiS_S_, @function
_Z4Fun1PiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4Fun1PiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z4Fun1PiS_S_, .-_Z4Fun1PiS_S_
.globl _Z27__device_stub__Z4Fun2PiS_S_PiS_S_
.type _Z27__device_stub__Z4Fun2PiS_S_PiS_S_, @function
_Z27__device_stub__Z4Fun2PiS_S_PiS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4Fun2PiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z27__device_stub__Z4Fun2PiS_S_PiS_S_, .-_Z27__device_stub__Z4Fun2PiS_S_PiS_S_
.globl _Z4Fun2PiS_S_
.type _Z4Fun2PiS_S_, @function
_Z4Fun2PiS_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4Fun2PiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z4Fun2PiS_S_, .-_Z4Fun2PiS_S_
.globl _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i
.type _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i, @function
_Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i:
.LFB2086:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4Fun3PiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i, .-_Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i
.globl _Z4Fun3PiS_S_i
.type _Z4Fun3PiS_S_i, @function
_Z4Fun3PiS_S_i:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z4Fun3PiS_S_i, .-_Z4Fun3PiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Enter n "
.LC1:
.string "%d"
.LC2:
.string "\nEnter set 1\n"
.LC3:
.string "Enter set 2\n"
.LC4:
.string "Res 1\n"
.LC5:
.string "%d \n"
.LC6:
.string "Res 2\n"
.LC7:
.string "Res 3\n"
.LC8:
.string "%d \n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $496, %rsp
.cfi_def_cfa_offset 528
movq %fs:40, %rax
movq %rax, 488(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
leaq 12(%rsp), %rsi
leaq .LC1(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L28
leaq 80(%rsp), %rbp
movl $0, %ebx
leaq .LC1(%rip), %r12
.L29:
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 12(%rsp)
jg .L29
.L28:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L30
leaq 160(%rsp), %rbp
movl $0, %ebx
leaq .LC1(%rip), %r12
.L31:
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 12(%rsp)
jg .L31
.L30:
leaq 16(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 80(%rsp), %rsi
movl $1, %ecx
movl $80, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 160(%rsp), %rsi
movl $1, %ecx
movl $80, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl 12(%rsp), %eax
movl %eax, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L48
.L32:
leaq 240(%rsp), %rdi
movl $2, %ecx
movl $80, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %eax
testl %eax, %eax
jle .L33
movl $0, %ebx
leaq .LC5(%rip), %rbp
.L34:
movl 240(%rsp,%rbx,4), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %eax
addq $1, %rbx
cmpl %ebx, %eax
jg .L34
.L33:
movl %eax, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L49
.L35:
leaq 320(%rsp), %rdi
movl $2, %ecx
movl $80, %edx
movq 40(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %eax
testl %eax, %eax
jle .L36
movl $0, %ebx
leaq .LC5(%rip), %rbp
.L37:
movl 320(%rsp,%rbx,4), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %eax
addq $1, %rbx
cmpl %ebx, %eax
jg .L37
.L36:
movl $256, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
leal 255(%rax), %edx
testl %eax, %eax
cmovs %edx, %eax
sarl $8, %eax
movl %eax, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L38:
leaq 400(%rsp), %rdi
movl $2, %ecx
movl $80, %edx
movq 48(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L39
movl $0, %ebx
leaq .LC8(%rip), %rbp
.L40:
movl 400(%rsp,%rbx,4), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 12(%rsp)
jg .L40
.L39:
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 488(%rsp), %rax
subq %fs:40, %rax
jne .L51
movl $0, %eax
addq $496, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z27__device_stub__Z4Fun1PiS_S_PiS_S_
jmp .L32
.L49:
movq 40(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z27__device_stub__Z4Fun2PiS_S_PiS_S_
jmp .L35
.L50:
movl 12(%rsp), %ecx
movq 48(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i
jmp .L38
.L51:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC9:
.string "_Z4Fun3PiS_S_i"
.LC10:
.string "_Z4Fun2PiS_S_"
.LC11:
.string "_Z4Fun1PiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z4Fun3PiS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z4Fun2PiS_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z4Fun1PiS_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
//function definition
__global__ void Fun1(int *a, int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun2(int *a, int *b, int *c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun3(int *a, int *b, int *c, int n)
{
int idx = threadIdx.x;
int id = blockIdx.x * blockDim.x;
idx += id;
if(idx < n)
c[idx] = a[idx] + b[idx];
}
//program
int main()
{
int a[20], b[20], c[20], n, i;
printf("Enter n ");
scanf("%d", &n);
printf("\nEnter set 1\n");
for(i = 0; i < n; i++)
scanf("%d", &a[i]);
printf("Enter set 2\n");
for(i = 0; i < n; i++)
scanf("%d", &b[i]);
int *d_a, *d_b, *d_c, *d_d, *d_e;
int size = sizeof(int) * 20;
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
cudaMalloc((void**)&d_d, size);
cudaMalloc((void**)&d_e, size);
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
//func def
Fun1<<<n, 1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("Res 1\n");
for(i = 0; i < n; i++)
printf("%d \n", c[i]);
int d[20];
//func def
Fun2<<<1, n>>>(d_a, d_b, d_d);
cudaMemcpy(&d, d_d, size, cudaMemcpyDeviceToHost);
printf("Res 2\n");
for(i = 0; i < n; i++)
printf("%d \n", d[i]);
//init
int e[20];
int thread = 256;
int xyz = (int)(n / thread);
//func def
Fun3<<<xyz, 256>>>(d_a, d_b, d_e, n);
cudaMemcpy(&e, d_e, size, cudaMemcpyDeviceToHost);
printf("Res 3\n");
for(i = 0; i < n; i++)
printf("%d \n", e[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_d);
return 0;
} | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
//function definition
__global__ void Fun1(int *a, int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun2(int *a, int *b, int *c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun3(int *a, int *b, int *c, int n)
{
int idx = threadIdx.x;
int id = blockIdx.x * blockDim.x;
idx += id;
if(idx < n)
c[idx] = a[idx] + b[idx];
}
//program
int main()
{
int a[20], b[20], c[20], n, i;
printf("Enter n ");
scanf("%d", &n);
printf("\nEnter set 1\n");
for(i = 0; i < n; i++)
scanf("%d", &a[i]);
printf("Enter set 2\n");
for(i = 0; i < n; i++)
scanf("%d", &b[i]);
int *d_a, *d_b, *d_c, *d_d, *d_e;
int size = sizeof(int) * 20;
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
hipMalloc((void**)&d_d, size);
hipMalloc((void**)&d_e, size);
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
//func def
Fun1<<<n, 1>>>(d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("Res 1\n");
for(i = 0; i < n; i++)
printf("%d \n", c[i]);
int d[20];
//func def
Fun2<<<1, n>>>(d_a, d_b, d_d);
hipMemcpy(&d, d_d, size, hipMemcpyDeviceToHost);
printf("Res 2\n");
for(i = 0; i < n; i++)
printf("%d \n", d[i]);
//init
int e[20];
int thread = 256;
int xyz = (int)(n / thread);
//func def
Fun3<<<xyz, 256>>>(d_a, d_b, d_e, n);
hipMemcpy(&e, d_e, size, hipMemcpyDeviceToHost);
printf("Res 3\n");
for(i = 0; i < n; i++)
printf("%d \n", e[i]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_d);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
//function definition
__global__ void Fun1(int *a, int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun2(int *a, int *b, int *c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun3(int *a, int *b, int *c, int n)
{
int idx = threadIdx.x;
int id = blockIdx.x * blockDim.x;
idx += id;
if(idx < n)
c[idx] = a[idx] + b[idx];
}
//program
int main()
{
int a[20], b[20], c[20], n, i;
printf("Enter n ");
scanf("%d", &n);
printf("\nEnter set 1\n");
for(i = 0; i < n; i++)
scanf("%d", &a[i]);
printf("Enter set 2\n");
for(i = 0; i < n; i++)
scanf("%d", &b[i]);
int *d_a, *d_b, *d_c, *d_d, *d_e;
int size = sizeof(int) * 20;
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
hipMalloc((void**)&d_d, size);
hipMalloc((void**)&d_e, size);
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
//func def
Fun1<<<n, 1>>>(d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("Res 1\n");
for(i = 0; i < n; i++)
printf("%d \n", c[i]);
int d[20];
//func def
Fun2<<<1, n>>>(d_a, d_b, d_d);
hipMemcpy(&d, d_d, size, hipMemcpyDeviceToHost);
printf("Res 2\n");
for(i = 0; i < n; i++)
printf("%d \n", d[i]);
//init
int e[20];
int thread = 256;
int xyz = (int)(n / thread);
//func def
Fun3<<<xyz, 256>>>(d_a, d_b, d_e, n);
hipMemcpy(&e, d_e, size, hipMemcpyDeviceToHost);
printf("Res 3\n");
for(i = 0; i < n; i++)
printf("%d \n", e[i]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_d);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4Fun1PiS_S_
.globl _Z4Fun1PiS_S_
.p2align 8
.type _Z4Fun1PiS_S_,@function
_Z4Fun1PiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s2, s15
s_ashr_i32 s3, s15, 31
s_load_b64 s[0:1], s[0:1], 0x10
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s4, s2
s_addc_u32 s5, s5, s3
s_add_u32 s6, s6, s2
s_addc_u32 s7, s7, s3
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s5, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4Fun1PiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4Fun1PiS_S_, .Lfunc_end0-_Z4Fun1PiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4Fun2PiS_S_
.globl _Z4Fun2PiS_S_
.p2align 8
.type _Z4Fun2PiS_S_,@function
_Z4Fun2PiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4Fun2PiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4Fun2PiS_S_, .Lfunc_end1-_Z4Fun2PiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4Fun3PiS_S_i
.globl _Z4Fun3PiS_S_i
.p2align 8
.type _Z4Fun3PiS_S_i,@function
_Z4Fun3PiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB2_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB2_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4Fun3PiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z4Fun3PiS_S_i, .Lfunc_end2-_Z4Fun3PiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4Fun1PiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z4Fun1PiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4Fun2PiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z4Fun2PiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4Fun3PiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4Fun3PiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
//function definition
__global__ void Fun1(int *a, int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun2(int *a, int *b, int *c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void Fun3(int *a, int *b, int *c, int n)
{
int idx = threadIdx.x;
int id = blockIdx.x * blockDim.x;
idx += id;
if(idx < n)
c[idx] = a[idx] + b[idx];
}
//program
int main()
{
int a[20], b[20], c[20], n, i;
printf("Enter n ");
scanf("%d", &n);
printf("\nEnter set 1\n");
for(i = 0; i < n; i++)
scanf("%d", &a[i]);
printf("Enter set 2\n");
for(i = 0; i < n; i++)
scanf("%d", &b[i]);
int *d_a, *d_b, *d_c, *d_d, *d_e;
int size = sizeof(int) * 20;
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
hipMalloc((void**)&d_d, size);
hipMalloc((void**)&d_e, size);
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
//func def
Fun1<<<n, 1>>>(d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("Res 1\n");
for(i = 0; i < n; i++)
printf("%d \n", c[i]);
int d[20];
//func def
Fun2<<<1, n>>>(d_a, d_b, d_d);
hipMemcpy(&d, d_d, size, hipMemcpyDeviceToHost);
printf("Res 2\n");
for(i = 0; i < n; i++)
printf("%d \n", d[i]);
//init
int e[20];
int thread = 256;
int xyz = (int)(n / thread);
//func def
Fun3<<<xyz, 256>>>(d_a, d_b, d_e, n);
hipMemcpy(&e, d_e, size, hipMemcpyDeviceToHost);
printf("Res 3\n");
for(i = 0; i < n; i++)
printf("%d \n", e[i]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_d);
return 0;
} | .text
.file "q1.hip"
.globl _Z19__device_stub__Fun1PiS_S_ # -- Begin function _Z19__device_stub__Fun1PiS_S_
.p2align 4, 0x90
.type _Z19__device_stub__Fun1PiS_S_,@function
_Z19__device_stub__Fun1PiS_S_: # @_Z19__device_stub__Fun1PiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4Fun1PiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z19__device_stub__Fun1PiS_S_, .Lfunc_end0-_Z19__device_stub__Fun1PiS_S_
.cfi_endproc
# -- End function
.globl _Z19__device_stub__Fun2PiS_S_ # -- Begin function _Z19__device_stub__Fun2PiS_S_
.p2align 4, 0x90
.type _Z19__device_stub__Fun2PiS_S_,@function
_Z19__device_stub__Fun2PiS_S_: # @_Z19__device_stub__Fun2PiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4Fun2PiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z19__device_stub__Fun2PiS_S_, .Lfunc_end1-_Z19__device_stub__Fun2PiS_S_
.cfi_endproc
# -- End function
.globl _Z19__device_stub__Fun3PiS_S_i # -- Begin function _Z19__device_stub__Fun3PiS_S_i
.p2align 4, 0x90
.type _Z19__device_stub__Fun3PiS_S_i,@function
_Z19__device_stub__Fun3PiS_S_i: # @_Z19__device_stub__Fun3PiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4Fun3PiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z19__device_stub__Fun3PiS_S_i, .Lfunc_end2-_Z19__device_stub__Fun3PiS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 560
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 4(%rsp), %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl $.Lstr, %edi
callq puts@PLT
cmpl $0, 4(%rsp)
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
leaq 448(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %r14
movslq 4(%rsp), %rax
addq $4, %rbx
cmpq %rax, %r14
jl .LBB3_2
.LBB3_3: # %._crit_edge
movl $.Lstr.1, %edi
callq puts@PLT
cmpl $0, 4(%rsp)
jle .LBB3_6
# %bb.4: # %.lr.ph74.preheader
leaq 368(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_5: # %.lr.ph74
# =>This Inner Loop Header: Depth=1
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %r14
movslq 4(%rsp), %rax
addq $4, %rbx
cmpq %rax, %r14
jl .LBB3_5
.LBB3_6: # %._crit_edge75
movabsq $4294967296, %rbx # imm = 0x100000000
leaq 16(%rsp), %rdi
movl $80, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $80, %esi
callq hipMalloc
leaq 88(%rsp), %rdi
movl $80, %esi
callq hipMalloc
leaq 80(%rsp), %rdi
movl $80, %esi
callq hipMalloc
leaq 112(%rsp), %rdi
movl $80, %esi
callq hipMalloc
movq 16(%rsp), %rdi
leaq 448(%rsp), %rsi
movl $80, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 368(%rsp), %rsi
movl $80, %edx
movl $1, %ecx
callq hipMemcpy
movl 4(%rsp), %edi
orq %rbx, %rdi
leaq 1(%rbx), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_8
# %bb.7:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq 88(%rsp), %rdx
movq %rax, 48(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 40(%rsp)
leaq 48(%rsp), %rax
movq %rax, 288(%rsp)
leaq 64(%rsp), %rax
movq %rax, 296(%rsp)
leaq 40(%rsp), %rax
movq %rax, 304(%rsp)
leaq 208(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 208(%rsp), %rsi
movl 216(%rsp), %edx
movq 128(%rsp), %rcx
movl 136(%rsp), %r8d
leaq 288(%rsp), %r9
movl $_Z4Fun1PiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_8:
movq 88(%rsp), %rsi
leaq 288(%rsp), %rdi
movl $80, %edx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.2, %edi
callq puts@PLT
movl 4(%rsp), %eax
testl %eax, %eax
jle .LBB3_11
# %bb.9: # %.lr.ph78.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_10: # %.lr.ph78
# =>This Inner Loop Header: Depth=1
movl 288(%rsp,%r14,4), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
incq %r14
movslq 4(%rsp), %rax
cmpq %rax, %r14
jl .LBB3_10
.LBB3_11: # %._crit_edge79
movl %eax, %edx
orq %rbx, %rdx
leaq 1(%rbx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_13
# %bb.12:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq 80(%rsp), %rdx
movq %rax, 64(%rsp)
movq %rcx, 40(%rsp)
movq %rdx, 32(%rsp)
leaq 64(%rsp), %rax
movq %rax, 208(%rsp)
leaq 40(%rsp), %rax
movq %rax, 216(%rsp)
leaq 32(%rsp), %rax
movq %rax, 224(%rsp)
leaq 128(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 208(%rsp), %r9
movl $_Z4Fun2PiS_S_, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_13:
movq 80(%rsp), %rsi
leaq 208(%rsp), %rdi
movl $80, %edx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.3, %edi
callq puts@PLT
movl 4(%rsp), %eax
testl %eax, %eax
jle .LBB3_16
# %bb.14: # %.lr.ph82.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_15: # %.lr.ph82
# =>This Inner Loop Header: Depth=1
movl 208(%rsp,%r14,4), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
incq %r14
movslq 4(%rsp), %rax
cmpq %rax, %r14
jl .LBB3_15
.LBB3_16: # %._crit_edge83
leal 255(%rax), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $8, %edi
orq %rbx, %rdi
addq $256, %rbx # imm = 0x100
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_18
# %bb.17:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq 112(%rsp), %rdx
movl 4(%rsp), %esi
movq %rax, 40(%rsp)
movq %rcx, 32(%rsp)
movq %rdx, 24(%rsp)
movl %esi, 108(%rsp)
leaq 40(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
leaq 108(%rsp), %rax
movq %rax, 152(%rsp)
leaq 48(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z4Fun3PiS_S_i, %edi
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_18:
movq 112(%rsp), %rsi
leaq 128(%rsp), %rdi
movl $80, %edx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.4, %edi
callq puts@PLT
cmpl $0, 4(%rsp)
jle .LBB3_21
# %bb.19: # %.lr.ph87.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_20: # %.lr.ph87
# =>This Inner Loop Header: Depth=1
movl 128(%rsp,%rbx,4), %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
incq %rbx
movslq 4(%rsp), %rax
cmpq %rax, %rbx
jl .LBB3_20
.LBB3_21: # %._crit_edge88
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq 80(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4Fun1PiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4Fun2PiS_S_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4Fun3PiS_S_i, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4Fun1PiS_S_,@object # @_Z4Fun1PiS_S_
.section .rodata,"a",@progbits
.globl _Z4Fun1PiS_S_
.p2align 3, 0x0
_Z4Fun1PiS_S_:
.quad _Z19__device_stub__Fun1PiS_S_
.size _Z4Fun1PiS_S_, 8
.type _Z4Fun2PiS_S_,@object # @_Z4Fun2PiS_S_
.globl _Z4Fun2PiS_S_
.p2align 3, 0x0
_Z4Fun2PiS_S_:
.quad _Z19__device_stub__Fun2PiS_S_
.size _Z4Fun2PiS_S_, 8
.type _Z4Fun3PiS_S_i,@object # @_Z4Fun3PiS_S_i
.globl _Z4Fun3PiS_S_i
.p2align 3, 0x0
_Z4Fun3PiS_S_i:
.quad _Z19__device_stub__Fun3PiS_S_i
.size _Z4Fun3PiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter n "
.size .L.str, 9
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%d \n"
.size .L.str.5, 6
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%d \n"
.size .L.str.8, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4Fun1PiS_S_"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z4Fun2PiS_S_"
.size .L__unnamed_2, 14
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z4Fun3PiS_S_i"
.size .L__unnamed_3, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\nEnter set 1"
.size .Lstr, 13
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Enter set 2"
.size .Lstr.1, 12
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Res 1"
.size .Lstr.2, 6
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Res 2"
.size .Lstr.3, 6
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "Res 3"
.size .Lstr.4, 6
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__Fun1PiS_S_
.addrsig_sym _Z19__device_stub__Fun2PiS_S_
.addrsig_sym _Z19__device_stub__Fun3PiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4Fun1PiS_S_
.addrsig_sym _Z4Fun2PiS_S_
.addrsig_sym _Z4Fun3PiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4Fun3PiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z4Fun2PiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z4Fun1PiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4Fun1PiS_S_
.globl _Z4Fun1PiS_S_
.p2align 8
.type _Z4Fun1PiS_S_,@function
_Z4Fun1PiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s2, s15
s_ashr_i32 s3, s15, 31
s_load_b64 s[0:1], s[0:1], 0x10
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s4, s2
s_addc_u32 s5, s5, s3
s_add_u32 s6, s6, s2
s_addc_u32 s7, s7, s3
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s5, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4Fun1PiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4Fun1PiS_S_, .Lfunc_end0-_Z4Fun1PiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4Fun2PiS_S_
.globl _Z4Fun2PiS_S_
.p2align 8
.type _Z4Fun2PiS_S_,@function
_Z4Fun2PiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4Fun2PiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4Fun2PiS_S_, .Lfunc_end1-_Z4Fun2PiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4Fun3PiS_S_i
.globl _Z4Fun3PiS_S_i
.p2align 8
.type _Z4Fun3PiS_S_i,@function
_Z4Fun3PiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB2_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB2_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4Fun3PiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z4Fun3PiS_S_i, .Lfunc_end2-_Z4Fun3PiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4Fun1PiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z4Fun1PiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4Fun2PiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z4Fun2PiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4Fun3PiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4Fun3PiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00148e36_00000000-6_q1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z4Fun1PiS_S_PiS_S_
.type _Z27__device_stub__Z4Fun1PiS_S_PiS_S_, @function
_Z27__device_stub__Z4Fun1PiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4Fun1PiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z4Fun1PiS_S_PiS_S_, .-_Z27__device_stub__Z4Fun1PiS_S_PiS_S_
.globl _Z4Fun1PiS_S_
.type _Z4Fun1PiS_S_, @function
_Z4Fun1PiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4Fun1PiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z4Fun1PiS_S_, .-_Z4Fun1PiS_S_
.globl _Z27__device_stub__Z4Fun2PiS_S_PiS_S_
.type _Z27__device_stub__Z4Fun2PiS_S_PiS_S_, @function
_Z27__device_stub__Z4Fun2PiS_S_PiS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4Fun2PiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z27__device_stub__Z4Fun2PiS_S_PiS_S_, .-_Z27__device_stub__Z4Fun2PiS_S_PiS_S_
.globl _Z4Fun2PiS_S_
.type _Z4Fun2PiS_S_, @function
_Z4Fun2PiS_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4Fun2PiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z4Fun2PiS_S_, .-_Z4Fun2PiS_S_
.globl _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i
.type _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i, @function
_Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i:
.LFB2086:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4Fun3PiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i, .-_Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i
.globl _Z4Fun3PiS_S_i
.type _Z4Fun3PiS_S_i, @function
_Z4Fun3PiS_S_i:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z4Fun3PiS_S_i, .-_Z4Fun3PiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Enter n "
.LC1:
.string "%d"
.LC2:
.string "\nEnter set 1\n"
.LC3:
.string "Enter set 2\n"
.LC4:
.string "Res 1\n"
.LC5:
.string "%d \n"
.LC6:
.string "Res 2\n"
.LC7:
.string "Res 3\n"
.LC8:
.string "%d \n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $496, %rsp
.cfi_def_cfa_offset 528
movq %fs:40, %rax
movq %rax, 488(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
leaq 12(%rsp), %rsi
leaq .LC1(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L28
leaq 80(%rsp), %rbp
movl $0, %ebx
leaq .LC1(%rip), %r12
.L29:
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 12(%rsp)
jg .L29
.L28:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L30
leaq 160(%rsp), %rbp
movl $0, %ebx
leaq .LC1(%rip), %r12
.L31:
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 12(%rsp)
jg .L31
.L30:
leaq 16(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $80, %esi
call cudaMalloc@PLT
leaq 80(%rsp), %rsi
movl $1, %ecx
movl $80, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 160(%rsp), %rsi
movl $1, %ecx
movl $80, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl 12(%rsp), %eax
movl %eax, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L48
.L32:
leaq 240(%rsp), %rdi
movl $2, %ecx
movl $80, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %eax
testl %eax, %eax
jle .L33
movl $0, %ebx
leaq .LC5(%rip), %rbp
.L34:
movl 240(%rsp,%rbx,4), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %eax
addq $1, %rbx
cmpl %ebx, %eax
jg .L34
.L33:
movl %eax, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L49
.L35:
leaq 320(%rsp), %rdi
movl $2, %ecx
movl $80, %edx
movq 40(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %eax
testl %eax, %eax
jle .L36
movl $0, %ebx
leaq .LC5(%rip), %rbp
.L37:
movl 320(%rsp,%rbx,4), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rsp), %eax
addq $1, %rbx
cmpl %ebx, %eax
jg .L37
.L36:
movl $256, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
leal 255(%rax), %edx
testl %eax, %eax
cmovs %edx, %eax
sarl $8, %eax
movl %eax, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L38:
leaq 400(%rsp), %rdi
movl $2, %ecx
movl $80, %edx
movq 48(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L39
movl $0, %ebx
leaq .LC8(%rip), %rbp
.L40:
movl 400(%rsp,%rbx,4), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 12(%rsp)
jg .L40
.L39:
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 488(%rsp), %rax
subq %fs:40, %rax
jne .L51
movl $0, %eax
addq $496, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z27__device_stub__Z4Fun1PiS_S_PiS_S_
jmp .L32
.L49:
movq 40(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z27__device_stub__Z4Fun2PiS_S_PiS_S_
jmp .L35
.L50:
movl 12(%rsp), %ecx
movq 48(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z28__device_stub__Z4Fun3PiS_S_iPiS_S_i
jmp .L38
.L51:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC9:
.string "_Z4Fun3PiS_S_i"
.LC10:
.string "_Z4Fun2PiS_S_"
.LC11:
.string "_Z4Fun1PiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z4Fun3PiS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z4Fun2PiS_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z4Fun1PiS_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "q1.hip"
.globl _Z19__device_stub__Fun1PiS_S_ # -- Begin function _Z19__device_stub__Fun1PiS_S_
.p2align 4, 0x90
.type _Z19__device_stub__Fun1PiS_S_,@function
_Z19__device_stub__Fun1PiS_S_: # @_Z19__device_stub__Fun1PiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4Fun1PiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z19__device_stub__Fun1PiS_S_, .Lfunc_end0-_Z19__device_stub__Fun1PiS_S_
.cfi_endproc
# -- End function
.globl _Z19__device_stub__Fun2PiS_S_ # -- Begin function _Z19__device_stub__Fun2PiS_S_
.p2align 4, 0x90
.type _Z19__device_stub__Fun2PiS_S_,@function
_Z19__device_stub__Fun2PiS_S_: # @_Z19__device_stub__Fun2PiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4Fun2PiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z19__device_stub__Fun2PiS_S_, .Lfunc_end1-_Z19__device_stub__Fun2PiS_S_
.cfi_endproc
# -- End function
.globl _Z19__device_stub__Fun3PiS_S_i # -- Begin function _Z19__device_stub__Fun3PiS_S_i
.p2align 4, 0x90
.type _Z19__device_stub__Fun3PiS_S_i,@function
_Z19__device_stub__Fun3PiS_S_i: # @_Z19__device_stub__Fun3PiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4Fun3PiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z19__device_stub__Fun3PiS_S_i, .Lfunc_end2-_Z19__device_stub__Fun3PiS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 560
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 4(%rsp), %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl $.Lstr, %edi
callq puts@PLT
cmpl $0, 4(%rsp)
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
leaq 448(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %r14
movslq 4(%rsp), %rax
addq $4, %rbx
cmpq %rax, %r14
jl .LBB3_2
.LBB3_3: # %._crit_edge
movl $.Lstr.1, %edi
callq puts@PLT
cmpl $0, 4(%rsp)
jle .LBB3_6
# %bb.4: # %.lr.ph74.preheader
leaq 368(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_5: # %.lr.ph74
# =>This Inner Loop Header: Depth=1
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %r14
movslq 4(%rsp), %rax
addq $4, %rbx
cmpq %rax, %r14
jl .LBB3_5
.LBB3_6: # %._crit_edge75
movabsq $4294967296, %rbx # imm = 0x100000000
leaq 16(%rsp), %rdi
movl $80, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $80, %esi
callq hipMalloc
leaq 88(%rsp), %rdi
movl $80, %esi
callq hipMalloc
leaq 80(%rsp), %rdi
movl $80, %esi
callq hipMalloc
leaq 112(%rsp), %rdi
movl $80, %esi
callq hipMalloc
movq 16(%rsp), %rdi
leaq 448(%rsp), %rsi
movl $80, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 368(%rsp), %rsi
movl $80, %edx
movl $1, %ecx
callq hipMemcpy
movl 4(%rsp), %edi
orq %rbx, %rdi
leaq 1(%rbx), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_8
# %bb.7:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq 88(%rsp), %rdx
movq %rax, 48(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 40(%rsp)
leaq 48(%rsp), %rax
movq %rax, 288(%rsp)
leaq 64(%rsp), %rax
movq %rax, 296(%rsp)
leaq 40(%rsp), %rax
movq %rax, 304(%rsp)
leaq 208(%rsp), %rdi
leaq 128(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 208(%rsp), %rsi
movl 216(%rsp), %edx
movq 128(%rsp), %rcx
movl 136(%rsp), %r8d
leaq 288(%rsp), %r9
movl $_Z4Fun1PiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_8:
movq 88(%rsp), %rsi
leaq 288(%rsp), %rdi
movl $80, %edx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.2, %edi
callq puts@PLT
movl 4(%rsp), %eax
testl %eax, %eax
jle .LBB3_11
# %bb.9: # %.lr.ph78.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_10: # %.lr.ph78
# =>This Inner Loop Header: Depth=1
movl 288(%rsp,%r14,4), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
incq %r14
movslq 4(%rsp), %rax
cmpq %rax, %r14
jl .LBB3_10
.LBB3_11: # %._crit_edge79
movl %eax, %edx
orq %rbx, %rdx
leaq 1(%rbx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_13
# %bb.12:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq 80(%rsp), %rdx
movq %rax, 64(%rsp)
movq %rcx, 40(%rsp)
movq %rdx, 32(%rsp)
leaq 64(%rsp), %rax
movq %rax, 208(%rsp)
leaq 40(%rsp), %rax
movq %rax, 216(%rsp)
leaq 32(%rsp), %rax
movq %rax, 224(%rsp)
leaq 128(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 208(%rsp), %r9
movl $_Z4Fun2PiS_S_, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_13:
movq 80(%rsp), %rsi
leaq 208(%rsp), %rdi
movl $80, %edx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.3, %edi
callq puts@PLT
movl 4(%rsp), %eax
testl %eax, %eax
jle .LBB3_16
# %bb.14: # %.lr.ph82.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_15: # %.lr.ph82
# =>This Inner Loop Header: Depth=1
movl 208(%rsp,%r14,4), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
incq %r14
movslq 4(%rsp), %rax
cmpq %rax, %r14
jl .LBB3_15
.LBB3_16: # %._crit_edge83
leal 255(%rax), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $8, %edi
orq %rbx, %rdi
addq $256, %rbx # imm = 0x100
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_18
# %bb.17:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq 112(%rsp), %rdx
movl 4(%rsp), %esi
movq %rax, 40(%rsp)
movq %rcx, 32(%rsp)
movq %rdx, 24(%rsp)
movl %esi, 108(%rsp)
leaq 40(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
leaq 108(%rsp), %rax
movq %rax, 152(%rsp)
leaq 48(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z4Fun3PiS_S_i, %edi
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_18:
movq 112(%rsp), %rsi
leaq 128(%rsp), %rdi
movl $80, %edx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.4, %edi
callq puts@PLT
cmpl $0, 4(%rsp)
jle .LBB3_21
# %bb.19: # %.lr.ph87.preheader
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_20: # %.lr.ph87
# =>This Inner Loop Header: Depth=1
movl 128(%rsp,%rbx,4), %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
incq %rbx
movslq 4(%rsp), %rax
cmpq %rax, %rbx
jl .LBB3_20
.LBB3_21: # %._crit_edge88
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq 80(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4Fun1PiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4Fun2PiS_S_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4Fun3PiS_S_i, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4Fun1PiS_S_,@object # @_Z4Fun1PiS_S_
.section .rodata,"a",@progbits
.globl _Z4Fun1PiS_S_
.p2align 3, 0x0
_Z4Fun1PiS_S_:
.quad _Z19__device_stub__Fun1PiS_S_
.size _Z4Fun1PiS_S_, 8
.type _Z4Fun2PiS_S_,@object # @_Z4Fun2PiS_S_
.globl _Z4Fun2PiS_S_
.p2align 3, 0x0
_Z4Fun2PiS_S_:
.quad _Z19__device_stub__Fun2PiS_S_
.size _Z4Fun2PiS_S_, 8
.type _Z4Fun3PiS_S_i,@object # @_Z4Fun3PiS_S_i
.globl _Z4Fun3PiS_S_i
.p2align 3, 0x0
_Z4Fun3PiS_S_i:
.quad _Z19__device_stub__Fun3PiS_S_i
.size _Z4Fun3PiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter n "
.size .L.str, 9
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "%d \n"
.size .L.str.5, 6
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%d \n"
.size .L.str.8, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4Fun1PiS_S_"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z4Fun2PiS_S_"
.size .L__unnamed_2, 14
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z4Fun3PiS_S_i"
.size .L__unnamed_3, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\nEnter set 1"
.size .Lstr, 13
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Enter set 2"
.size .Lstr.1, 12
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Res 1"
.size .Lstr.2, 6
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Res 2"
.size .Lstr.3, 6
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "Res 3"
.size .Lstr.4, 6
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__Fun1PiS_S_
.addrsig_sym _Z19__device_stub__Fun2PiS_S_
.addrsig_sym _Z19__device_stub__Fun3PiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4Fun1PiS_S_
.addrsig_sym _Z4Fun2PiS_S_
.addrsig_sym _Z4Fun3PiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
// Kernel: nearest-neighbour upscale. Each thread copies one output element,
// reading from the input element selected by translate_idx.
__global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3)
{
// output offset:
// Flat thread id over a 2-D grid/block: x component, plus a y component that
// strides by the total x extent (blockDim.x * gridDim.x).
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;  // guard threads beyond the element count
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
} | code for sm_80
Function : _Z7upscalePfS_liiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ ULDC UR6, c[0x0][0xc] ; /* 0x0000030000067ab9 */
/* 0x000fe40000000800 */
/*0030*/ ULDC.64 UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0050*/ UIMAD UR4, UR4, UR6, URZ ; /* 0x00000006040472a4 */
/* 0x000fc6000f8e023f */
/*0060*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002600 */
/*0070*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc6000f8e023f */
/*0080*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0090*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */
/* 0x001fe400078e0200 */
/*00a0*/ IMAD R3, R2, UR4, R5 ; /* 0x0000000402037c24 */
/* 0x002fca000f8e0205 */
/*00b0*/ IADD3 R0, P1, R0, R3, RZ ; /* 0x0000000300007210 */
/* 0x000fc80007f3e0ff */
/*00c0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f06070 */
/*00d0*/ IMAD.X R3, RZ, RZ, RZ, P1 ; /* 0x000000ffff037224 */
/* 0x000fca00008e06ff */
/*00e0*/ ISETP.GE.AND.EX P0, PT, R3, c[0x0][0x174], PT, P0 ; /* 0x00005d0003007a0c */
/* 0x000fda0003f06300 */
/*00f0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0100*/ IABS R2, c[0x0][0x184] ; /* 0x0000610000027a13 */
/* 0x000fe20000000000 */
/*0110*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0120*/ IABS R4, c[0x0][0x180] ; /* 0x0000600000047a13 */
/* 0x000fe40000000000 */
/*0130*/ I2F.RP R5, R2 ; /* 0x0000000200057306 */
/* 0x000e220000209400 */
/*0140*/ IABS R10, c[0x0][0x178] ; /* 0x00005e00000a7a13 */
/* 0x000fce0000000000 */
/*0150*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x001e240000001000 */
/*0160*/ IADD3 R6, R5, 0xffffffe, RZ ; /* 0x0ffffffe05067810 */
/* 0x001fe40007ffe0ff */
/*0170*/ IABS R5, R0 ; /* 0x0000000000057213 */
/* 0x000fc80000000000 */
/*0180*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */
/* 0x000064000021f000 */
/*0190*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x001fe400078e00ff */
/*01a0*/ IMAD.MOV R9, RZ, RZ, -R7 ; /* 0x000000ffff097224 */
/* 0x002fc800078e0a07 */
/*01b0*/ IMAD R9, R9, R2, RZ ; /* 0x0000000209097224 */
/* 0x000fc800078e02ff */
/*01c0*/ IMAD.HI.U32 R7, R7, R9, R6 ; /* 0x0000000907077227 */
/* 0x000fe400078e0006 */
/*01d0*/ I2F.RP R9, R4 ; /* 0x0000000400097306 */
/* 0x000e280000209400 */
/*01e0*/ IMAD.HI.U32 R8, R7, R5, RZ ; /* 0x0000000507087227 */
/* 0x000fc800078e00ff */
/*01f0*/ IMAD.MOV R6, RZ, RZ, -R8 ; /* 0x000000ffff067224 */
/* 0x000fc800078e0a08 */
/*0200*/ IMAD R5, R2.reuse, R6, R5 ; /* 0x0000000602057224 */
/* 0x040fe200078e0205 */
/*0210*/ MUFU.RCP R9, R9 ; /* 0x0000000900097308 */
/* 0x001e280000001000 */
/*0220*/ ISETP.GT.U32.AND P1, PT, R2, R5, PT ; /* 0x000000050200720c */
/* 0x000fda0003f24070 */
/*0230*/ @!P1 IMAD.IADD R5, R5, 0x1, -R2 ; /* 0x0000000105059824 */
/* 0x000fe200078e0a02 */
/*0240*/ @!P1 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108089810 */
/* 0x000fe40007ffe0ff */
/*0250*/ IADD3 R6, R9, 0xffffffe, RZ ; /* 0x0ffffffe09067810 */
/* 0x001fe40007ffe0ff */
/*0260*/ ISETP.GE.U32.AND P0, PT, R5, R2, PT ; /* 0x000000020500720c */
/* 0x000fe40003f06070 */
/*0270*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */
/* 0x000062000021f000 */
/*0280*/ LOP3.LUT R5, R0, c[0x0][0x184], RZ, 0x3c, !PT ; /* 0x0000610000057a12 */
/* 0x000fe400078e3cff */
/*0290*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0x184], PT ; /* 0x00006100ff007a0c */
/* 0x000fc40003f25270 */
/*02a0*/ ISETP.GE.AND P2, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f46270 */
/*02b0*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x001fca00078e00ff */
/*02c0*/ @P0 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108080810 */
/* 0x000fe20007ffe0ff */
/*02d0*/ IMAD.MOV R5, RZ, RZ, -R7 ; /* 0x000000ffff057224 */
/* 0x002fcc00078e0a07 */
/*02e0*/ @!P2 IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff08a224 */
/* 0x000fe200078e0a08 */
/*02f0*/ @!P1 LOP3.LUT R8, RZ, c[0x0][0x184], RZ, 0x33, !PT ; /* 0x00006100ff089a12 */
/* 0x000fe200078e33ff */
/*0300*/ IMAD R5, R5, R4, RZ ; /* 0x0000000405057224 */
/* 0x000fc600078e02ff */
/*0310*/ IABS R9, R8 ; /* 0x0000000800097213 */
/* 0x000fe20000000000 */
/*0320*/ IMAD.HI.U32 R6, R7, R5, R6 ; /* 0x0000000507067227 */
/* 0x000fc800078e0006 */
/*0330*/ IMAD.MOV.U32 R7, RZ, RZ, R10 ; /* 0x000000ffff077224 */
/* 0x000fe400078e000a */
/*0340*/ IMAD.HI.U32 R5, R6, R9, RZ ; /* 0x0000000906057227 */
/* 0x000fe400078e00ff */
/*0350*/ I2F.RP R12, R7 ; /* 0x00000007000c7306 */
/* 0x000e240000209400 */
/*0360*/ IMAD.MOV R6, RZ, RZ, -R5 ; /* 0x000000ffff067224 */
/* 0x000fc800078e0a05 */
/*0370*/ IMAD R9, R4, R6, R9 ; /* 0x0000000604097224 */
/* 0x000fe200078e0209 */
/*0380*/ LOP3.LUT R6, R8, c[0x0][0x180], RZ, 0x3c, !PT ; /* 0x0000600008067a12 */
/* 0x000fc800078e3cff */
/*0390*/ ISETP.GT.U32.AND P1, PT, R4, R9, PT ; /* 0x000000090400720c */
/* 0x000fe40003f24070 */
/*03a0*/ ISETP.GE.AND P2, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f46270 */
/*03b0*/ MUFU.RCP R12, R12 ; /* 0x0000000c000c7308 */
/* 0x001e340000001000 */
/*03c0*/ @!P1 IMAD.IADD R9, R9, 0x1, -R4 ; /* 0x0000000109099824 */
/* 0x000fe200078e0a04 */
/*03d0*/ @!P1 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105059810 */
/* 0x000fc40007ffe0ff */
/*03e0*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0x180], PT ; /* 0x00006000ff007a0c */
/* 0x000fe40003f25270 */
/*03f0*/ ISETP.GE.U32.AND P0, PT, R9, R4, PT ; /* 0x000000040900720c */
/* 0x000fe40003f06070 */
/*0400*/ IADD3 R10, R12, 0xffffffe, RZ ; /* 0x0ffffffe0c0a7810 */
/* 0x001fc80007ffe0ff */
/*0410*/ F2I.FTZ.U32.TRUNC.NTZ R11, R10 ; /* 0x0000000a000b7305 */
/* 0x00006e000021f000 */
/*0420*/ @P0 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105050810 */
/* 0x000fe20007ffe0ff */
/*0430*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x001fc800078e00ff */
/*0440*/ @!P2 IMAD.MOV R5, RZ, RZ, -R5 ; /* 0x000000ffff05a224 */
/* 0x000fe200078e0a05 */
/*0450*/ @!P1 LOP3.LUT R5, RZ, c[0x0][0x180], RZ, 0x33, !PT ; /* 0x00006000ff059a12 */
/* 0x000fe200078e33ff */
/*0460*/ IMAD.MOV R6, RZ, RZ, -R11 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a0b */
/*0470*/ IMAD.MOV R9, RZ, RZ, -R5 ; /* 0x000000ffff097224 */
/* 0x000fe400078e0a05 */
/*0480*/ IMAD R13, R6, R7, RZ ; /* 0x00000007060d7224 */
/* 0x000fe400078e02ff */
/*0490*/ IMAD R6, R9, c[0x0][0x180], R8 ; /* 0x0000600009067a24 */
/* 0x000fe400078e0208 */
/*04a0*/ IMAD.HI.U32 R13, R11, R13, R10 ; /* 0x0000000d0b0d7227 */
/* 0x000fc600078e000a */
/*04b0*/ IABS R12, R6 ; /* 0x00000006000c7213 */
/* 0x000fe20000000000 */
/*04c0*/ IMAD.MOV R11, RZ, RZ, -R8 ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e0a08 */
/*04d0*/ LOP3.LUT R6, R6, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e0006067a12 */
/* 0x000fe200078e3cff */
/*04e0*/ IMAD.HI.U32 R9, R13, R4, RZ ; /* 0x000000040d097227 */
/* 0x000fc800078e00ff */
/*04f0*/ IMAD R11, R11, c[0x0][0x184], R0 ; /* 0x000061000b0b7a24 */
/* 0x000fe400078e0200 */
/*0500*/ IMAD.MOV R14, RZ, RZ, -R9 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0a09 */
/*0510*/ IMAD.HI.U32 R8, R13, R12, RZ ; /* 0x0000000c0d087227 */
/* 0x000fe200078e00ff */
/*0520*/ IABS R18, R11 ; /* 0x0000000b00127213 */
/* 0x000fe40000000000 */
/*0530*/ LOP3.LUT R11, R11, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e000b0b7a12 */
/* 0x000fe200078e3cff */
/*0540*/ IMAD R14, R7, R14, R4 ; /* 0x0000000e070e7224 */
/* 0x000fe400078e0204 */
/*0550*/ IMAD.MOV R15, RZ, RZ, -R8 ; /* 0x000000ffff0f7224 */
/* 0x000fc400078e0a08 */
/*0560*/ IMAD.HI.U32 R10, R13, R2, RZ ; /* 0x000000020d0a7227 */
/* 0x000fe200078e00ff */
/*0570*/ ISETP.GT.U32.AND P1, PT, R7, R14, PT ; /* 0x0000000e0700720c */
/* 0x000fc60003f24070 */
/*0580*/ IMAD.HI.U32 R4, R13, R18, RZ ; /* 0x000000120d047227 */
/* 0x000fc800078e00ff */
/*0590*/ IMAD R12, R7.reuse, R15, R12 ; /* 0x0000000f070c7224 */
/* 0x040fe400078e020c */
/*05a0*/ IMAD.MOV R16, RZ, RZ, -R10 ; /* 0x000000ffff107224 */
/* 0x000fe400078e0a0a */
/*05b0*/ IMAD.MOV R13, RZ, RZ, -R4 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0a04 */
/*05c0*/ ISETP.GT.U32.AND P0, PT, R7.reuse, R12, PT ; /* 0x0000000c0700720c */
/* 0x040fe20003f04070 */
/*05d0*/ IMAD R16, R7, R16, R2 ; /* 0x0000001007107224 */
/* 0x000fe200078e0202 */
/*05e0*/ @!P1 IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109099810 */
/* 0x000fe20007ffe0ff */
/*05f0*/ IMAD R2, R7.reuse, R13, R18 ; /* 0x0000000d07027224 */
/* 0x040fe400078e0212 */
/*0600*/ @!P1 IMAD.IADD R14, R14, 0x1, -R7 ; /* 0x000000010e0e9824 */
/* 0x000fe200078e0a07 */
/*0610*/ ISETP.GT.U32.AND P5, PT, R7.reuse, R16, PT ; /* 0x000000100700720c */
/* 0x040fe20003fa4070 */
/*0620*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff0d7624 */
/* 0x000fe200078e00ff */
/*0630*/ ISETP.GT.U32.AND P6, PT, R7, R2, PT ; /* 0x000000020700720c */
/* 0x000fc40003fc4070 */
/*0640*/ ISETP.GE.U32.AND P2, PT, R14, R7.reuse, PT ; /* 0x000000070e00720c */
/* 0x080fe40003f46070 */
/*0650*/ LOP3.LUT R13, R13, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e000d0d7a12 */
/* 0x000fe200078e3cff */
/*0660*/ @!P0 IMAD.IADD R12, R12, 0x1, -R7.reuse ; /* 0x000000010c0c8824 */
/* 0x100fe200078e0a07 */
/*0670*/ @!P0 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108088810 */
/* 0x000fe40007ffe0ff */
/*0680*/ ISETP.GE.AND P3, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fe40003f66270 */
/*0690*/ ISETP.GE.U32.AND P4, PT, R12, R7.reuse, PT ; /* 0x000000070c00720c */
/* 0x080fe20003f86070 */
/*06a0*/ @!P5 IMAD.IADD R16, R16, 0x1, -R7.reuse ; /* 0x000000011010d824 */
/* 0x100fe200078e0a07 */
/*06b0*/ @!P5 IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0ad810 */
/* 0x000fe20007ffe0ff */
/*06c0*/ IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff0c7624 */
/* 0x000fe200078e00ff */
/*06d0*/ @!P6 IADD3 R4, R4, 0x1, RZ ; /* 0x000000010404e810 */
/* 0x000fe20007ffe0ff */
/*06e0*/ @!P6 IMAD.IADD R2, R2, 0x1, -R7 ; /* 0x000000010202e824 */
/* 0x000fe200078e0a07 */
/*06f0*/ ISETP.GE.U32.AND P0, PT, R16, R7, PT ; /* 0x000000071000720c */
/* 0x000fc40003f06070 */
/*0700*/ LOP3.LUT R12, R12, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e000c0c7a12 */
/* 0x000fe400078e3cff */
/*0710*/ ISETP.GE.U32.AND P1, PT, R2, R7, PT ; /* 0x000000070200720c */
/* 0x000fe40003f26070 */
/*0720*/ ISETP.GE.AND P6, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003fc6270 */
/*0730*/ @P2 IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109092810 */
/* 0x000fe40007ffe0ff */
/*0740*/ ISETP.GE.AND P2, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */
/* 0x000fe40003f46270 */
/*0750*/ ISETP.GE.AND P5, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fe20003fa6270 */
/*0760*/ @!P3 IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff09b224 */
/* 0x000fe200078e0a09 */
/*0770*/ @P4 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108084810 */
/* 0x000fc40007ffe0ff */
/*0780*/ @P0 IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a0810 */
/* 0x000fe40007ffe0ff */
/*0790*/ @P1 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104041810 */
/* 0x000fe20007ffe0ff */
/*07a0*/ @!P6 IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff08e224 */
/* 0x000fe200078e0a08 */
/*07b0*/ ISETP.NE.AND P4, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe40003f85270 */
/*07c0*/ LOP3.LUT R7, RZ, c[0x0][0x178], RZ, 0x33, !PT ; /* 0x00005e00ff077a12 */
/* 0x000fe200078e33ff */
/*07d0*/ @!P2 IMAD.MOV R10, RZ, RZ, -R10 ; /* 0x000000ffff0aa224 */
/* 0x000fe400078e0a0a */
/*07e0*/ @!P5 IMAD.MOV R4, RZ, RZ, -R4 ; /* 0x000000ffff04d224 */
/* 0x000fe200078e0a04 */
/*07f0*/ SEL R2, R7, R9, !P4 ; /* 0x0000000907027207 */
/* 0x000fc40006000000 */
/*0800*/ SEL R8, R7.reuse, R8, !P4 ; /* 0x0000000807087207 */
/* 0x040fe40006000000 */
/*0810*/ SEL R10, R7.reuse, R10, !P4 ; /* 0x0000000a070a7207 */
/* 0x040fe40006000000 */
/*0820*/ SEL R7, R7, R4, !P4 ; /* 0x0000000407077207 */
/* 0x000fe20006000000 */
/*0830*/ IMAD R2, R5, R2, R8 ; /* 0x0000000205027224 */
/* 0x000fe400078e0208 */
/*0840*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe400078e00ff */
/*0850*/ IMAD R4, R2, R10, R7 ; /* 0x0000000a02047224 */
/* 0x000fc800078e0207 */
/*0860*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fcc00078e0205 */
/*0870*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0880*/ LEA R2, P0, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fc800078010ff */
/*0890*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P0 ; /* 0x00005b0000037a11 */
/* 0x000fca00000f1403 */
/*08a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x004fe2000c101904 */
/*08b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*08c0*/ BRA 0x8c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3)
{
// output offset:
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
} | .file "tmpxft_00185578_00000000-6_upscale.cudafe1.cpp"
.text
#APP
#NO_APP
# Cleanup helper registered via atexit() by the module constructor below:
# returns the saved fat-binary handle to the CUDA runtime at program exit.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp                    # keep %rsp 16-byte aligned at the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi   # handle saved at registration
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side body emitted for the __device__ function translate_idx(): device
# functions have no host implementation, so the generated body just calls
# exit(1) if it is ever reached from host code.
.globl _Z13translate_idxiiiii
.type _Z13translate_idxiiiii, @function
_Z13translate_idxiiiii:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax                       # push/pop pair — net no-op on the stack
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)                # exit status 1
movl 12(%rsp), %edi
call exit@PLT                    # does not return
.cfi_endproc
.LFE2027:
.size _Z13translate_idxiiiii, .-_Z13translate_idxiiiii
# Kernel-launch stub generated by cudafe for
#   upscale(float*, float*, long, int, int, int, int).
# Spills the six register arguments, builds the array of argument pointers
# expected by cudaLaunchKernel, pops the <<<...>>> launch configuration, and
# launches the kernel. Guarded by a stack-protector canary.
.globl _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii
.type _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii, @function
_Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii:
.LFB2052:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)              # spill args: input
movq %rsi, 32(%rsp)              # output
movq %rdx, 24(%rsp)              # no_elements
movl %ecx, 20(%rsp)              # scale_factor
movl %r8d, 16(%rsp)              # d1
movl %r9d, 12(%rsp)              # d2
movq %fs:40, %rax                # install stack-protector canary
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax              # argument-pointer array at 112(%rsp)...
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax             # 7th arg (d3) lives in the caller's frame
movq %rax, 160(%rsp)
movl $1, 64(%rsp)                # default 1x1x1 grid and block dims
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT   # fetch grid/block/shmem/stream from <<<>>>
testl %eax, %eax
je .L9                           # 0 => configuration available, do the launch
.L5:
movq 168(%rsp), %rax             # verify canary before returning
subq %fs:40, %rax
jne .L10
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 56(%rsp)                   # stream / shared-mem stack args for the call
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9              # args array (offsets shifted by the 2 pushes)
movq 92(%rsp), %rcx              # blockDim
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi              # gridDim
movl 88(%rsp), %edx
leaq _Z7upscalePfS_liiii(%rip), %rdi   # host stub address identifies the kernel
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L5
.L10:
call __stack_chk_fail@PLT        # canary mismatch — abort
.cfi_endproc
.LFE2052:
.size _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii, .-_Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii
# Host-callable symbol carrying the kernel's mangled name. It re-pushes the
# 7th argument (passed on the caller's stack) and tail-delegates to the
# launch stub _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii.
.globl _Z7upscalePfS_liiii
.type _Z7upscalePfS_liiii, @function
_Z7upscalePfS_liiii:
.LFB2053:
.cfi_startproc
endbr64
subq $16, %rsp                   # pad so the re-pushed arg lands correctly aligned
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax              # reload the stack-passed 7th argument
pushq %rax
.cfi_def_cfa_offset 32
call _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z7upscalePfS_liiii, .-_Z7upscalePfS_liiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7upscalePfS_liiii"
.text
# Module constructor (referenced from .init_array below): registers the fat
# binary and the upscale kernel with the CUDA runtime at program start, and
# schedules unregistration via atexit().
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi   # fat-binary wrapper descriptor
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)   # keep handle for later unregister
pushq $0                         # four trailing (null) pointer args on the stack
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d                   # -1: presumably "no thread limit" — TODO confirm
leaq .LC0(%rip), %rdx            # device-side kernel name string
movq %rdx, %rcx
leaq _Z7upscalePfS_liiii(%rip), %rsi   # host stub used as the kernel key
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT                  # unregister the fat binary at exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3)
{
// output offset:
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
} | #include <hip/hip_runtime.h>
#include "includes.h"
// Map a flat index of the upscaled output tensor back to the flat index of the
// corresponding input element (nearest-neighbour upscaling).
// ii is decoded as a linear index over a 4-D layout [x][y:d1][z:d2][w:d3];
// the two innermost coordinates and extents are divided by scale_factor to
// locate the source element, then re-linearised.
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
int x, y, z, w;
// Peel coordinates off the flat index, innermost axis (w) first.
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
// Collapse the two innermost coordinates and their extents by scale_factor.
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
// Re-linearise (x, y, z, w) against the reduced input extents.
return (((x*d1+y)*d2)+z)*d3+w;
}
// Kernel: nearest-neighbour upscale. Each thread copies one output element,
// reading from the input element selected by translate_idx.
__global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3)
{
// output offset:
// Flat thread id over a 2-D grid/block: x component, plus a y component that
// strides by the total x extent (blockDim.x * gridDim.x).
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;  // guard threads beyond the element count
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3)
{
// output offset:
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7upscalePfS_liiii
.globl _Z7upscalePfS_liiii
.p2align 8
.type _Z7upscalePfS_liiii,@function
_Z7upscalePfS_liiii:
s_clause 0x2
s_load_b32 s4, s[0:1], 0x34
s_load_b32 s5, s[0:1], 0x28
s_load_b64 s[2:3], s[0:1], 0x10
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s6, s4, 0xffff
s_mul_i32 s5, s5, s15
s_lshr_b32 s4, s4, 16
s_mul_i32 s5, s5, s6
v_mad_u64_u32 v[2:3], null, s14, s6, v[1:2]
v_mad_u64_u32 v[3:4], null, s5, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s4, v3, v2
v_add_co_ci_u32_e64 v1, null, 0, 0, s4
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[0:1]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b64 s[2:3], s[0:1], 0x20
v_ashrrev_i32_e32 v4, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, v0, v4
v_xor_b32_e32 v5, v5, v4
s_waitcnt lgkmcnt(0)
s_ashr_i32 s4, s3, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_add_i32 s5, s3, s4
v_xor_b32_e32 v4, s4, v4
s_xor_b32 s5, s5, s4
v_cvt_f32_u32_e32 v2, s5
s_sub_i32 s6, 0, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v3, s6, v2
s_ashr_i32 s6, s2, 31
s_add_i32 s7, s2, s6
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s7, s7, s6
v_cvt_f32_u32_e32 v6, s7
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v3, v2, v3
s_sub_i32 s8, 0, s7
v_rcp_iflag_f32_e32 v6, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v3
v_mul_hi_u32 v2, v5, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v6, 0x4f7ffffe, v6
v_mul_lo_u32 v3, v2, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v3, v5, v3
v_add_nc_u32_e32 v5, 1, v2
v_subrev_nc_u32_e32 v7, s5, v3
v_cmp_le_u32_e32 vcc_lo, s5, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v2, v2, v5 :: v_dual_cndmask_b32 v3, v3, v7
v_add_nc_u32_e32 v5, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s5, v3
v_cvt_u32_f32_e32 v3, v6
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_lo_u32 v5, s8, v3
s_load_b32 s8, s[0:1], 0x18
v_xor_b32_e32 v2, v2, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v3, v5
v_sub_nc_u32_e32 v2, v2, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v4, 31, v2
v_add_nc_u32_e32 v3, v3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v6, v2, v4
s_waitcnt lgkmcnt(0)
s_ashr_i32 s9, s8, 31
s_add_i32 s8, s8, s9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_xor_b32_e32 v5, v6, v4
v_xor_b32_e32 v4, s6, v4
s_xor_b32 s8, s8, s9
v_mul_hi_u32 v3, v5, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v6, v3, s7
v_sub_nc_u32_e32 v5, v5, v6
v_add_nc_u32_e32 v6, 1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_subrev_nc_u32_e32 v7, s7, v5
v_cmp_le_u32_e32 vcc_lo, s7, v5
v_cndmask_b32_e32 v3, v3, v6, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v5, v5, v7, vcc_lo
v_cvt_f32_u32_e32 v7, s8
v_add_nc_u32_e32 v6, 1, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s7, v5
v_rcp_iflag_f32_e32 v5, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v3, v3, v6, vcc_lo
v_xor_b32_e32 v3, v3, v4
s_delay_alu instid0(VALU_DEP_1)
v_sub_nc_u32_e32 v6, v3, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v3, 0x4f7ffffe, v5
v_mul_lo_u32 v5, v2, s3
s_sub_i32 s3, 0, s8
v_mul_lo_u32 v4, v6, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v3, v3
v_readfirstlane_b32 s2, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_sub_nc_u32_e32 v2, v2, v4
v_sub_nc_u32_e32 v4, v0, v5
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_mul_i32 s3, s3, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v5, 31, v4
s_mul_hi_u32 s3, s2, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_add_i32 s2, s2, s3
v_add_nc_u32_e32 v2, v2, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_nc_u32_e32 v4, v4, v5
s_xor_b32 s3, s6, s9
s_mul_hi_u32 s6, s7, s2
s_mul_hi_u32 s12, s5, s2
v_xor_b32_e32 v2, v2, v3
v_xor_b32_e32 v4, v4, v5
s_mul_i32 s10, s6, s8
s_add_i32 s11, s6, 1
s_sub_i32 s7, s7, s10
v_mul_hi_u32 v7, v2, s2
v_mul_hi_u32 v8, v4, s2
s_sub_i32 s10, s7, s8
s_cmp_ge_u32 s7, s8
v_xor_b32_e32 v3, s9, v3
s_cselect_b32 s6, s11, s6
s_cselect_b32 s7, s10, s7
s_add_i32 s2, s6, 1
v_mul_lo_u32 v9, v7, s8
v_mul_lo_u32 v10, v8, s8
s_cmp_ge_u32 s7, s8
s_mul_i32 s7, s12, s8
s_cselect_b32 s2, s2, s6
s_sub_i32 s5, s5, s7
s_xor_b32 s2, s2, s3
v_xor_b32_e32 v5, s9, v5
v_sub_nc_u32_e32 v2, v2, v9
v_add_nc_u32_e32 v9, 1, v7
v_sub_nc_u32_e32 v4, v4, v10
s_sub_i32 s3, s2, s3
s_xor_b32 s4, s4, s9
v_subrev_nc_u32_e32 v11, s8, v2
v_cmp_le_u32_e32 vcc_lo, s8, v2
s_add_i32 s6, s12, 1
s_sub_i32 s7, s5, s8
s_cmp_ge_u32 s5, s8
v_cndmask_b32_e32 v7, v7, v9, vcc_lo
v_dual_cndmask_b32 v2, v2, v11 :: v_dual_add_nc_u32 v9, 1, v8
v_cmp_le_u32_e32 vcc_lo, s8, v4
s_cselect_b32 s5, s7, s5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v10, 1, v7
v_cmp_le_u32_e64 s2, s8, v2
v_cndmask_b32_e32 v8, v8, v9, vcc_lo
v_subrev_nc_u32_e32 v9, s8, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v2, v7, v10, s2
v_dual_cndmask_b32 v4, v4, v9 :: v_dual_add_nc_u32 v7, 1, v8
s_cselect_b32 s2, s6, s12
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_xor_b32_e32 v2, v2, v3
s_add_i32 s6, s2, 1
v_cmp_le_u32_e32 vcc_lo, s8, v4
s_cmp_ge_u32 s5, s8
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
v_sub_nc_u32_e32 v2, v2, v3
s_cselect_b32 s2, s6, s2
v_cndmask_b32_e32 v4, v8, v7, vcc_lo
s_xor_b32 s5, s2, s4
s_sub_i32 s4, s5, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_xor_b32_e32 v7, v4, v5
v_mad_u64_u32 v[3:4], null, s3, v6, v[2:3]
s_load_b128 s[0:3], s[0:1], 0x0
v_sub_nc_u32_e32 v2, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[4:5], null, v3, s4, v[2:3]
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7upscalePfS_liiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7upscalePfS_liiii, .Lfunc_end0-_Z7upscalePfS_liiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7upscalePfS_liiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7upscalePfS_liiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Map a flat index of the (upscaled) output tensor to the flat index of
// the input element it is copied from, for nearest-neighbour upscaling.
// The logical output shape is (x, d1, d2, d3); the input tensor has its
// two innermost extents divided by scale_factor.
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
    // Decompose the flat output index, innermost dimension first.
    const int w_out = ii % d3; ii /= d3;
    const int z_out = ii % d2; ii /= d2;
    const int y     = ii % d1; ii /= d1;
    const int x     = ii;

    // Downscale the two innermost coordinates and their extents.
    const int w_in  = w_out / scale_factor;
    const int z_in  = z_out / scale_factor;
    const int d2_in = d2 / scale_factor;
    const int d3_in = d3 / scale_factor;

    // Re-flatten against the (smaller) input shape.
    return (((x * d1 + y) * d2_in) + z_in) * d3_in + w_in;
}
// Nearest-neighbour upscaling kernel: every thread copies exactly one
// element of the output tensor from its translated input location.
// Threads whose flat offset falls past no_elements exit immediately.
__global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3)
{
// Flat output offset of this thread within the 2-D launch grid:
// x contribution first, then a whole-row stride for the y dimension.
// (The y-stride product is kept in 32-bit unsigned arithmetic, exactly
// as compiled device code evaluates it.)
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements)
    return;
// Gather: one load from the downscaled source index, one store.
output[ii] = input[translate_idx(ii, d1, d2, d3, scale_factor)];
} | .text
.file "upscale.hip"
	.globl	_Z22__device_stub__upscalePfS_liiii # -- Begin function _Z22__device_stub__upscalePfS_liiii
	.p2align	4, 0x90
	.type	_Z22__device_stub__upscalePfS_liiii,@function
#-----------------------------------------------------------------------
# Host-side launch stub for the GPU kernel upscale(float*, float*, long,
# int, int, int, int).  SysV AMD64: the six register arguments arrive in
# rdi, rsi, rdx, ecx, r8d, r9d; the 7th (int d3) was passed on the
# caller's stack.  The stub spills each argument to a stack slot, builds
# the kernel-argument pointer array expected by hipLaunchKernel, pops
# the launch configuration recorded by the <<<...>>> expansion, and
# performs the launch.
#-----------------------------------------------------------------------
_Z22__device_stub__upscalePfS_liiii:    # @_Z22__device_stub__upscalePfS_liiii
	.cfi_startproc
# %bb.0:
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	# Spill the six register arguments into local slots.
	movq	%rdi, 88(%rsp)
	movq	%rsi, 80(%rsp)
	movq	%rdx, 72(%rsp)
	movl	%ecx, 20(%rsp)
	movl	%r8d, 16(%rsp)
	movl	%r9d, 12(%rsp)
	# Build the void* argument array at 96(%rsp): one pointer per
	# kernel parameter, in declaration order.
	leaq	88(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	20(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 136(%rsp)
	# 160(%rsp) is just above the return address, i.e. the caller's
	# stack slot holding the 7th argument (d3).
	leaq	160(%rsp), %rax
	movq	%rax, 144(%rsp)
	# Pop the pending launch configuration:
	# grid dim -> 56(%rsp), block dim -> 40(%rsp),
	# shared-mem size -> 32(%rsp), stream -> 24(%rsp).
	leaq	56(%rsp), %rdi
	leaq	40(%rsp), %rsi
	leaq	32(%rsp), %rdx
	leaq	24(%rsp), %rcx
	callq	__hipPopCallConfiguration
	# Forward the dim3s in register pairs (rsi:edx grid, rcx:r8d block),
	# the argument array in r9, and the kernel handle in edi.
	movq	56(%rsp), %rsi
	movl	64(%rsp), %edx
	movq	40(%rsp), %rcx
	movl	48(%rsp), %r8d
	leaq	96(%rsp), %r9
	movl	$_Z7upscalePfS_liiii, %edi
	# Stack arguments for hipLaunchKernel: stream pushed first (8th
	# argument), then shared-mem size (7th; offset shifted by the
	# preceding push).
	pushq	24(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	40(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$168, %rsp
	.cfi_adjust_cfa_offset -168
	retq
.Lfunc_end0:
	.size	_Z22__device_stub__upscalePfS_liiii, .Lfunc_end0-_Z22__device_stub__upscalePfS_liiii
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
#-----------------------------------------------------------------------
# Module constructor (run from .init_array).  Registers the embedded HIP
# fat binary exactly once (guarded by the __hip_gpubin_handle cache),
# registers the upscale kernel with the runtime, and arranges for
# __hip_module_dtor to run at process exit via atexit.
#-----------------------------------------------------------------------
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	# Already registered?  Then skip straight to function registration.
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB1_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	# Zero the four trailing (unused) pointer arguments on the stack.
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	# __hipRegisterFunction(handle, host stub addr, name, name, -1, 0…).
	movl	$_Z7upscalePfS_liiii, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	# Tail-call atexit(__hip_module_dtor); its return value is ours.
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end1:
	.size	__hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
#-----------------------------------------------------------------------
# Module destructor (registered via atexit by __hip_module_ctor).
# Unregisters the fat binary if it was registered and clears the cached
# handle so the teardown is idempotent.
#-----------------------------------------------------------------------
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_2                         # never registered: nothing to do
# %bb.1:
	pushq	%rax                            # realigns rsp to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
	# Host-visible kernel handle: a read-only pointer-sized object whose
	# value is the address of the launch stub.  Its address is what the
	# runtime uses to identify the kernel (see __hipRegisterFunction and
	# hipLaunchKernel above).
	.type	_Z7upscalePfS_liiii,@object     # @_Z7upscalePfS_liiii
	.section	.rodata,"a",@progbits
	.globl	_Z7upscalePfS_liiii
	.p2align	3, 0x0
_Z7upscalePfS_liiii:
	.quad	_Z22__device_stub__upscalePfS_liiii
	.size	_Z7upscalePfS_liiii, 8
	# Mangled kernel name string passed to __hipRegisterFunction.
	.type	.L__unnamed_1,@object           # @0
	.section	.rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
	.asciz	"_Z7upscalePfS_liiii"
	.size	.L__unnamed_1, 20
	# Fat-binary wrapper: {magic 0x48495046, version 1, data, unused}.
	# The data field points at the device code blob (__hip_fatbin,
	# supplied by the offload linker).
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046
	.long	1                               # 0x1
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24
	# Cached handle from __hipRegisterFatBinary; zero until the ctor runs.
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
	# Run __hip_module_ctor at program startup.
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor
	# Per-TU compilation-unit ID symbol emitted by the HIP toolchain.
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1
	.section	".linker-options","e",@llvm_linker_options
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym _Z22__device_stub__upscalePfS_liiii
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym _Z7upscalePfS_liiii
	.addrsig_sym __hip_fatbin
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7upscalePfS_liiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ ULDC UR6, c[0x0][0xc] ; /* 0x0000030000067ab9 */
/* 0x000fe40000000800 */
/*0030*/ ULDC.64 UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0050*/ UIMAD UR4, UR4, UR6, URZ ; /* 0x00000006040472a4 */
/* 0x000fc6000f8e023f */
/*0060*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002600 */
/*0070*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc6000f8e023f */
/*0080*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0090*/ IMAD R0, R3, c[0x0][0x0], R0 ; /* 0x0000000003007a24 */
/* 0x001fe400078e0200 */
/*00a0*/ IMAD R3, R2, UR4, R5 ; /* 0x0000000402037c24 */
/* 0x002fca000f8e0205 */
/*00b0*/ IADD3 R0, P1, R0, R3, RZ ; /* 0x0000000300007210 */
/* 0x000fc80007f3e0ff */
/*00c0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f06070 */
/*00d0*/ IMAD.X R3, RZ, RZ, RZ, P1 ; /* 0x000000ffff037224 */
/* 0x000fca00008e06ff */
/*00e0*/ ISETP.GE.AND.EX P0, PT, R3, c[0x0][0x174], PT, P0 ; /* 0x00005d0003007a0c */
/* 0x000fda0003f06300 */
/*00f0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0100*/ IABS R2, c[0x0][0x184] ; /* 0x0000610000027a13 */
/* 0x000fe20000000000 */
/*0110*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0120*/ IABS R4, c[0x0][0x180] ; /* 0x0000600000047a13 */
/* 0x000fe40000000000 */
/*0130*/ I2F.RP R5, R2 ; /* 0x0000000200057306 */
/* 0x000e220000209400 */
/*0140*/ IABS R10, c[0x0][0x178] ; /* 0x00005e00000a7a13 */
/* 0x000fce0000000000 */
/*0150*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x001e240000001000 */
/*0160*/ IADD3 R6, R5, 0xffffffe, RZ ; /* 0x0ffffffe05067810 */
/* 0x001fe40007ffe0ff */
/*0170*/ IABS R5, R0 ; /* 0x0000000000057213 */
/* 0x000fc80000000000 */
/*0180*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */
/* 0x000064000021f000 */
/*0190*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x001fe400078e00ff */
/*01a0*/ IMAD.MOV R9, RZ, RZ, -R7 ; /* 0x000000ffff097224 */
/* 0x002fc800078e0a07 */
/*01b0*/ IMAD R9, R9, R2, RZ ; /* 0x0000000209097224 */
/* 0x000fc800078e02ff */
/*01c0*/ IMAD.HI.U32 R7, R7, R9, R6 ; /* 0x0000000907077227 */
/* 0x000fe400078e0006 */
/*01d0*/ I2F.RP R9, R4 ; /* 0x0000000400097306 */
/* 0x000e280000209400 */
/*01e0*/ IMAD.HI.U32 R8, R7, R5, RZ ; /* 0x0000000507087227 */
/* 0x000fc800078e00ff */
/*01f0*/ IMAD.MOV R6, RZ, RZ, -R8 ; /* 0x000000ffff067224 */
/* 0x000fc800078e0a08 */
/*0200*/ IMAD R5, R2.reuse, R6, R5 ; /* 0x0000000602057224 */
/* 0x040fe200078e0205 */
/*0210*/ MUFU.RCP R9, R9 ; /* 0x0000000900097308 */
/* 0x001e280000001000 */
/*0220*/ ISETP.GT.U32.AND P1, PT, R2, R5, PT ; /* 0x000000050200720c */
/* 0x000fda0003f24070 */
/*0230*/ @!P1 IMAD.IADD R5, R5, 0x1, -R2 ; /* 0x0000000105059824 */
/* 0x000fe200078e0a02 */
/*0240*/ @!P1 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108089810 */
/* 0x000fe40007ffe0ff */
/*0250*/ IADD3 R6, R9, 0xffffffe, RZ ; /* 0x0ffffffe09067810 */
/* 0x001fe40007ffe0ff */
/*0260*/ ISETP.GE.U32.AND P0, PT, R5, R2, PT ; /* 0x000000020500720c */
/* 0x000fe40003f06070 */
/*0270*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */
/* 0x000062000021f000 */
/*0280*/ LOP3.LUT R5, R0, c[0x0][0x184], RZ, 0x3c, !PT ; /* 0x0000610000057a12 */
/* 0x000fe400078e3cff */
/*0290*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0x184], PT ; /* 0x00006100ff007a0c */
/* 0x000fc40003f25270 */
/*02a0*/ ISETP.GE.AND P2, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f46270 */
/*02b0*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x001fca00078e00ff */
/*02c0*/ @P0 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108080810 */
/* 0x000fe20007ffe0ff */
/*02d0*/ IMAD.MOV R5, RZ, RZ, -R7 ; /* 0x000000ffff057224 */
/* 0x002fcc00078e0a07 */
/*02e0*/ @!P2 IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff08a224 */
/* 0x000fe200078e0a08 */
/*02f0*/ @!P1 LOP3.LUT R8, RZ, c[0x0][0x184], RZ, 0x33, !PT ; /* 0x00006100ff089a12 */
/* 0x000fe200078e33ff */
/*0300*/ IMAD R5, R5, R4, RZ ; /* 0x0000000405057224 */
/* 0x000fc600078e02ff */
/*0310*/ IABS R9, R8 ; /* 0x0000000800097213 */
/* 0x000fe20000000000 */
/*0320*/ IMAD.HI.U32 R6, R7, R5, R6 ; /* 0x0000000507067227 */
/* 0x000fc800078e0006 */
/*0330*/ IMAD.MOV.U32 R7, RZ, RZ, R10 ; /* 0x000000ffff077224 */
/* 0x000fe400078e000a */
/*0340*/ IMAD.HI.U32 R5, R6, R9, RZ ; /* 0x0000000906057227 */
/* 0x000fe400078e00ff */
/*0350*/ I2F.RP R12, R7 ; /* 0x00000007000c7306 */
/* 0x000e240000209400 */
/*0360*/ IMAD.MOV R6, RZ, RZ, -R5 ; /* 0x000000ffff067224 */
/* 0x000fc800078e0a05 */
/*0370*/ IMAD R9, R4, R6, R9 ; /* 0x0000000604097224 */
/* 0x000fe200078e0209 */
/*0380*/ LOP3.LUT R6, R8, c[0x0][0x180], RZ, 0x3c, !PT ; /* 0x0000600008067a12 */
/* 0x000fc800078e3cff */
/*0390*/ ISETP.GT.U32.AND P1, PT, R4, R9, PT ; /* 0x000000090400720c */
/* 0x000fe40003f24070 */
/*03a0*/ ISETP.GE.AND P2, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f46270 */
/*03b0*/ MUFU.RCP R12, R12 ; /* 0x0000000c000c7308 */
/* 0x001e340000001000 */
/*03c0*/ @!P1 IMAD.IADD R9, R9, 0x1, -R4 ; /* 0x0000000109099824 */
/* 0x000fe200078e0a04 */
/*03d0*/ @!P1 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105059810 */
/* 0x000fc40007ffe0ff */
/*03e0*/ ISETP.NE.AND P1, PT, RZ, c[0x0][0x180], PT ; /* 0x00006000ff007a0c */
/* 0x000fe40003f25270 */
/*03f0*/ ISETP.GE.U32.AND P0, PT, R9, R4, PT ; /* 0x000000040900720c */
/* 0x000fe40003f06070 */
/*0400*/ IADD3 R10, R12, 0xffffffe, RZ ; /* 0x0ffffffe0c0a7810 */
/* 0x001fc80007ffe0ff */
/*0410*/ F2I.FTZ.U32.TRUNC.NTZ R11, R10 ; /* 0x0000000a000b7305 */
/* 0x00006e000021f000 */
/*0420*/ @P0 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105050810 */
/* 0x000fe20007ffe0ff */
/*0430*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x001fc800078e00ff */
/*0440*/ @!P2 IMAD.MOV R5, RZ, RZ, -R5 ; /* 0x000000ffff05a224 */
/* 0x000fe200078e0a05 */
/*0450*/ @!P1 LOP3.LUT R5, RZ, c[0x0][0x180], RZ, 0x33, !PT ; /* 0x00006000ff059a12 */
/* 0x000fe200078e33ff */
/*0460*/ IMAD.MOV R6, RZ, RZ, -R11 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a0b */
/*0470*/ IMAD.MOV R9, RZ, RZ, -R5 ; /* 0x000000ffff097224 */
/* 0x000fe400078e0a05 */
/*0480*/ IMAD R13, R6, R7, RZ ; /* 0x00000007060d7224 */
/* 0x000fe400078e02ff */
/*0490*/ IMAD R6, R9, c[0x0][0x180], R8 ; /* 0x0000600009067a24 */
/* 0x000fe400078e0208 */
/*04a0*/ IMAD.HI.U32 R13, R11, R13, R10 ; /* 0x0000000d0b0d7227 */
/* 0x000fc600078e000a */
/*04b0*/ IABS R12, R6 ; /* 0x00000006000c7213 */
/* 0x000fe20000000000 */
/*04c0*/ IMAD.MOV R11, RZ, RZ, -R8 ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e0a08 */
/*04d0*/ LOP3.LUT R6, R6, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e0006067a12 */
/* 0x000fe200078e3cff */
/*04e0*/ IMAD.HI.U32 R9, R13, R4, RZ ; /* 0x000000040d097227 */
/* 0x000fc800078e00ff */
/*04f0*/ IMAD R11, R11, c[0x0][0x184], R0 ; /* 0x000061000b0b7a24 */
/* 0x000fe400078e0200 */
/*0500*/ IMAD.MOV R14, RZ, RZ, -R9 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0a09 */
/*0510*/ IMAD.HI.U32 R8, R13, R12, RZ ; /* 0x0000000c0d087227 */
/* 0x000fe200078e00ff */
/*0520*/ IABS R18, R11 ; /* 0x0000000b00127213 */
/* 0x000fe40000000000 */
/*0530*/ LOP3.LUT R11, R11, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e000b0b7a12 */
/* 0x000fe200078e3cff */
/*0540*/ IMAD R14, R7, R14, R4 ; /* 0x0000000e070e7224 */
/* 0x000fe400078e0204 */
/*0550*/ IMAD.MOV R15, RZ, RZ, -R8 ; /* 0x000000ffff0f7224 */
/* 0x000fc400078e0a08 */
/*0560*/ IMAD.HI.U32 R10, R13, R2, RZ ; /* 0x000000020d0a7227 */
/* 0x000fe200078e00ff */
/*0570*/ ISETP.GT.U32.AND P1, PT, R7, R14, PT ; /* 0x0000000e0700720c */
/* 0x000fc60003f24070 */
/*0580*/ IMAD.HI.U32 R4, R13, R18, RZ ; /* 0x000000120d047227 */
/* 0x000fc800078e00ff */
/*0590*/ IMAD R12, R7.reuse, R15, R12 ; /* 0x0000000f070c7224 */
/* 0x040fe400078e020c */
/*05a0*/ IMAD.MOV R16, RZ, RZ, -R10 ; /* 0x000000ffff107224 */
/* 0x000fe400078e0a0a */
/*05b0*/ IMAD.MOV R13, RZ, RZ, -R4 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0a04 */
/*05c0*/ ISETP.GT.U32.AND P0, PT, R7.reuse, R12, PT ; /* 0x0000000c0700720c */
/* 0x040fe20003f04070 */
/*05d0*/ IMAD R16, R7, R16, R2 ; /* 0x0000001007107224 */
/* 0x000fe200078e0202 */
/*05e0*/ @!P1 IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109099810 */
/* 0x000fe20007ffe0ff */
/*05f0*/ IMAD R2, R7.reuse, R13, R18 ; /* 0x0000000d07027224 */
/* 0x040fe400078e0212 */
/*0600*/ @!P1 IMAD.IADD R14, R14, 0x1, -R7 ; /* 0x000000010e0e9824 */
/* 0x000fe200078e0a07 */
/*0610*/ ISETP.GT.U32.AND P5, PT, R7.reuse, R16, PT ; /* 0x000000100700720c */
/* 0x040fe20003fa4070 */
/*0620*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff0d7624 */
/* 0x000fe200078e00ff */
/*0630*/ ISETP.GT.U32.AND P6, PT, R7, R2, PT ; /* 0x000000020700720c */
/* 0x000fc40003fc4070 */
/*0640*/ ISETP.GE.U32.AND P2, PT, R14, R7.reuse, PT ; /* 0x000000070e00720c */
/* 0x080fe40003f46070 */
/*0650*/ LOP3.LUT R13, R13, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e000d0d7a12 */
/* 0x000fe200078e3cff */
/*0660*/ @!P0 IMAD.IADD R12, R12, 0x1, -R7.reuse ; /* 0x000000010c0c8824 */
/* 0x100fe200078e0a07 */
/*0670*/ @!P0 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108088810 */
/* 0x000fe40007ffe0ff */
/*0680*/ ISETP.GE.AND P3, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fe40003f66270 */
/*0690*/ ISETP.GE.U32.AND P4, PT, R12, R7.reuse, PT ; /* 0x000000070c00720c */
/* 0x080fe20003f86070 */
/*06a0*/ @!P5 IMAD.IADD R16, R16, 0x1, -R7.reuse ; /* 0x000000011010d824 */
/* 0x100fe200078e0a07 */
/*06b0*/ @!P5 IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0ad810 */
/* 0x000fe20007ffe0ff */
/*06c0*/ IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff0c7624 */
/* 0x000fe200078e00ff */
/*06d0*/ @!P6 IADD3 R4, R4, 0x1, RZ ; /* 0x000000010404e810 */
/* 0x000fe20007ffe0ff */
/*06e0*/ @!P6 IMAD.IADD R2, R2, 0x1, -R7 ; /* 0x000000010202e824 */
/* 0x000fe200078e0a07 */
/*06f0*/ ISETP.GE.U32.AND P0, PT, R16, R7, PT ; /* 0x000000071000720c */
/* 0x000fc40003f06070 */
/*0700*/ LOP3.LUT R12, R12, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e000c0c7a12 */
/* 0x000fe400078e3cff */
/*0710*/ ISETP.GE.U32.AND P1, PT, R2, R7, PT ; /* 0x000000070200720c */
/* 0x000fe40003f26070 */
/*0720*/ ISETP.GE.AND P6, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003fc6270 */
/*0730*/ @P2 IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109092810 */
/* 0x000fe40007ffe0ff */
/*0740*/ ISETP.GE.AND P2, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */
/* 0x000fe40003f46270 */
/*0750*/ ISETP.GE.AND P5, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fe20003fa6270 */
/*0760*/ @!P3 IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff09b224 */
/* 0x000fe200078e0a09 */
/*0770*/ @P4 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108084810 */
/* 0x000fc40007ffe0ff */
/*0780*/ @P0 IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a0810 */
/* 0x000fe40007ffe0ff */
/*0790*/ @P1 IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104041810 */
/* 0x000fe20007ffe0ff */
/*07a0*/ @!P6 IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff08e224 */
/* 0x000fe200078e0a08 */
/*07b0*/ ISETP.NE.AND P4, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe40003f85270 */
/*07c0*/ LOP3.LUT R7, RZ, c[0x0][0x178], RZ, 0x33, !PT ; /* 0x00005e00ff077a12 */
/* 0x000fe200078e33ff */
/*07d0*/ @!P2 IMAD.MOV R10, RZ, RZ, -R10 ; /* 0x000000ffff0aa224 */
/* 0x000fe400078e0a0a */
/*07e0*/ @!P5 IMAD.MOV R4, RZ, RZ, -R4 ; /* 0x000000ffff04d224 */
/* 0x000fe200078e0a04 */
/*07f0*/ SEL R2, R7, R9, !P4 ; /* 0x0000000907027207 */
/* 0x000fc40006000000 */
/*0800*/ SEL R8, R7.reuse, R8, !P4 ; /* 0x0000000807087207 */
/* 0x040fe40006000000 */
/*0810*/ SEL R10, R7.reuse, R10, !P4 ; /* 0x0000000a070a7207 */
/* 0x040fe40006000000 */
/*0820*/ SEL R7, R7, R4, !P4 ; /* 0x0000000407077207 */
/* 0x000fe20006000000 */
/*0830*/ IMAD R2, R5, R2, R8 ; /* 0x0000000205027224 */
/* 0x000fe400078e0208 */
/*0840*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe400078e00ff */
/*0850*/ IMAD R4, R2, R10, R7 ; /* 0x0000000a02047224 */
/* 0x000fc800078e0207 */
/*0860*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fcc00078e0205 */
/*0870*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0880*/ LEA R2, P0, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000027a11 */
/* 0x000fc800078010ff */
/*0890*/ LEA.HI.X R3, R0, c[0x0][0x16c], R3, 0x2, P0 ; /* 0x00005b0000037a11 */
/* 0x000fca00000f1403 */
/*08a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x004fe2000c101904 */
/*08b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*08c0*/ BRA 0x8c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7upscalePfS_liiii
.globl _Z7upscalePfS_liiii
.p2align 8
.type _Z7upscalePfS_liiii,@function
_Z7upscalePfS_liiii:
s_clause 0x2
s_load_b32 s4, s[0:1], 0x34
s_load_b32 s5, s[0:1], 0x28
s_load_b64 s[2:3], s[0:1], 0x10
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s6, s4, 0xffff
s_mul_i32 s5, s5, s15
s_lshr_b32 s4, s4, 16
s_mul_i32 s5, s5, s6
v_mad_u64_u32 v[2:3], null, s14, s6, v[1:2]
v_mad_u64_u32 v[3:4], null, s5, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s4, v3, v2
v_add_co_ci_u32_e64 v1, null, 0, 0, s4
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[0:1]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b64 s[2:3], s[0:1], 0x20
v_ashrrev_i32_e32 v4, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, v0, v4
v_xor_b32_e32 v5, v5, v4
s_waitcnt lgkmcnt(0)
s_ashr_i32 s4, s3, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_add_i32 s5, s3, s4
v_xor_b32_e32 v4, s4, v4
s_xor_b32 s5, s5, s4
v_cvt_f32_u32_e32 v2, s5
s_sub_i32 s6, 0, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v3, s6, v2
s_ashr_i32 s6, s2, 31
s_add_i32 s7, s2, s6
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s7, s7, s6
v_cvt_f32_u32_e32 v6, s7
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v3, v2, v3
s_sub_i32 s8, 0, s7
v_rcp_iflag_f32_e32 v6, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v3
v_mul_hi_u32 v2, v5, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v6, 0x4f7ffffe, v6
v_mul_lo_u32 v3, v2, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v3, v5, v3
v_add_nc_u32_e32 v5, 1, v2
v_subrev_nc_u32_e32 v7, s5, v3
v_cmp_le_u32_e32 vcc_lo, s5, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v2, v2, v5 :: v_dual_cndmask_b32 v3, v3, v7
v_add_nc_u32_e32 v5, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s5, v3
v_cvt_u32_f32_e32 v3, v6
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_lo_u32 v5, s8, v3
s_load_b32 s8, s[0:1], 0x18
v_xor_b32_e32 v2, v2, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v3, v5
v_sub_nc_u32_e32 v2, v2, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v4, 31, v2
v_add_nc_u32_e32 v3, v3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v6, v2, v4
s_waitcnt lgkmcnt(0)
s_ashr_i32 s9, s8, 31
s_add_i32 s8, s8, s9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_xor_b32_e32 v5, v6, v4
v_xor_b32_e32 v4, s6, v4
s_xor_b32 s8, s8, s9
v_mul_hi_u32 v3, v5, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v6, v3, s7
v_sub_nc_u32_e32 v5, v5, v6
v_add_nc_u32_e32 v6, 1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_subrev_nc_u32_e32 v7, s7, v5
v_cmp_le_u32_e32 vcc_lo, s7, v5
v_cndmask_b32_e32 v3, v3, v6, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v5, v5, v7, vcc_lo
v_cvt_f32_u32_e32 v7, s8
v_add_nc_u32_e32 v6, 1, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s7, v5
v_rcp_iflag_f32_e32 v5, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v3, v3, v6, vcc_lo
v_xor_b32_e32 v3, v3, v4
s_delay_alu instid0(VALU_DEP_1)
v_sub_nc_u32_e32 v6, v3, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v3, 0x4f7ffffe, v5
v_mul_lo_u32 v5, v2, s3
s_sub_i32 s3, 0, s8
v_mul_lo_u32 v4, v6, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v3, v3
v_readfirstlane_b32 s2, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_sub_nc_u32_e32 v2, v2, v4
v_sub_nc_u32_e32 v4, v0, v5
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_mul_i32 s3, s3, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v5, 31, v4
s_mul_hi_u32 s3, s2, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_add_i32 s2, s2, s3
v_add_nc_u32_e32 v2, v2, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_nc_u32_e32 v4, v4, v5
s_xor_b32 s3, s6, s9
s_mul_hi_u32 s6, s7, s2
s_mul_hi_u32 s12, s5, s2
v_xor_b32_e32 v2, v2, v3
v_xor_b32_e32 v4, v4, v5
s_mul_i32 s10, s6, s8
s_add_i32 s11, s6, 1
s_sub_i32 s7, s7, s10
v_mul_hi_u32 v7, v2, s2
v_mul_hi_u32 v8, v4, s2
s_sub_i32 s10, s7, s8
s_cmp_ge_u32 s7, s8
v_xor_b32_e32 v3, s9, v3
s_cselect_b32 s6, s11, s6
s_cselect_b32 s7, s10, s7
s_add_i32 s2, s6, 1
v_mul_lo_u32 v9, v7, s8
v_mul_lo_u32 v10, v8, s8
s_cmp_ge_u32 s7, s8
s_mul_i32 s7, s12, s8
s_cselect_b32 s2, s2, s6
s_sub_i32 s5, s5, s7
s_xor_b32 s2, s2, s3
v_xor_b32_e32 v5, s9, v5
v_sub_nc_u32_e32 v2, v2, v9
v_add_nc_u32_e32 v9, 1, v7
v_sub_nc_u32_e32 v4, v4, v10
s_sub_i32 s3, s2, s3
s_xor_b32 s4, s4, s9
v_subrev_nc_u32_e32 v11, s8, v2
v_cmp_le_u32_e32 vcc_lo, s8, v2
s_add_i32 s6, s12, 1
s_sub_i32 s7, s5, s8
s_cmp_ge_u32 s5, s8
v_cndmask_b32_e32 v7, v7, v9, vcc_lo
v_dual_cndmask_b32 v2, v2, v11 :: v_dual_add_nc_u32 v9, 1, v8
v_cmp_le_u32_e32 vcc_lo, s8, v4
s_cselect_b32 s5, s7, s5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v10, 1, v7
v_cmp_le_u32_e64 s2, s8, v2
v_cndmask_b32_e32 v8, v8, v9, vcc_lo
v_subrev_nc_u32_e32 v9, s8, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v2, v7, v10, s2
v_dual_cndmask_b32 v4, v4, v9 :: v_dual_add_nc_u32 v7, 1, v8
s_cselect_b32 s2, s6, s12
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_xor_b32_e32 v2, v2, v3
s_add_i32 s6, s2, 1
v_cmp_le_u32_e32 vcc_lo, s8, v4
s_cmp_ge_u32 s5, s8
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
v_sub_nc_u32_e32 v2, v2, v3
s_cselect_b32 s2, s6, s2
v_cndmask_b32_e32 v4, v8, v7, vcc_lo
s_xor_b32 s5, s2, s4
s_sub_i32 s4, s5, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_xor_b32_e32 v7, v4, v5
v_mad_u64_u32 v[3:4], null, s3, v6, v[2:3]
s_load_b128 s[0:3], s[0:1], 0x0
v_sub_nc_u32_e32 v2, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[4:5], null, v3, s4, v[2:3]
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7upscalePfS_liiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7upscalePfS_liiii, .Lfunc_end0-_Z7upscalePfS_liiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7upscalePfS_liiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7upscalePfS_liiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00185578_00000000-6_upscale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13translate_idxiiiii
.type _Z13translate_idxiiiii, @function
_Z13translate_idxiiiii:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z13translate_idxiiiii, .-_Z13translate_idxiiiii
.globl _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii
.type _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii, @function
_Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii:
.LFB2052:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z7upscalePfS_liiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii, .-_Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii
.globl _Z7upscalePfS_liiii
.type _Z7upscalePfS_liiii, @function
_Z7upscalePfS_liiii:
.LFB2053:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z33__device_stub__Z7upscalePfS_liiiiPfS_liiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z7upscalePfS_liiii, .-_Z7upscalePfS_liiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7upscalePfS_liiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7upscalePfS_liiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "upscale.hip"
.globl _Z22__device_stub__upscalePfS_liiii # -- Begin function _Z22__device_stub__upscalePfS_liiii
.p2align 4, 0x90
.type _Z22__device_stub__upscalePfS_liiii,@function
_Z22__device_stub__upscalePfS_liiii: # @_Z22__device_stub__upscalePfS_liiii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7upscalePfS_liiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z22__device_stub__upscalePfS_liiii, .Lfunc_end0-_Z22__device_stub__upscalePfS_liiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7upscalePfS_liiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7upscalePfS_liiii,@object # @_Z7upscalePfS_liiii
.section .rodata,"a",@progbits
.globl _Z7upscalePfS_liiii
.p2align 3, 0x0
_Z7upscalePfS_liiii:
.quad _Z22__device_stub__upscalePfS_liiii
.size _Z7upscalePfS_liiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7upscalePfS_liiii"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__upscalePfS_liiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7upscalePfS_liiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#define WIDTH 7
#define HEIGHT 3
#define THREADS_PER_BLOCK 32
#define CUDA_CHECK(err) if(err != cudaSuccess)\
{\
printf("cudaMalloc returned error %s (code %d) (file %s) (line %d)\n", cudaGetErrorString(err), err, __FILE__, __LINE__);\
}\
__global__ void transpose(int *input, int *output, int width, int height)
{
__shared__ int temp[THREADS_PER_BLOCK][THREADS_PER_BLOCK];
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
int id_in = yIndex * width + xIndex;
temp[threadIdx.y][threadIdx.x] = input[id_in];
}
__syncthreads();
xIndex = blockIdx.y * blockDim.y + threadIdx.x;
yIndex = blockIdx.x * blockDim.x + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
int id_out = yIndex * height + xIndex;
output[id_out] = temp[threadIdx.x][threadIdx.y];
}
}
inline __device__
void PrefixSum(int* output, int* input, int w, int nextpow2)
{
extern __shared__ int temp[];
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
int last = temp[nextpow2 - 1];
if(tdx == 0) temp[nextpow2 - 1] = 0;
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
if(tdx2p < w) output[w] = last;
}
__global__ void KernPrefixSumRows(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*width-1, in+row*width, width, 2*blockDim.x );
}
__global__ void KernPrefixSumRowsTrans(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*(width+1)+(width+1), in+row*width, width, 2*blockDim.x );
}
void PrefixSumRows(int *out, int *in, int *outT, int height, int width)
{
dim3 blockDim = dim3( 1, 1);
while(blockDim.x < ceil(width/2.0f)) blockDim.x <<= 1;
dim3 gridDim = dim3( 1, height );
KernPrefixSumRows<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,in,height,width);
cudaDeviceSynchronize();
dim3 gridSize, blockSize;
gridSize.x = (int)((width + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((height + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, width, height);
cudaDeviceSynchronize();
memset(out, 0, (HEIGHT+1)*sizeof(int));
blockDim = dim3( 1, 1);
while(blockDim.x < ceil((height)/2.0f)) blockDim.x <<= 1;
gridDim = dim3( 1, width );
KernPrefixSumRowsTrans<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,outT,width,height);
cudaDeviceSynchronize();
gridSize.x = (int)((height+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((width+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, height+1, width+1);
cudaDeviceSynchronize();
}
void ComputeIntegrals(const unsigned char *Img, int *Integral) {
const int SUM_WIDTH_STEP = (WIDTH+1);
#define SUM_TYPE int
int iW = WIDTH; // image dimensions
int iH = HEIGHT;
int sW = WIDTH+1; // sum dimensions
unsigned char *ImgPtr = 0;
SUM_TYPE *IntegPtr = 0;
// write zeros to first row
memset(Integral, 0, (WIDTH+1)*sizeof(int));
//#if WITH_CUDA
// CudaComputeIntegralImages(Img, Integral, TiltedIntegral, SUM_WIDTH_STEP, cudaComputeStream);
//#else
{
int yy=1;
ImgPtr = (unsigned char *)(Img + WIDTH*(yy-1));
IntegPtr = (SUM_TYPE *)(Integral + SUM_WIDTH_STEP*yy);
SUM_TYPE *IntegPtrA = IntegPtr - 1;
SUM_TYPE *IntegPtrB = IntegPtr - sW - 1;
SUM_TYPE *IntegPtrC = IntegPtr - sW;
*IntegPtr++ = (SUM_TYPE)0.0;
IntegPtrA++;
IntegPtrB++;
IntegPtrC++;
for(int xx=1; xx<iW; xx++){
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr++);
*IntegPtr++ = fTemp
+ *IntegPtrA++
- *IntegPtrB++
+ *IntegPtrC++;
}
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr);
*IntegPtr = fTemp
+ *IntegPtrA
- *IntegPtrB
+ *IntegPtrC;
}
// compute regular integral and first pass of tilted
for(int yy=2; yy<=iH; yy++){
ImgPtr = (unsigned char *)(Img + WIDTH*(yy-1));
IntegPtr = (SUM_TYPE *)(Integral + SUM_WIDTH_STEP*yy);
SUM_TYPE *IntegPtrA = IntegPtr - 1;
SUM_TYPE *IntegPtrB = IntegPtr - sW - 1;
SUM_TYPE *IntegPtrC = IntegPtr - sW;
*IntegPtr++ = (SUM_TYPE)0.0;
IntegPtrA++;
IntegPtrB++;
IntegPtrC++;
for(int xx=1; xx<iW; xx++){
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr++);
*IntegPtr++ = fTemp
+ *IntegPtrA++
- *IntegPtrB++
+ *IntegPtrC++;
}
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr);
*IntegPtr = fTemp
+ *IntegPtrA
- *IntegPtrB
+ *IntegPtrC;
}
printf("\n\n");
//#endif
}
int main() {
unsigned char *Img=0;
int *ImgInt=0;
int *Integral=0;
int *IntegralTransposed=0;
clock_t start, end;
CUDA_CHECK( cudaMallocManaged((void **) &Img, WIDTH*HEIGHT) );
CUDA_CHECK( cudaMallocManaged((void **) &ImgInt, WIDTH*HEIGHT*sizeof(int)) );
CUDA_CHECK( cudaMallocManaged((void **) &Integral, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
CUDA_CHECK( cudaMallocManaged((void **) &IntegralTransposed, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
for (int i=0; i<WIDTH*HEIGHT; i++) Img[i] = 1;
for (int i=0; i<WIDTH*HEIGHT; i++) ImgInt[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) IntegralTransposed[i] = 1;
start = clock();
ComputeIntegrals(Img, Integral);
end = clock();
printf("CPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
int *IntegPtr;
unsigned char *ImgPtr;
// input
printf("Input\n\n");
for (int i=0; i<HEIGHT; i++) {
for (int j=0; j<WIDTH; j++) {
ImgPtr = Img + i * WIDTH + j;
printf("%d ", *ImgPtr);
}
printf("\n");
}
printf("\n\n");
printf("Output CPU");
printf("\n\n");
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = Integral + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
printf("\n\n");
printf("OUTPUT GPU");
printf("\n\n");
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 0;
//CudaComputeIntegralImages(ImgInt, Integral, IntegralTransposed);
start = clock();
PrefixSumRows(Integral, ImgInt, IntegralTransposed, HEIGHT, WIDTH);
end = clock();
printf("GPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = IntegralTransposed + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
cudaFree(Img);
cudaFree(ImgInt);
cudaFree(Integral);
cudaFree(IntegralTransposed);
return 0;
} | code for sm_80
Function : _Z22KernPrefixSumRowsTransPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R10, SR_CTAID.Y ; /* 0x00000000000a7919 */
/* 0x000e620000002600 */
/*0050*/ IMAD.SHL.U32 R3, R0, 0x2, RZ ; /* 0x0000000200037824 */
/* 0x001fe400078e00ff */
/*0060*/ IMAD R5, R10, c[0x0][0x174], RZ ; /* 0x00005d000a057a24 */
/* 0x002fc600078e02ff */
/*0070*/ IADD3 R4, R3, 0x1, RZ ; /* 0x0000000103047810 */
/* 0x000fe40007ffe0ff */
/*0080*/ SHF.R.S32.HI R2, RZ, 0x1f, R3 ; /* 0x0000001fff027819 */
/* 0x000fe40000011403 */
/*0090*/ IADD3 R7, P2, R5, R3, RZ ; /* 0x0000000305077210 */
/* 0x000fe40007f5e0ff */
/*00a0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe40003f06270 */
/*00b0*/ ISETP.GE.AND P1, PT, R4, c[0x0][0x174], PT ; /* 0x00005d0004007a0c */
/* 0x000fe40003f26270 */
/*00c0*/ LEA.HI.X.SX32 R8, R5, R2, 0x1, P2 ; /* 0x0000000205087211 */
/* 0x000fe200010f0eff */
/*00d0*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */
/* 0x000fe200078e00ff */
/*00e0*/ LEA R6, P2, R7, c[0x0][0x168], 0x2 ; /* 0x00005a0007067a11 */
/* 0x000fc800078410ff */
/*00f0*/ LEA.HI.X R7, R7, c[0x0][0x16c], R8, 0x2, P2 ; /* 0x00005b0007077a11 */
/* 0x000fca00010f1408 */
/*0100*/ @!P0 LDG.E R9, [R6.64] ; /* 0x0000000806098981 */
/* 0x000ea8000c1e1900 */
/*0110*/ @!P1 LDG.E R5, [R6.64+0x4] ; /* 0x0000040806059981 */
/* 0x000ee2000c1e1900 */
/*0120*/ IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0c7624 */
/* 0x000fe200078e00ff */
/*0130*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0140*/ IMAD.MOV.U32 R11, RZ, RZ, 0x1 ; /* 0x00000001ff0b7424 */
/* 0x000fe200078e00ff */
/*0150*/ USHF.L.U32 UR4, UR4, 0x3, URZ ; /* 0x0000000304047899 */
/* 0x000fe2000800063f */
/*0160*/ IMAD.SHL.U32 R12, R12, 0x2, RZ ; /* 0x000000020c0c7824 */
/* 0x000fca00078e00ff */
/*0170*/ ISETP.GE.AND P2, PT, R12, 0x2, PT ; /* 0x000000020c00780c */
/* 0x000fe20003f46270 */
/*0180*/ STS [R0.X8], R9 ; /* 0x0000000900007388 */
/* 0x0041e80000008800 */
/*0190*/ STS [R0.X8+0x4], R5 ; /* 0x0000040500007388 */
/* 0x0081f00000008800 */
/*01a0*/ @!P2 BRA 0x2b0 ; /* 0x000001000000a947 */
/* 0x000fea0003800000 */
/*01b0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x1 ; /* 0x00000001ff0b7424 */
/* 0x000fe400078e00ff */
/*01c0*/ IMAD.MOV.U32 R5, RZ, RZ, R12 ; /* 0x000000ffff057224 */
/* 0x001fca00078e000c */
/*01d0*/ SHF.R.S32.HI R9, RZ, 0x1, R5 ; /* 0x00000001ff097819 */
/* 0x000fe20000011405 */
/*01e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*01f0*/ ISETP.GE.AND P3, PT, R0, R9, PT ; /* 0x000000090000720c */
/* 0x000fda0003f66270 */
/*0200*/ @!P3 IMAD R6, R4, R11, RZ ; /* 0x0000000b0406b224 */
/* 0x000fc800078e02ff */
/*0210*/ @!P3 IMAD.SHL.U32 R8, R6, 0x4, RZ ; /* 0x000000040608b824 */
/* 0x000fc800078e00ff */
/*0220*/ @!P3 IMAD R8, R11.reuse, 0x4, R8 ; /* 0x000000040b08b824 */
/* 0x040fe200078e0208 */
/*0230*/ @!P3 LDS R6, [R6.X4+-0x4] ; /* 0xfffffc000606b984 */
/* 0x000fe20000004800 */
/*0240*/ IMAD.SHL.U32 R11, R11, 0x2, RZ ; /* 0x000000020b0b7824 */
/* 0x000fc600078e00ff */
/*0250*/ @!P3 LDS R7, [R8+-0x4] ; /* 0xfffffc000807b984 */
/* 0x000e240000000800 */
/*0260*/ @!P3 IMAD.IADD R7, R7, 0x1, R6 ; /* 0x000000010707b824 */
/* 0x001fca00078e0206 */
/*0270*/ @!P3 STS [R8+-0x4], R7 ; /* 0xfffffc070800b388 */
/* 0x0001e20000000800 */
/*0280*/ ISETP.GT.AND P3, PT, R5, 0x3, PT ; /* 0x000000030500780c */
/* 0x000fe20003f64270 */
/*0290*/ IMAD.MOV.U32 R5, RZ, RZ, R9 ; /* 0x000000ffff057224 */
/* 0x000fd800078e0009 */
/*02a0*/ @P3 BRA 0x1d0 ; /* 0xffffff2000003947 */
/* 0x001fea000383ffff */
/*02b0*/ ISETP.NE.AND P3, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f65270 */
/*02c0*/ ULDC UR5, c[0x0][0x174] ; /* 0x00005d0000057ab9 */
/* 0x000fe20000000800 */
/*02d0*/ LDS R7, [UR4+-0x4] ; /* 0xfffffc04ff077984 */
/* 0x000e620008000800 */
/*02e0*/ UIADD3 UR6, UR5, 0x1, URZ ; /* 0x0000000105067890 */
/* 0x000fcc000fffe03f */
/*02f0*/ IMAD R5, R10, UR6, RZ ; /* 0x000000060a057c24 */
/* 0x001fc8000f8e02ff */
/*0300*/ @!P3 STS [UR4+-0x4], RZ ; /* 0xfffffcffff00b988 */
/* 0x000fe20008000804 */
/*0310*/ USHF.R.S32.HI UR4, URZ, 0x1f, UR5 ; /* 0x0000001f3f047899 */
/* 0x000fe20008011405 */
/*0320*/ IADD3 R10, P3, R5, c[0x0][0x174], RZ ; /* 0x00005d00050a7a10 */
/* 0x000fca0007f7e0ff */
/*0330*/ LEA.HI.X.SX32 R15, R5, UR4, 0x1, P3 ; /* 0x00000004050f7c11 */
/* 0x000fe200098f0eff */
/*0340*/ @!P2 BRA 0x480 ; /* 0x000001300000a947 */
/* 0x000fea0003800000 */
/*0350*/ UMOV UR5, 0x1 ; /* 0x0000000100057882 */
/* 0x000fe40000000000 */
/*0360*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0370*/ ISETP.GE.AND P2, PT, R0, UR5, PT ; /* 0x0000000500007c0c */
/* 0x000fe2000bf46270 */
/*0380*/ USHF.L.U32 UR5, UR5, 0x1, URZ ; /* 0x0000000105057899 */
/* 0x000fe2000800063f */
/*0390*/ SHF.R.S32.HI R11, RZ, 0x1, R11 ; /* 0x00000001ff0b7819 */
/* 0x000fc6000001140b */
/*03a0*/ BSSY B0, 0x470 ; /* 0x000000c000007945 */
/* 0x000fe40003800000 */
/*03b0*/ ISETP.LE.AND P3, PT, R12, UR5, PT ; /* 0x000000050c007c0c */
/* 0x000fcc000bf63270 */
/*03c0*/ @P2 BRA 0x460 ; /* 0x0000009000002947 */
/* 0x001fea0003800000 */
/*03d0*/ IMAD R5, R4, R11, RZ ; /* 0x0000000b04057224 */
/* 0x000fc800078e02ff */
/*03e0*/ IMAD.SHL.U32 R6, R5, 0x4, RZ ; /* 0x0000000405067824 */
/* 0x000fc800078e00ff */
/*03f0*/ IMAD R13, R11, 0x4, R6 ; /* 0x000000040b0d7824 */
/* 0x000fe400078e0206 */
/*0400*/ LDS R6, [R5.X4+-0x4] ; /* 0xfffffc0005067984 */
/* 0x000fe80000004800 */
/*0410*/ LDS R8, [R13+-0x4] ; /* 0xfffffc000d087984 */
/* 0x000e280000000800 */
/*0420*/ STS [R5.X4+-0x4], R8 ; /* 0xfffffc0805007388 */
/* 0x001fe80000004800 */
/*0430*/ LDS R9, [R13+-0x4] ; /* 0xfffffc000d097984 */
/* 0x000e240000000800 */
/*0440*/ IMAD.IADD R6, R6, 0x1, R9 ; /* 0x0000000106067824 */
/* 0x001fca00078e0209 */
/*0450*/ STS [R13+-0x4], R6 ; /* 0xfffffc060d007388 */
/* 0x0001e40000000800 */
/*0460*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0470*/ @!P3 BRA 0x360 ; /* 0xfffffee00000b947 */
/* 0x000fea000383ffff */
/*0480*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0490*/ IADD3 R3, P2, R3, R10, RZ ; /* 0x0000000a03037210 */
/* 0x000fca0007f5e0ff */
/*04a0*/ IMAD.X R4, R2, 0x1, R15, P2 ; /* 0x0000000102047824 */
/* 0x000fe200010e060f */
/*04b0*/ LEA R2, P2, R3, c[0x0][0x160], 0x2 ; /* 0x0000580003027a11 */
/* 0x000fc800078410ff */
/*04c0*/ LEA.HI.X R3, R3, c[0x0][0x164], R4, 0x2, P2 ; /* 0x0000590003037a11 */
/* 0x000fe200010f1404 */
/*04d0*/ @!P0 LDS R5, [R0.X8] ; /* 0x0000000000058984 */
/* 0x000ea80000008800 */
/*04e0*/ @!P0 STG.E [R2.64+0x4], R5 ; /* 0x0000040502008986 */
/* 0x0045e2000c101908 */
/*04f0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0500*/ LDS R9, [R0.X8+0x4] ; /* 0x0000040000097984 */
/* 0x000ee20000008800 */
/*0510*/ IADD3 R5, P0, R10, c[0x0][0x174], RZ ; /* 0x00005d000a057a10 */
/* 0x004fc80007f1e0ff */
/*0520*/ IADD3.X R6, R15, UR4, RZ, P0, !PT ; /* 0x000000040f067c10 */
/* 0x001fe400087fe4ff */
/*0530*/ LEA R4, P0, R5, c[0x0][0x160], 0x2 ; /* 0x0000580005047a11 */
/* 0x000fc800078010ff */
/*0540*/ LEA.HI.X R5, R5, c[0x0][0x164], R6, 0x2, P0 ; /* 0x0000590005057a11 */
/* 0x000fe200000f1406 */
/*0550*/ STG.E [R2.64+0x8], R9 ; /* 0x0000080902007986 */
/* 0x008fe8000c101908 */
/*0560*/ STG.E [R4.64+0x4], R7 ; /* 0x0000040704007986 */
/* 0x002fe2000c101908 */
/*0570*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0580*/ BRA 0x580; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0600*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0610*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0620*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0630*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z17KernPrefixSumRowsPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ S2UR UR4, SR_CTAID.Y ; /* 0x00000000000479c3 */
/* 0x000e620000002600 */
/*0030*/ ULDC UR5, c[0x0][0x174] ; /* 0x00005d0000057ab9 */
/* 0x000fe20000000800 */
/*0040*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */
/* 0x000fe200078e00ff */
/*0050*/ ULDC.64 UR12, c[0x0][0x118] ; /* 0x00004600000c7ab9 */
/* 0x000fe20000000a00 */
/*0060*/ IMAD.SHL.U32 R3, R0, 0x2, RZ ; /* 0x0000000200037824 */
/* 0x001fe200078e00ff */
/*0070*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x002fc8000f8e023f */
/*0080*/ IADD3 R2, R3.reuse, 0x1, RZ ; /* 0x0000000103027810 */
/* 0x040fe20007ffe0ff */
/*0090*/ USHF.R.S32.HI UR6, URZ, 0x1f, UR4 ; /* 0x0000001f3f067899 */
/* 0x000fe20008011404 */
/*00a0*/ IADD3 R5, P2, R3.reuse, UR4, RZ ; /* 0x0000000403057c10 */
/* 0x040fe4000ff5e0ff */
/*00b0*/ ISETP.GE.AND P0, PT, R3.reuse, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x040fe40003f06270 */
/*00c0*/ ISETP.GE.AND P1, PT, R2, c[0x0][0x174], PT ; /* 0x00005d0002007a0c */
/* 0x000fe40003f26270 */
/*00d0*/ LEA.HI.X.SX32 R6, R3, UR6, 0x1, P2 ; /* 0x0000000603067c11 */
/* 0x000fe200090f0eff */
/*00e0*/ IMAD.MOV.U32 R3, RZ, RZ, RZ ; /* 0x000000ffff037224 */
/* 0x000fe200078e00ff */
/*00f0*/ LEA R4, P2, R5, c[0x0][0x168], 0x2 ; /* 0x00005a0005047a11 */
/* 0x000fc800078410ff */
/*0100*/ LEA.HI.X R5, R5, c[0x0][0x16c], R6, 0x2, P2 ; /* 0x00005b0005057a11 */
/* 0x000fca00010f1406 */
/*0110*/ @!P0 LDG.E R7, [R4.64] ; /* 0x0000000c04078981 */
/* 0x000ea8000c1e1900 */
/*0120*/ @!P1 LDG.E R3, [R4.64+0x4] ; /* 0x0000040c04039981 */
/* 0x000ee2000c1e1900 */
/*0130*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff087624 */
/* 0x000fe200078e00ff */
/*0140*/ ULDC UR5, c[0x0][0x0] ; /* 0x0000000000057ab9 */
/* 0x000fe20000000800 */
/*0150*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */
/* 0x000fe200078e00ff */
/*0160*/ USHF.L.U32 UR5, UR5, 0x3, URZ ; /* 0x0000000305057899 */
/* 0x000fe2000800063f */
/*0170*/ IMAD.SHL.U32 R8, R8, 0x2, RZ ; /* 0x0000000208087824 */
/* 0x000fca00078e00ff */
/*0180*/ ISETP.GE.AND P2, PT, R8, 0x2, PT ; /* 0x000000020800780c */
/* 0x000fe20003f46270 */
/*0190*/ STS [R0.X8], R7 ; /* 0x0000000700007388 */
/* 0x0041e80000008800 */
/*01a0*/ STS [R0.X8+0x4], R3 ; /* 0x0000040300007388 */
/* 0x0081f00000008800 */
/*01b0*/ @!P2 BRA 0x2c0 ; /* 0x000001000000a947 */
/* 0x000fea0003800000 */
/*01c0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x1 ; /* 0x00000001ff097424 */
/* 0x000fe400078e00ff */
/*01d0*/ IMAD.MOV.U32 R3, RZ, RZ, R8 ; /* 0x000000ffff037224 */
/* 0x001fca00078e0008 */
/*01e0*/ SHF.R.S32.HI R7, RZ, 0x1, R3 ; /* 0x00000001ff077819 */
/* 0x000fe20000011403 */
/*01f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe60000010000 */
/*0200*/ ISETP.GE.AND P3, PT, R0, R7, PT ; /* 0x000000070000720c */
/* 0x000fda0003f66270 */
/*0210*/ @!P3 IMAD R4, R2, R9, RZ ; /* 0x000000090204b224 */
/* 0x000fca00078e02ff */
/*0220*/ @!P3 SHF.L.U32 R6, R4, 0x2, RZ ; /* 0x000000020406b819 */
/* 0x000fe400000006ff */
/*0230*/ @!P3 LDS R4, [R4.X4+-0x4] ; /* 0xfffffc000404b984 */
/* 0x000fe60000004800 */
/*0240*/ @!P3 IMAD R6, R9.reuse, 0x4, R6 ; /* 0x000000040906b824 */
/* 0x040fe400078e0206 */
/*0250*/ IMAD.SHL.U32 R9, R9, 0x2, RZ ; /* 0x0000000209097824 */
/* 0x000fc600078e00ff */
/*0260*/ @!P3 LDS R5, [R6+-0x4] ; /* 0xfffffc000605b984 */
/* 0x000e240000000800 */
/*0270*/ @!P3 IMAD.IADD R5, R5, 0x1, R4 ; /* 0x000000010505b824 */
/* 0x001fca00078e0204 */
/*0280*/ @!P3 STS [R6+-0x4], R5 ; /* 0xfffffc050600b388 */
/* 0x0001e20000000800 */
/*0290*/ ISETP.GT.AND P3, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe20003f64270 */
/*02a0*/ IMAD.MOV.U32 R3, RZ, RZ, R7 ; /* 0x000000ffff037224 */
/* 0x000fd800078e0007 */
/*02b0*/ @P3 BRA 0x1e0 ; /* 0xffffff2000003947 */
/* 0x001fea000383ffff */
/*02c0*/ ISETP.NE.AND P3, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f65270 */
/*02d0*/ UIADD3 UR7, UP0, UR4, -0x1, URZ ; /* 0xffffffff04077890 */
/* 0x000fe2000ff1e03f */
/*02e0*/ LDS R7, [UR5+-0x4] ; /* 0xfffffc05ff077984 */
/* 0x001e260008000800 */
/*02f0*/ UIADD3.X UR8, UR6, -0x1, URZ, UP0, !UPT ; /* 0xffffffff06087890 */
/* 0x000fd000087fe43f */
/*0300*/ @!P3 STS [UR5+-0x4], RZ ; /* 0xfffffcffff00b988 */
/* 0x000fe20008000805 */
/*0310*/ @!P2 BRA 0x450 ; /* 0x000001300000a947 */
/* 0x000fea0003800000 */
/*0320*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*0330*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0340*/ ISETP.GE.AND P2, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf46270 */
/*0350*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x000fe2000800063f */
/*0360*/ SHF.R.S32.HI R9, RZ, 0x1, R9 ; /* 0x00000001ff097819 */
/* 0x000fc60000011409 */
/*0370*/ BSSY B0, 0x440 ; /* 0x000000c000007945 */
/* 0x000fe40003800000 */
/*0380*/ ISETP.LE.AND P3, PT, R8, UR4, PT ; /* 0x0000000408007c0c */
/* 0x000fcc000bf63270 */
/*0390*/ @P2 BRA 0x430 ; /* 0x0000009000002947 */
/* 0x002fea0003800000 */
/*03a0*/ IMAD R3, R2, R9, RZ ; /* 0x0000000902037224 */
/* 0x000fc800078e02ff */
/*03b0*/ IMAD.SHL.U32 R4, R3, 0x4, RZ ; /* 0x0000000403047824 */
/* 0x000fca00078e00ff */
/*03c0*/ LEA R11, R9, R4, 0x2 ; /* 0x00000004090b7211 */
/* 0x000fe400078e10ff */
/*03d0*/ LDS R4, [R3.X4+-0x4] ; /* 0xfffffc0003047984 */
/* 0x000fe80000004800 */
/*03e0*/ LDS R6, [R11+-0x4] ; /* 0xfffffc000b067984 */
/* 0x000e680000000800 */
/*03f0*/ STS [R3.X4+-0x4], R6 ; /* 0xfffffc0603007388 */
/* 0x002fe80000004800 */
/*0400*/ LDS R5, [R11+-0x4] ; /* 0xfffffc000b057984 */
/* 0x000e640000000800 */
/*0410*/ IMAD.IADD R4, R4, 0x1, R5 ; /* 0x0000000104047824 */
/* 0x002fca00078e0205 */
/*0420*/ STS [R11+-0x4], R4 ; /* 0xfffffc040b007388 */
/* 0x0003e40000000800 */
/*0430*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0440*/ @!P3 BRA 0x330 ; /* 0xfffffee00000b947 */
/* 0x000fea000383ffff */
/*0450*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0460*/ IADD3 R3, P2, R2, UR7, RZ ; /* 0x0000000702037c10 */
/* 0x000fc8000ff5e0ff */
/*0470*/ LEA.HI.X.SX32 R4, R2, UR8, 0x1, P2 ; /* 0x0000000802047c11 */
/* 0x002fe400090f0eff */
/*0480*/ LEA R2, P2, R3, c[0x0][0x160], 0x2 ; /* 0x0000580003027a11 */
/* 0x000fc800078410ff */
/*0490*/ LEA.HI.X R3, R3, c[0x0][0x164], R4, 0x2, P2 ; /* 0x0000590003037a11 */
/* 0x000fe200010f1404 */
/*04a0*/ @!P0 LDS R5, [R0.X8] ; /* 0x0000000000058984 */
/* 0x000e680000008800 */
/*04b0*/ @!P0 STG.E [R2.64+-0x4], R5 ; /* 0xfffffc0502008986 */
/* 0x0023e2000c10190c */
/*04c0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*04d0*/ LDS R9, [R0.X8+0x4] ; /* 0x0000040000097984 */
/* 0x000ea20000008800 */
/*04e0*/ ULDC UR6, c[0x0][0x174] ; /* 0x00005d0000067ab9 */
/* 0x000fe40000000800 */
/*04f0*/ UIADD3 UR5, UP0, UR7, UR6, URZ ; /* 0x0000000607057290 */
/* 0x000fe4000ff1e03f */
/*0500*/ ULDC.64 UR10, c[0x0][0x160] ; /* 0x00005800000a7ab9 */
/* 0x000fc40000000a00 */
/*0510*/ ULEA.HI.X.SX32 UR6, UR6, UR8, 0x1, UP0 ; /* 0x0000000806067291 */
/* 0x000fe400080f0e3f */
/*0520*/ ULEA UR4, UP0, UR5, UR10, 0x2 ; /* 0x0000000a05047291 */
/* 0x000fc8000f80103f */
/*0530*/ ULEA.HI.X UR5, UR5, UR11, UR6, 0x2, UP0 ; /* 0x0000000b05057291 */
/* 0x000fe400080f1406 */
/*0540*/ IMAD.U32 R4, RZ, RZ, UR4 ; /* 0x00000004ff047e24 */
/* 0x000fc8000f8e00ff */
/*0550*/ IMAD.U32 R5, RZ, RZ, UR5 ; /* 0x00000005ff057e24 */
/* 0x002fe2000f8e00ff */
/*0560*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x004fe8000c10190c */
/*0570*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x001fe2000c10190c */
/*0580*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0590*/ BRA 0x590; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0600*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0610*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0620*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0630*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z9transposePiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.Y ; /* 0x0000000000067919 */
/* 0x000e220000002200 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e280000002600 */
/*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e680000002500 */
/*0050*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e620000002100 */
/*0060*/ IMAD R3, R7, c[0x0][0x4], R6 ; /* 0x0000010007037a24 */
/* 0x001fca00078e0206 */
/*0070*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe20003f06270 */
/*0080*/ IMAD R0, R5, c[0x0][0x0], R4 ; /* 0x0000000005007a24 */
/* 0x002fca00078e0204 */
/*0090*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000706670 */
/*00a0*/ @!P0 MOV R2, 0x4 ; /* 0x0000000400028802 */
/* 0x000fe20000000f00 */
/*00b0*/ @!P0 IMAD R3, R3, c[0x0][0x170], R0 ; /* 0x00005c0003038a24 */
/* 0x000fc800078e0200 */
/*00c0*/ @!P0 IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003028625 */
/* 0x000fcc00078e0202 */
/*00d0*/ @!P0 LDG.E R2, [R2.64] ; /* 0x0000000402028981 */
/* 0x000ea2000c1e1900 */
/*00e0*/ IMAD R0, R7, c[0x0][0x4], R4 ; /* 0x0000010007007a24 */
/* 0x000fe200078e0204 */
/*00f0*/ @!P0 LEA R7, R6, R4, 0x5 ; /* 0x0000000406078211 */
/* 0x000fe200078e28ff */
/*0100*/ IMAD R5, R5, c[0x0][0x0], R6 ; /* 0x0000000005057a24 */
/* 0x000fc600078e0206 */
/*0110*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fc80003f26270 */
/*0120*/ ISETP.GE.OR P1, PT, R5, c[0x0][0x170], P1 ; /* 0x00005c0005007a0c */
/* 0x000fe20000f26670 */
/*0130*/ @!P0 STS [R7.X4], R2 ; /* 0x0000000207008388 */
/* 0x0041e80000004800 */
/*0140*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*0150*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0160*/ LEA R7, R4, R6, 0x5 ; /* 0x0000000604077211 */
/* 0x001fe200078e28ff */
/*0170*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0180*/ IMAD R2, R5, c[0x0][0x174], R0 ; /* 0x00005d0005027a24 */
/* 0x000fc800078e0200 */
/*0190*/ LDS R7, [R7.X4] ; /* 0x0000000007077984 */
/* 0x000e2a0000004800 */
/*01a0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fca00078e0203 */
/*01b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x001fe2000c101904 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#define WIDTH 7
#define HEIGHT 3
#define THREADS_PER_BLOCK 32
#define CUDA_CHECK(err) if(err != cudaSuccess)\
{\
printf("cudaMalloc returned error %s (code %d) (file %s) (line %d)\n", cudaGetErrorString(err), err, __FILE__, __LINE__);\
}\
__global__ void transpose(int *input, int *output, int width, int height)
{
__shared__ int temp[THREADS_PER_BLOCK][THREADS_PER_BLOCK];
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
int id_in = yIndex * width + xIndex;
temp[threadIdx.y][threadIdx.x] = input[id_in];
}
__syncthreads();
xIndex = blockIdx.y * blockDim.y + threadIdx.x;
yIndex = blockIdx.x * blockDim.x + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
int id_out = yIndex * height + xIndex;
output[id_out] = temp[threadIdx.x][threadIdx.y];
}
}
inline __device__
void PrefixSum(int* output, int* input, int w, int nextpow2)
{
extern __shared__ int temp[];
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
int last = temp[nextpow2 - 1];
if(tdx == 0) temp[nextpow2 - 1] = 0;
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
if(tdx2p < w) output[w] = last;
}
__global__ void KernPrefixSumRows(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*width-1, in+row*width, width, 2*blockDim.x );
}
__global__ void KernPrefixSumRowsTrans(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*(width+1)+(width+1), in+row*width, width, 2*blockDim.x );
}
void PrefixSumRows(int *out, int *in, int *outT, int height, int width)
{
dim3 blockDim = dim3( 1, 1);
while(blockDim.x < ceil(width/2.0f)) blockDim.x <<= 1;
dim3 gridDim = dim3( 1, height );
KernPrefixSumRows<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,in,height,width);
cudaDeviceSynchronize();
dim3 gridSize, blockSize;
gridSize.x = (int)((width + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((height + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, width, height);
cudaDeviceSynchronize();
memset(out, 0, (HEIGHT+1)*sizeof(int));
blockDim = dim3( 1, 1);
while(blockDim.x < ceil((height)/2.0f)) blockDim.x <<= 1;
gridDim = dim3( 1, width );
KernPrefixSumRowsTrans<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,outT,width,height);
cudaDeviceSynchronize();
gridSize.x = (int)((height+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((width+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, height+1, width+1);
cudaDeviceSynchronize();
}
void ComputeIntegrals(const unsigned char *Img, int *Integral) {
const int SUM_WIDTH_STEP = (WIDTH+1);
#define SUM_TYPE int
int iW = WIDTH; // image dimensions
int iH = HEIGHT;
int sW = WIDTH+1; // sum dimensions
unsigned char *ImgPtr = 0;
SUM_TYPE *IntegPtr = 0;
// write zeros to first row
memset(Integral, 0, (WIDTH+1)*sizeof(int));
//#if WITH_CUDA
// CudaComputeIntegralImages(Img, Integral, TiltedIntegral, SUM_WIDTH_STEP, cudaComputeStream);
//#else
{
int yy=1;
ImgPtr = (unsigned char *)(Img + WIDTH*(yy-1));
IntegPtr = (SUM_TYPE *)(Integral + SUM_WIDTH_STEP*yy);
SUM_TYPE *IntegPtrA = IntegPtr - 1;
SUM_TYPE *IntegPtrB = IntegPtr - sW - 1;
SUM_TYPE *IntegPtrC = IntegPtr - sW;
*IntegPtr++ = (SUM_TYPE)0.0;
IntegPtrA++;
IntegPtrB++;
IntegPtrC++;
for(int xx=1; xx<iW; xx++){
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr++);
*IntegPtr++ = fTemp
+ *IntegPtrA++
- *IntegPtrB++
+ *IntegPtrC++;
}
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr);
*IntegPtr = fTemp
+ *IntegPtrA
- *IntegPtrB
+ *IntegPtrC;
}
// compute regular integral and first pass of tilted
for(int yy=2; yy<=iH; yy++){
ImgPtr = (unsigned char *)(Img + WIDTH*(yy-1));
IntegPtr = (SUM_TYPE *)(Integral + SUM_WIDTH_STEP*yy);
SUM_TYPE *IntegPtrA = IntegPtr - 1;
SUM_TYPE *IntegPtrB = IntegPtr - sW - 1;
SUM_TYPE *IntegPtrC = IntegPtr - sW;
*IntegPtr++ = (SUM_TYPE)0.0;
IntegPtrA++;
IntegPtrB++;
IntegPtrC++;
for(int xx=1; xx<iW; xx++){
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr++);
*IntegPtr++ = fTemp
+ *IntegPtrA++
- *IntegPtrB++
+ *IntegPtrC++;
}
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr);
*IntegPtr = fTemp
+ *IntegPtrA
- *IntegPtrB
+ *IntegPtrC;
}
printf("\n\n");
//#endif
}
int main() {
unsigned char *Img=0;
int *ImgInt=0;
int *Integral=0;
int *IntegralTransposed=0;
clock_t start, end;
CUDA_CHECK( cudaMallocManaged((void **) &Img, WIDTH*HEIGHT) );
CUDA_CHECK( cudaMallocManaged((void **) &ImgInt, WIDTH*HEIGHT*sizeof(int)) );
CUDA_CHECK( cudaMallocManaged((void **) &Integral, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
CUDA_CHECK( cudaMallocManaged((void **) &IntegralTransposed, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
for (int i=0; i<WIDTH*HEIGHT; i++) Img[i] = 1;
for (int i=0; i<WIDTH*HEIGHT; i++) ImgInt[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) IntegralTransposed[i] = 1;
start = clock();
ComputeIntegrals(Img, Integral);
end = clock();
printf("CPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
int *IntegPtr;
unsigned char *ImgPtr;
// input
printf("Input\n\n");
for (int i=0; i<HEIGHT; i++) {
for (int j=0; j<WIDTH; j++) {
ImgPtr = Img + i * WIDTH + j;
printf("%d ", *ImgPtr);
}
printf("\n");
}
printf("\n\n");
printf("Output CPU");
printf("\n\n");
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = Integral + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
printf("\n\n");
printf("OUTPUT GPU");
printf("\n\n");
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 0;
//CudaComputeIntegralImages(ImgInt, Integral, IntegralTransposed);
start = clock();
PrefixSumRows(Integral, ImgInt, IntegralTransposed, HEIGHT, WIDTH);
end = clock();
printf("GPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = IntegralTransposed + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
cudaFree(Img);
cudaFree(ImgInt);
cudaFree(Integral);
cudaFree(IntegralTransposed);
return 0;
} | .file "tmpxft_00139c64_00000000-6_integralImageExclusive.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\n\n"
.text
.globl _Z16ComputeIntegralsPKhPi
.type _Z16ComputeIntegralsPKhPi, @function
_Z16ComputeIntegralsPKhPi:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq %rdi, %r8
pxor %xmm0, %xmm0
movups %xmm0, (%rsi)
movups %xmm0, 16(%rsi)
leaq 32(%rsi), %rdx
movl $0, 32(%rsi)
leaq 6(%rdi), %rdi
movq %r8, %rcx
.L4:
addq $1, %rcx
addq $4, %rdx
movzbl -1(%rcx), %eax
addl -4(%rdx), %eax
subl -36(%rdx), %eax
addl -32(%rdx), %eax
movl %eax, (%rdx)
cmpq %rdi, %rcx
jne .L4
movzbl 6(%r8), %eax
addl 56(%rsi), %eax
subl 24(%rsi), %eax
addl 28(%rsi), %eax
movl %eax, 60(%rsi)
leaq 13(%r8), %rdi
addq $64, %rsi
addq $27, %r8
.L6:
leaq -6(%rdi), %rcx
movq %rsi, %r9
movl $0, (%rsi)
movq %rsi, %rdx
.L5:
addq $1, %rcx
addq $4, %rdx
movzbl -1(%rcx), %eax
addl -4(%rdx), %eax
subl -36(%rdx), %eax
addl -32(%rdx), %eax
movl %eax, (%rdx)
cmpq %rdi, %rcx
jne .L5
movzbl (%rcx), %eax
addl 24(%r9), %eax
subl -8(%r9), %eax
addl -4(%r9), %eax
movl %eax, 28(%r9)
addq $7, %rdi
addq $32, %rsi
cmpq %rdi, %r8
jne .L6
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z16ComputeIntegralsPKhPi, .-_Z16ComputeIntegralsPKhPi
.globl _Z32__device_stub__Z9transposePiS_iiPiS_ii
.type _Z32__device_stub__Z9transposePiS_iiPiS_ii, @function
_Z32__device_stub__Z9transposePiS_iiPiS_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9transposePiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z32__device_stub__Z9transposePiS_iiPiS_ii, .-_Z32__device_stub__Z9transposePiS_iiPiS_ii
.globl _Z9transposePiS_ii
.type _Z9transposePiS_ii, @function
_Z9transposePiS_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9transposePiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z9transposePiS_ii, .-_Z9transposePiS_ii
.globl _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii
.type _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii, @function
_Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17KernPrefixSumRowsPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii, .-_Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii
.globl _Z17KernPrefixSumRowsPiS_ii
.type _Z17KernPrefixSumRowsPiS_ii, @function
_Z17KernPrefixSumRowsPiS_ii:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z17KernPrefixSumRowsPiS_ii, .-_Z17KernPrefixSumRowsPiS_ii
.globl _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii
.type _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii, @function
_Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z22KernPrefixSumRowsTransPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii, .-_Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii
.globl _Z22KernPrefixSumRowsTransPiS_ii
.type _Z22KernPrefixSumRowsTransPiS_ii, @function
_Z22KernPrefixSumRowsTransPiS_ii:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z22KernPrefixSumRowsTransPiS_ii, .-_Z22KernPrefixSumRowsTransPiS_ii
.globl _Z13PrefixSumRowsPiS_S_ii
.type _Z13PrefixSumRowsPiS_S_ii, @function
_Z13PrefixSumRowsPiS_S_ii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $48, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r12
movq %rsi, %r14
movq %rdx, %r13
movl %ecx, %ebx
movl %r8d, %ebp
movl $1, 4(%rsp)
movl $1, 8(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl %r8d, %xmm0
mulss .LC1(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC5(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC2(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L36
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC4(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L36:
movl $1, %eax
jmp .L37
.L38:
addl %eax, %eax
.L37:
movl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
comiss %xmm0, %xmm1
ja .L38
movl $1, 12(%rsp)
movl %ebx, 16(%rsp)
movl $1, 20(%rsp)
movl %eax, (%rsp)
movl %eax, %eax
movl 8(%rsp), %ecx
movl $0, %r9d
leaq 0(,%rax,8), %r8
movq (%rsp), %rdx
movq 12(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L51
.L41:
call cudaDeviceSynchronize@PLT
movl $1, 32(%rsp)
movl $1, 44(%rsp)
leal 62(%rbp), %eax
movl %ebp, %edx
addl $31, %edx
cmovns %edx, %eax
sarl $5, %eax
movl %eax, 24(%rsp)
leal 62(%rbx), %eax
movl %ebx, %edx
addl $31, %edx
cmovns %edx, %eax
sarl $5, %eax
movl %eax, 28(%rsp)
movl $32, 36(%rsp)
movl $32, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L42:
call cudaDeviceSynchronize@PLT
pxor %xmm0, %xmm0
movups %xmm0, (%r12)
pxor %xmm0, %xmm0
cvtsi2ssl %ebx, %xmm0
mulss .LC1(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC5(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC2(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L43
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC4(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L43:
movl $1, %eax
jmp .L44
.L51:
movl %ebp, %ecx
movl %ebx, %edx
movq %r14, %rsi
movq %r12, %rdi
call _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii
jmp .L41
.L52:
movl %ebx, %ecx
movl %ebp, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z32__device_stub__Z9transposePiS_iiPiS_ii
jmp .L42
.L45:
addl %eax, %eax
.L44:
movl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
comiss %xmm0, %xmm1
ja .L45
movl %ebp, 16(%rsp)
movl %eax, (%rsp)
movl %eax, %eax
movl 8(%rsp), %ecx
movl $0, %r9d
leaq 0(,%rax,8), %r8
movq (%rsp), %rdx
movq 12(%rsp), %rdi
movl 20(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L48:
call cudaDeviceSynchronize@PLT
leal 63(%rbx), %eax
movl %ebx, %edx
addl $32, %edx
cmovns %edx, %eax
sarl $5, %eax
movl %eax, 24(%rsp)
leal 63(%rbp), %eax
movl %ebp, %edx
addl $32, %edx
cmovns %edx, %eax
sarl $5, %eax
movl %eax, 28(%rsp)
movl 44(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movq 24(%rsp), %rdi
movl 32(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L49:
call cudaDeviceSynchronize@PLT
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movl %ebx, %ecx
movl %ebp, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii
jmp .L48
.L54:
leal 1(%rbp), %ecx
leal 1(%rbx), %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z32__device_stub__Z9transposePiS_iiPiS_ii
jmp .L49
.cfi_endproc
.LFE2058:
.size _Z13PrefixSumRowsPiS_S_ii, .-_Z13PrefixSumRowsPiS_S_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "/home/ubuntu/Datasets/stackv2/train-structured/r-abishek/standaloneCodeTrials/main/integralImage/integralImageExclusive.cu"
.align 8
.LC8:
.string "cudaMalloc returned error %s (code %d) (file %s) (line %d)\n"
.section .rodata.str1.1
.LC10:
.string "CPU Time Taken: %f\n"
.LC11:
.string "Input\n\n"
.LC12:
.string "%d "
.LC13:
.string "\n"
.LC14:
.string "Output CPU"
.LC15:
.string "OUTPUT GPU"
.LC16:
.string "GPU Time Taken: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $48, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movq $0, 32(%rsp)
leaq 8(%rsp), %rdi
movl $1, %edx
movl $21, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L84
.L56:
leaq 16(%rsp), %rdi
movl $1, %edx
movl $84, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L85
.L57:
leaq 24(%rsp), %rdi
movl $1, %edx
movl $128, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L86
.L58:
leaq 32(%rsp), %rdi
movl $1, %edx
movl $128, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L87
.L59:
movl $0, %eax
.L60:
movq 8(%rsp), %rdx
movb $1, (%rdx,%rax)
addq $1, %rax
cmpq $21, %rax
jne .L60
movl $0, %eax
.L61:
movq 16(%rsp), %rdx
movl $1, (%rdx,%rax)
addq $4, %rax
cmpq $84, %rax
jne .L61
movl $0, %eax
.L62:
movq 24(%rsp), %rdx
movl $1, (%rdx,%rax)
addq $4, %rax
cmpq $128, %rax
jne .L62
movl $0, %eax
.L63:
movq 32(%rsp), %rdx
movl $1, (%rdx,%rax)
addq $4, %rax
cmpq $128, %rax
jne .L63
call clock@PLT
movq %rax, %rbx
movq 24(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z16ComputeIntegralsPKhPi
call clock@PLT
subq %rbx, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC9(%rip), %xmm0
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $7, %ebp
leaq .LC12(%rip), %r12
leaq .LC13(%rip), %r13
.L64:
leaq -7(%rbp), %rbx
.L65:
movq 8(%rsp), %rax
movzbl (%rax,%rbx), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq %rbp, %rbx
jne .L65
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $7, %rbp
cmpq $28, %rbp
jne .L64
leaq .LC0(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $32, %r12d
movl $0, %ebp
leaq .LC12(%rip), %r13
leaq .LC13(%rip), %r14
.L67:
leaq 0(,%rbp,4), %rbx
.L68:
movq 24(%rsp), %rax
movl (%rax,%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L68
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rbp
addq $32, %r12
cmpq $32, %rbp
jne .L67
leaq .LC0(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L70:
movq 24(%rsp), %rdx
movl $0, (%rdx,%rax)
addq $4, %rax
cmpq $128, %rax
jne .L70
call clock@PLT
movq %rax, %rbx
movl $7, %r8d
movl $3, %ecx
movq 32(%rsp), %rdx
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z13PrefixSumRowsPiS_S_ii
call clock@PLT
subq %rbx, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC9(%rip), %xmm0
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0, %r13d
leaq .LC12(%rip), %r12
leaq .LC13(%rip), %r14
.L71:
leaq 0(,%r13,4), %rbx
.L72:
movq 32(%rsp), %rax
movl (%rax,%rbx), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L72
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %r13
addq $32, %rbp
cmpq $32, %r13
jne .L71
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L88
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L84:
.cfi_restore_state
leaq 8(%rsp), %rbp
movl $1, %edx
movl $21, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %ebx
movl $1, %edx
movl $21, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $231, %r9d
leaq .LC7(%rip), %r8
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L56
.L85:
leaq 16(%rsp), %rbp
movl $1, %edx
movl $84, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %ebx
movl $1, %edx
movl $84, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $232, %r9d
leaq .LC7(%rip), %r8
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L57
.L86:
leaq 24(%rsp), %rbp
movl $1, %edx
movl $128, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %ebx
movl $1, %edx
movl $128, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $233, %r9d
leaq .LC7(%rip), %r8
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L58
.L87:
leaq 32(%rsp), %rbp
movl $1, %edx
movl $128, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %ebx
movl $1, %edx
movl $128, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $234, %r9d
leaq .LC7(%rip), %r8
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L59
.L88:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC17:
.string "_Z22KernPrefixSumRowsTransPiS_ii"
.section .rodata.str1.1
.LC18:
.string "_Z17KernPrefixSumRowsPiS_ii"
.LC19:
.string "_Z9transposePiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _Z22KernPrefixSumRowsTransPiS_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z17KernPrefixSumRowsPiS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z9transposePiS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1056964608
.align 4
.LC2:
.long 1258291200
.align 4
.LC4:
.long 1065353216
.align 4
.LC5:
.long 2147483647
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC9:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#define WIDTH 7
#define HEIGHT 3
#define THREADS_PER_BLOCK 32
#define CUDA_CHECK(err) if(err != cudaSuccess)\
{\
printf("cudaMalloc returned error %s (code %d) (file %s) (line %d)\n", cudaGetErrorString(err), err, __FILE__, __LINE__);\
}\
// Tiled 2-D transpose of a row-major int matrix.
// input:  width x height source, read as input[y*width + x]
// output: receives the transpose, written with row stride `height`
// Launched with 32x32 (THREADS_PER_BLOCK^2) thread blocks; each block
// stages one tile in shared memory and re-reads it with swapped indices.
__global__ void transpose(int *input, int *output, int width, int height)
{
// Shared staging tile. NOTE(review): no +1 padding column, so the
// swapped-index read below is prone to shared-memory bank conflicts —
// functional, but worth confirming if performance matters.
__shared__ int temp[THREADS_PER_BLOCK][THREADS_PER_BLOCK];
// Global coordinates of the element this thread loads.
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
int id_in = yIndex * width + xIndex;
temp[threadIdx.y][threadIdx.x] = input[id_in];
}
// Every load must land in the tile before any thread reads it transposed.
__syncthreads();
// Swap block roles: this thread now writes the mirrored tile element to
// the transposed position in the output.
xIndex = blockIdx.y * blockDim.y + threadIdx.x;
yIndex = blockIdx.x * blockDim.x + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
int id_out = yIndex * height + xIndex;
output[id_out] = temp[threadIdx.x][threadIdx.y];
}
}
// Work-efficient (Blelloch) scan over one row of `w` ints, run by a single
// thread block; each thread owns elements 2*tdx and 2*tdx+1.
// output:   receives the exclusive prefix sum in [0, w) plus the row total
//           appended at output[w] (w+1 values in all).
// input:    the w source values.
// w:        logical row length.
// nextpow2: power-of-two scan length >= w (callers pass 2*blockDim.x).
// Requires nextpow2 ints of dynamically allocated shared memory.
inline __device__
void PrefixSum(int* output, int* input, int w, int nextpow2)
{
extern __shared__ int temp[];
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
// Load two elements per thread; pad past-the-end slots with 0 so the tree
// phases can assume a full power-of-two array.
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
// Up-sweep (reduce): afterwards temp[nextpow2-1] holds the row total.
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
// Bug fix: the original read temp[nextpow2-1] with no barrier after the
// up-sweep's final (d==1) update and no barrier before thread 0 clears it,
// so other threads could capture a stale or already-cleared total. Fence
// both sides of the read; control flow here is uniform, so this is safe.
__syncthreads();
int last = temp[nextpow2 - 1];
__syncthreads();
// Clear the root so the down-sweep produces an exclusive scan.
if(tdx == 0) temp[nextpow2 - 1] = 0;
// Down-sweep: walk back down the tree, swapping and accumulating, to
// leave the exclusive scan in place. The barrier at each iteration's top
// also orders the root clear above against the first reads.
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// Publish the scan; threads whose odd slot is in range also (redundantly,
// with identical values) append the row total at output[w].
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
if(tdx2p < w) output[w] = last;
}
// Row-scan launcher: one thread block per input row, selected by
// blockIdx.y; scans row `row` of `in` (height rows of `width` ints) with
// the shared-memory PrefixSum helper.
// The destination pointer is shifted back one element so the w+1 values
// PrefixSum emits (exclusive scan plus appended total) end exactly at
// out[row*width + width - 1].
// NOTE(review): for row 0 that shift makes PrefixSum store to out[-1],
// one int before the buffer — confirm the allocation provides slack
// before `out`, otherwise this is an out-of-bounds write.
__global__ void KernPrefixSumRows(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*width-1, in+row*width, width, 2*blockDim.x );
}
// Column-pass launcher used after the transpose step: one block per
// transposed row (blockIdx.y). Reads rows of `width` ints from `in`, but
// writes into `out` with row stride width+1, offset one full row — the
// results start at out[(row+1)*(width+1)] — leaving out's first row for
// the host-side zero fill. This builds the (h+1) x (w+1) sum-table layout.
__global__ void KernPrefixSumRowsTrans(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*(width+1)+(width+1), in+row*width, width, 2*blockDim.x );
}
// Builds an integral-image-style table on the GPU:
//  1) scan every row of `in` (height x width) into `out`,
//  2) transpose into `outT`,
//  3) scan the transposed rows (original columns) back into `out` in the
//     (height+1) x (width+1) layout,
//  4) transpose once more so `outT` holds the final table.
// out/outT must each hold at least (width+1)*(height+1) ints and — because
// of the host-side memset below — must be host-accessible (e.g. managed)
// memory. Synchronizes after every launch.
void PrefixSumRows(int *out, int *in, int *outT, int height, int width)
{
// One block per row; blockDim.x is the smallest power of two covering
// ceil(width/2) (PrefixSum processes two elements per thread).
dim3 blockDim = dim3( 1, 1);
while(blockDim.x < ceil(width/2.0f)) blockDim.x <<= 1;
dim3 gridDim = dim3( 1, height );
KernPrefixSumRows<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,in,height,width);
cudaDeviceSynchronize();
dim3 gridSize, blockSize;
gridSize.x = (int)((width + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((height + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, width, height);
cudaDeviceSynchronize();
// Zero the first row of the column-pass output. Fix: the original used
// the file macro (HEIGHT+1) here, silently tying this otherwise generic
// function to the compile-time image size; use the `height` parameter
// (identical behavior for the existing HEIGHT-sized caller).
memset(out, 0, (height+1)*sizeof(int));
// Column pass: same block sizing, one block per transposed row.
blockDim = dim3( 1, 1);
while(blockDim.x < ceil((height)/2.0f)) blockDim.x <<= 1;
gridDim = dim3( 1, width );
KernPrefixSumRowsTrans<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,outT,width,height);
cudaDeviceSynchronize();
// Final transpose of the (width+1) x (height+1) intermediate into outT.
gridSize.x = (int)((height+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((width+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, height+1, width+1);
cudaDeviceSynchronize();
}
// Computes the (HEIGHT+1) x (WIDTH+1) integral image of Img (WIDTH x
// HEIGHT, row-major bytes) into Integral, row stride WIDTH+1:
//   Integral[y*(WIDTH+1)+x] = sum of Img over rows [0,y) and cols [0,x),
// so row 0 and column 0 are zero. Uses the standard recurrence
//   I[y][x] = img[y-1][x-1] + I[y][x-1] - I[y-1][x-1] + I[y-1][x].
// (The guards only matter when this block is compiled standalone; in the
// file, WIDTH/HEIGHT are already defined identically and they are no-ops.)
#ifndef WIDTH
#define WIDTH 7
#endif
#ifndef HEIGHT
#define HEIGHT 3
#endif
void ComputeIntegrals(const unsigned char *Img, int *Integral) {
const int SUM_WIDTH_STEP = (WIDTH+1);
#define SUM_TYPE int
int iW = WIDTH; // image dimensions
int iH = HEIGHT;
int sW = WIDTH+1; // sum dimensions
unsigned char *ImgPtr = 0;
SUM_TYPE *IntegPtr = 0;
// write zeros to first row
memset(Integral, 0, (WIDTH+1)*sizeof(int));
// One pass per image row. (The original spelled out the yy==1 iteration
// separately with a byte-for-byte identical body; the copies are merged.)
for(int yy=1; yy<=iH; yy++){
ImgPtr = (unsigned char *)(Img + WIDTH*(yy-1));
IntegPtr = (SUM_TYPE *)(Integral + SUM_WIDTH_STEP*yy);
// A = left neighbour, B = upper-left, C = upper (in the sum table).
SUM_TYPE *IntegPtrA = IntegPtr - 1;
SUM_TYPE *IntegPtrB = IntegPtr - sW - 1;
SUM_TYPE *IntegPtrC = IntegPtr - sW;
// First column of every sum row is zero.
*IntegPtr++ = (SUM_TYPE)0.0;
IntegPtrA++;
IntegPtrB++;
IntegPtrC++;
for(int xx=1; xx<iW; xx++){
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr++);
*IntegPtr++ = fTemp
+ *IntegPtrA++
- *IntegPtrB++
+ *IntegPtrC++;
}
// Last column: same recurrence, without stepping past the row end.
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr);
*IntegPtr = fTemp
+ *IntegPtrA
- *IntegPtrB
+ *IntegPtrC;
}
printf("\n\n");
}
int main() {
unsigned char *Img=0;
int *ImgInt=0;
int *Integral=0;
int *IntegralTransposed=0;
clock_t start, end;
CUDA_CHECK( cudaMallocManaged((void **) &Img, WIDTH*HEIGHT) );
CUDA_CHECK( cudaMallocManaged((void **) &ImgInt, WIDTH*HEIGHT*sizeof(int)) );
CUDA_CHECK( cudaMallocManaged((void **) &Integral, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
CUDA_CHECK( cudaMallocManaged((void **) &IntegralTransposed, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
for (int i=0; i<WIDTH*HEIGHT; i++) Img[i] = 1;
for (int i=0; i<WIDTH*HEIGHT; i++) ImgInt[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) IntegralTransposed[i] = 1;
start = clock();
ComputeIntegrals(Img, Integral);
end = clock();
printf("CPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
int *IntegPtr;
unsigned char *ImgPtr;
// input
printf("Input\n\n");
for (int i=0; i<HEIGHT; i++) {
for (int j=0; j<WIDTH; j++) {
ImgPtr = Img + i * WIDTH + j;
printf("%d ", *ImgPtr);
}
printf("\n");
}
printf("\n\n");
printf("Output CPU");
printf("\n\n");
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = Integral + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
printf("\n\n");
printf("OUTPUT GPU");
printf("\n\n");
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 0;
//CudaComputeIntegralImages(ImgInt, Integral, IntegralTransposed);
start = clock();
PrefixSumRows(Integral, ImgInt, IntegralTransposed, HEIGHT, WIDTH);
end = clock();
printf("GPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = IntegralTransposed + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
cudaFree(Img);
cudaFree(ImgInt);
cudaFree(Integral);
cudaFree(IntegralTransposed);
return 0;
} | #include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#define WIDTH 7
#define HEIGHT 3
#define THREADS_PER_BLOCK 32
#define CUDA_CHECK(err) if(err != hipSuccess)\
{\
printf("hipMalloc returned error %s (code %d) (file %s) (line %d)\n", hipGetErrorString(err), err, __FILE__, __LINE__);\
}\
// Tiled 2-D transpose of a row-major int matrix.
// input:  width x height source, read as input[y*width + x]
// output: receives the transpose, written with row stride `height`
// Launched with 32x32 (THREADS_PER_BLOCK^2) thread blocks; each block
// stages one tile in shared memory and re-reads it with swapped indices.
__global__ void transpose(int *input, int *output, int width, int height)
{
// Shared staging tile. NOTE(review): no +1 padding column, so the
// swapped-index read below is prone to shared-memory bank conflicts —
// functional, but worth confirming if performance matters.
__shared__ int temp[THREADS_PER_BLOCK][THREADS_PER_BLOCK];
// Global coordinates of the element this thread loads.
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
int id_in = yIndex * width + xIndex;
temp[threadIdx.y][threadIdx.x] = input[id_in];
}
// Every load must land in the tile before any thread reads it transposed.
__syncthreads();
// Swap block roles: this thread now writes the mirrored tile element to
// the transposed position in the output.
xIndex = blockIdx.y * blockDim.y + threadIdx.x;
yIndex = blockIdx.x * blockDim.x + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
int id_out = yIndex * height + xIndex;
output[id_out] = temp[threadIdx.x][threadIdx.y];
}
}
// Work-efficient (Blelloch) scan over one row of `w` ints, run by a single
// thread block; each thread owns elements 2*tdx and 2*tdx+1.
// output:   receives the exclusive prefix sum in [0, w) plus the row total
//           appended at output[w] (w+1 values in all).
// input:    the w source values.
// w:        logical row length.
// nextpow2: power-of-two scan length >= w (callers pass 2*blockDim.x).
// Requires nextpow2 ints of dynamically allocated shared memory.
inline __device__
void PrefixSum(int* output, int* input, int w, int nextpow2)
{
extern __shared__ int temp[];
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
// Load two elements per thread; pad past-the-end slots with 0 so the tree
// phases can assume a full power-of-two array.
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
// Up-sweep (reduce): afterwards temp[nextpow2-1] holds the row total.
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
// Bug fix: the original read temp[nextpow2-1] with no barrier after the
// up-sweep's final (d==1) update and no barrier before thread 0 clears it,
// so other threads could capture a stale or already-cleared total. Fence
// both sides of the read; control flow here is uniform, so this is safe.
__syncthreads();
int last = temp[nextpow2 - 1];
__syncthreads();
// Clear the root so the down-sweep produces an exclusive scan.
if(tdx == 0) temp[nextpow2 - 1] = 0;
// Down-sweep: walk back down the tree, swapping and accumulating, to
// leave the exclusive scan in place. The barrier at each iteration's top
// also orders the root clear above against the first reads.
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// Publish the scan; threads whose odd slot is in range also (redundantly,
// with identical values) append the row total at output[w].
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
if(tdx2p < w) output[w] = last;
}
// Row-scan launcher: one thread block per input row, selected by
// blockIdx.y; scans row `row` of `in` (height rows of `width` ints) with
// the shared-memory PrefixSum helper.
// The destination pointer is shifted back one element so the w+1 values
// PrefixSum emits (exclusive scan plus appended total) end exactly at
// out[row*width + width - 1].
// NOTE(review): for row 0 that shift makes PrefixSum store to out[-1],
// one int before the buffer — confirm the allocation provides slack
// before `out`, otherwise this is an out-of-bounds write.
__global__ void KernPrefixSumRows(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*width-1, in+row*width, width, 2*blockDim.x );
}
// Column-pass launcher used after the transpose step: one block per
// transposed row (blockIdx.y). Reads rows of `width` ints from `in`, but
// writes into `out` with row stride width+1, offset one full row — the
// results start at out[(row+1)*(width+1)] — leaving out's first row for
// the host-side zero fill. This builds the (h+1) x (w+1) sum-table layout.
__global__ void KernPrefixSumRowsTrans(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*(width+1)+(width+1), in+row*width, width, 2*blockDim.x );
}
// Builds an integral-image-style table on the GPU:
//  1) scan every row of `in` (height x width) into `out`,
//  2) transpose into `outT`,
//  3) scan the transposed rows (original columns) back into `out` in the
//     (height+1) x (width+1) layout,
//  4) transpose once more so `outT` holds the final table.
// out/outT must each hold at least (width+1)*(height+1) ints and — because
// of the host-side memset below — must be host-accessible (e.g. managed)
// memory. Synchronizes after every launch.
void PrefixSumRows(int *out, int *in, int *outT, int height, int width)
{
// One block per row; blockDim.x is the smallest power of two covering
// ceil(width/2) (PrefixSum processes two elements per thread).
dim3 blockDim = dim3( 1, 1);
while(blockDim.x < ceil(width/2.0f)) blockDim.x <<= 1;
dim3 gridDim = dim3( 1, height );
KernPrefixSumRows<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,in,height,width);
hipDeviceSynchronize();
dim3 gridSize, blockSize;
gridSize.x = (int)((width + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((height + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, width, height);
hipDeviceSynchronize();
// Zero the first row of the column-pass output. Fix: the original used
// the file macro (HEIGHT+1) here, silently tying this otherwise generic
// function to the compile-time image size; use the `height` parameter
// (identical behavior for the existing HEIGHT-sized caller).
memset(out, 0, (height+1)*sizeof(int));
// Column pass: same block sizing, one block per transposed row.
blockDim = dim3( 1, 1);
while(blockDim.x < ceil((height)/2.0f)) blockDim.x <<= 1;
gridDim = dim3( 1, width );
KernPrefixSumRowsTrans<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,outT,width,height);
hipDeviceSynchronize();
// Final transpose of the (width+1) x (height+1) intermediate into outT.
gridSize.x = (int)((height+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((width+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, height+1, width+1);
hipDeviceSynchronize();
}
// Computes the (HEIGHT+1) x (WIDTH+1) integral image of Img (WIDTH x
// HEIGHT, row-major bytes) into Integral, row stride WIDTH+1:
//   Integral[y*(WIDTH+1)+x] = sum of Img over rows [0,y) and cols [0,x),
// so row 0 and column 0 are zero. Uses the standard recurrence
//   I[y][x] = img[y-1][x-1] + I[y][x-1] - I[y-1][x-1] + I[y-1][x].
// (The guards only matter when this block is compiled standalone; in the
// file, WIDTH/HEIGHT are already defined identically and they are no-ops.)
#ifndef WIDTH
#define WIDTH 7
#endif
#ifndef HEIGHT
#define HEIGHT 3
#endif
void ComputeIntegrals(const unsigned char *Img, int *Integral) {
const int SUM_WIDTH_STEP = (WIDTH+1);
#define SUM_TYPE int
int iW = WIDTH; // image dimensions
int iH = HEIGHT;
int sW = WIDTH+1; // sum dimensions
unsigned char *ImgPtr = 0;
SUM_TYPE *IntegPtr = 0;
// write zeros to first row
memset(Integral, 0, (WIDTH+1)*sizeof(int));
// One pass per image row. (The original spelled out the yy==1 iteration
// separately with a byte-for-byte identical body; the copies are merged.)
for(int yy=1; yy<=iH; yy++){
ImgPtr = (unsigned char *)(Img + WIDTH*(yy-1));
IntegPtr = (SUM_TYPE *)(Integral + SUM_WIDTH_STEP*yy);
// A = left neighbour, B = upper-left, C = upper (in the sum table).
SUM_TYPE *IntegPtrA = IntegPtr - 1;
SUM_TYPE *IntegPtrB = IntegPtr - sW - 1;
SUM_TYPE *IntegPtrC = IntegPtr - sW;
// First column of every sum row is zero.
*IntegPtr++ = (SUM_TYPE)0.0;
IntegPtrA++;
IntegPtrB++;
IntegPtrC++;
for(int xx=1; xx<iW; xx++){
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr++);
*IntegPtr++ = fTemp
+ *IntegPtrA++
- *IntegPtrB++
+ *IntegPtrC++;
}
// Last column: same recurrence, without stepping past the row end.
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr);
*IntegPtr = fTemp
+ *IntegPtrA
- *IntegPtrB
+ *IntegPtrC;
}
printf("\n\n");
}
// Host driver: builds a WIDTH x HEIGHT all-ones image, computes its
// integral image on the CPU (ComputeIntegrals) and on the GPU
// (PrefixSumRows), and prints both tables with rough clock() timings.
int main() {
unsigned char *Img=0;
int *ImgInt=0;
int *Integral=0;
int *IntegralTransposed=0;
clock_t start, end;
// Managed allocations so both the host loops and the kernels can touch
// the same buffers without explicit copies.
CUDA_CHECK( hipMallocManaged((void **) &Img, WIDTH*HEIGHT) );
CUDA_CHECK( hipMallocManaged((void **) &ImgInt, WIDTH*HEIGHT*sizeof(int)) );
CUDA_CHECK( hipMallocManaged((void **) &Integral, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
CUDA_CHECK( hipMallocManaged((void **) &IntegralTransposed, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
// Deterministic test pattern: every pixel is 1 (byte copy for the CPU
// path, int copy for the GPU path); outputs are poisoned with 1s.
for (int i=0; i<WIDTH*HEIGHT; i++) Img[i] = 1;
for (int i=0; i<WIDTH*HEIGHT; i++) ImgInt[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) IntegralTransposed[i] = 1;
// CPU reference computation.
start = clock();
ComputeIntegrals(Img, Integral);
end = clock();
printf("CPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
int *IntegPtr;
unsigned char *ImgPtr;
// input
printf("Input\n\n");
for (int i=0; i<HEIGHT; i++) {
for (int j=0; j<WIDTH; j++) {
ImgPtr = Img + i * WIDTH + j;
printf("%d ", *ImgPtr);
}
printf("\n");
}
printf("\n\n");
printf("Output CPU");
printf("\n\n");
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = Integral + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
printf("\n\n");
printf("OUTPUT GPU");
printf("\n\n");
// Reset the GPU output buffer before the device computation.
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 0;
//CudaComputeIntegralImages(ImgInt, Integral, IntegralTransposed);
// GPU path. NOTE(review): clock() measures host CPU time around the
// launches, not kernel time — hipEvent timing would be more faithful.
start = clock();
PrefixSumRows(Integral, ImgInt, IntegralTransposed, HEIGHT, WIDTH);
end = clock();
printf("GPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
// PrefixSumRows' final transpose leaves the result in IntegralTransposed;
// print it with the same (HEIGHT+1) x (WIDTH+1) shape as the CPU table.
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = IntegralTransposed + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
hipFree(Img);
hipFree(ImgInt);
hipFree(Integral);
hipFree(IntegralTransposed);
return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#define WIDTH 7
#define HEIGHT 3
#define THREADS_PER_BLOCK 32
#define CUDA_CHECK(err) if(err != hipSuccess)\
{\
printf("hipMalloc returned error %s (code %d) (file %s) (line %d)\n", hipGetErrorString(err), err, __FILE__, __LINE__);\
}\
// Tiled 2-D transpose of a row-major int matrix.
// input:  width x height source, read as input[y*width + x]
// output: receives the transpose, written with row stride `height`
// Launched with 32x32 (THREADS_PER_BLOCK^2) thread blocks; each block
// stages one tile in shared memory and re-reads it with swapped indices.
__global__ void transpose(int *input, int *output, int width, int height)
{
// Shared staging tile. NOTE(review): no +1 padding column, so the
// swapped-index read below is prone to shared-memory bank conflicts —
// functional, but worth confirming if performance matters.
__shared__ int temp[THREADS_PER_BLOCK][THREADS_PER_BLOCK];
// Global coordinates of the element this thread loads.
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
int id_in = yIndex * width + xIndex;
temp[threadIdx.y][threadIdx.x] = input[id_in];
}
// Every load must land in the tile before any thread reads it transposed.
__syncthreads();
// Swap block roles: this thread now writes the mirrored tile element to
// the transposed position in the output.
xIndex = blockIdx.y * blockDim.y + threadIdx.x;
yIndex = blockIdx.x * blockDim.x + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
int id_out = yIndex * height + xIndex;
output[id_out] = temp[threadIdx.x][threadIdx.y];
}
}
// Work-efficient (Blelloch) scan over one row of `w` ints, run by a single
// thread block; each thread owns elements 2*tdx and 2*tdx+1.
// output:   receives the exclusive prefix sum in [0, w) plus the row total
//           appended at output[w] (w+1 values in all).
// input:    the w source values.
// w:        logical row length.
// nextpow2: power-of-two scan length >= w (callers pass 2*blockDim.x).
// Requires nextpow2 ints of dynamically allocated shared memory.
inline __device__
void PrefixSum(int* output, int* input, int w, int nextpow2)
{
extern __shared__ int temp[];
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
// Load two elements per thread; pad past-the-end slots with 0 so the tree
// phases can assume a full power-of-two array.
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
// Up-sweep (reduce): afterwards temp[nextpow2-1] holds the row total.
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
// Bug fix: the original read temp[nextpow2-1] with no barrier after the
// up-sweep's final (d==1) update and no barrier before thread 0 clears it,
// so other threads could capture a stale or already-cleared total. Fence
// both sides of the read; control flow here is uniform, so this is safe.
__syncthreads();
int last = temp[nextpow2 - 1];
__syncthreads();
// Clear the root so the down-sweep produces an exclusive scan.
if(tdx == 0) temp[nextpow2 - 1] = 0;
// Down-sweep: walk back down the tree, swapping and accumulating, to
// leave the exclusive scan in place. The barrier at each iteration's top
// also orders the root clear above against the first reads.
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// Publish the scan; threads whose odd slot is in range also (redundantly,
// with identical values) append the row total at output[w].
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
if(tdx2p < w) output[w] = last;
}
// Row-scan launcher: one thread block per input row, selected by
// blockIdx.y; scans row `row` of `in` (height rows of `width` ints) with
// the shared-memory PrefixSum helper.
// The destination pointer is shifted back one element so the w+1 values
// PrefixSum emits (exclusive scan plus appended total) end exactly at
// out[row*width + width - 1].
// NOTE(review): for row 0 that shift makes PrefixSum store to out[-1],
// one int before the buffer — confirm the allocation provides slack
// before `out`, otherwise this is an out-of-bounds write.
__global__ void KernPrefixSumRows(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*width-1, in+row*width, width, 2*blockDim.x );
}
// Column-pass launcher used after the transpose step: one block per
// transposed row (blockIdx.y). Reads rows of `width` ints from `in`, but
// writes into `out` with row stride width+1, offset one full row — the
// results start at out[(row+1)*(width+1)] — leaving out's first row for
// the host-side zero fill. This builds the (h+1) x (w+1) sum-table layout.
__global__ void KernPrefixSumRowsTrans(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*(width+1)+(width+1), in+row*width, width, 2*blockDim.x );
}
// Builds an integral-image-style table on the GPU:
//  1) scan every row of `in` (height x width) into `out`,
//  2) transpose into `outT`,
//  3) scan the transposed rows (original columns) back into `out` in the
//     (height+1) x (width+1) layout,
//  4) transpose once more so `outT` holds the final table.
// out/outT must each hold at least (width+1)*(height+1) ints and — because
// of the host-side memset below — must be host-accessible (e.g. managed)
// memory. Synchronizes after every launch.
void PrefixSumRows(int *out, int *in, int *outT, int height, int width)
{
// One block per row; blockDim.x is the smallest power of two covering
// ceil(width/2) (PrefixSum processes two elements per thread).
dim3 blockDim = dim3( 1, 1);
while(blockDim.x < ceil(width/2.0f)) blockDim.x <<= 1;
dim3 gridDim = dim3( 1, height );
KernPrefixSumRows<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,in,height,width);
hipDeviceSynchronize();
dim3 gridSize, blockSize;
gridSize.x = (int)((width + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((height + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, width, height);
hipDeviceSynchronize();
// Zero the first row of the column-pass output. Fix: the original used
// the file macro (HEIGHT+1) here, silently tying this otherwise generic
// function to the compile-time image size; use the `height` parameter
// (identical behavior for the existing HEIGHT-sized caller).
memset(out, 0, (height+1)*sizeof(int));
// Column pass: same block sizing, one block per transposed row.
blockDim = dim3( 1, 1);
while(blockDim.x < ceil((height)/2.0f)) blockDim.x <<= 1;
gridDim = dim3( 1, width );
KernPrefixSumRowsTrans<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,outT,width,height);
hipDeviceSynchronize();
// Final transpose of the (width+1) x (height+1) intermediate into outT.
gridSize.x = (int)((height+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((width+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, height+1, width+1);
hipDeviceSynchronize();
}
// Computes the (HEIGHT+1) x (WIDTH+1) integral image of Img (WIDTH x
// HEIGHT, row-major bytes) into Integral, row stride WIDTH+1:
//   Integral[y*(WIDTH+1)+x] = sum of Img over rows [0,y) and cols [0,x),
// so row 0 and column 0 are zero. Uses the standard recurrence
//   I[y][x] = img[y-1][x-1] + I[y][x-1] - I[y-1][x-1] + I[y-1][x].
// (The guards only matter when this block is compiled standalone; in the
// file, WIDTH/HEIGHT are already defined identically and they are no-ops.)
#ifndef WIDTH
#define WIDTH 7
#endif
#ifndef HEIGHT
#define HEIGHT 3
#endif
void ComputeIntegrals(const unsigned char *Img, int *Integral) {
const int SUM_WIDTH_STEP = (WIDTH+1);
#define SUM_TYPE int
int iW = WIDTH; // image dimensions
int iH = HEIGHT;
int sW = WIDTH+1; // sum dimensions
unsigned char *ImgPtr = 0;
SUM_TYPE *IntegPtr = 0;
// write zeros to first row
memset(Integral, 0, (WIDTH+1)*sizeof(int));
// One pass per image row. (The original spelled out the yy==1 iteration
// separately with a byte-for-byte identical body; the copies are merged.)
for(int yy=1; yy<=iH; yy++){
ImgPtr = (unsigned char *)(Img + WIDTH*(yy-1));
IntegPtr = (SUM_TYPE *)(Integral + SUM_WIDTH_STEP*yy);
// A = left neighbour, B = upper-left, C = upper (in the sum table).
SUM_TYPE *IntegPtrA = IntegPtr - 1;
SUM_TYPE *IntegPtrB = IntegPtr - sW - 1;
SUM_TYPE *IntegPtrC = IntegPtr - sW;
// First column of every sum row is zero.
*IntegPtr++ = (SUM_TYPE)0.0;
IntegPtrA++;
IntegPtrB++;
IntegPtrC++;
for(int xx=1; xx<iW; xx++){
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr++);
*IntegPtr++ = fTemp
+ *IntegPtrA++
- *IntegPtrB++
+ *IntegPtrC++;
}
// Last column: same recurrence, without stepping past the row end.
SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr);
*IntegPtr = fTemp
+ *IntegPtrA
- *IntegPtrB
+ *IntegPtrC;
}
printf("\n\n");
}
int main() {
unsigned char *Img=0;
int *ImgInt=0;
int *Integral=0;
int *IntegralTransposed=0;
clock_t start, end;
CUDA_CHECK( hipMallocManaged((void **) &Img, WIDTH*HEIGHT) );
CUDA_CHECK( hipMallocManaged((void **) &ImgInt, WIDTH*HEIGHT*sizeof(int)) );
CUDA_CHECK( hipMallocManaged((void **) &Integral, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
CUDA_CHECK( hipMallocManaged((void **) &IntegralTransposed, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
for (int i=0; i<WIDTH*HEIGHT; i++) Img[i] = 1;
for (int i=0; i<WIDTH*HEIGHT; i++) ImgInt[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) IntegralTransposed[i] = 1;
start = clock();
ComputeIntegrals(Img, Integral);
end = clock();
printf("CPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
int *IntegPtr;
unsigned char *ImgPtr;
// input
printf("Input\n\n");
for (int i=0; i<HEIGHT; i++) {
for (int j=0; j<WIDTH; j++) {
ImgPtr = Img + i * WIDTH + j;
printf("%d ", *ImgPtr);
}
printf("\n");
}
printf("\n\n");
printf("Output CPU");
printf("\n\n");
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = Integral + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
printf("\n\n");
printf("OUTPUT GPU");
printf("\n\n");
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 0;
//CudaComputeIntegralImages(ImgInt, Integral, IntegralTransposed);
start = clock();
PrefixSumRows(Integral, ImgInt, IntegralTransposed, HEIGHT, WIDTH);
end = clock();
printf("GPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = IntegralTransposed + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
hipFree(Img);
hipFree(ImgInt);
hipFree(Integral);
hipFree(IntegralTransposed);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
; AMDGPU (gfx1100, wave32) device code for the 32x32 tiled transpose
; kernel. Kernarg layout: input ptr @0x0, output ptr @0x8, width @0x10,
; height @0x14 (loaded as s[4:5]); packed block dims @0x24. Workgroup ids
; arrive in s14/s15, packed workitem id in v0. LDS tile: 4096 B (32x32 ints).
.protected _Z9transposePiS_ii
.globl _Z9transposePiS_ii
.p2align 8
.type _Z9transposePiS_ii,@function
_Z9transposePiS_ii:
; Unpack threadIdx.x (v1) / threadIdx.y (v2), form global x/y, and compare
; against width (s4) / height (s5) for the guarded tile load.
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v2, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_mul_i32 s14, s14, s3
s_mul_i32 s15, s15, s2
v_add_nc_u32_e32 v0, s14, v1
v_add_nc_u32_e32 v3, s15, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s4, v0
v_cmp_gt_i32_e64 s2, s5, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, vcc_lo, s2
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB0_2
; In-bounds lanes: load input[y*width + x] and stash it in the LDS tile
; at temp[threadIdx.y][threadIdx.x] (row stride 128 B = 1 << 7).
s_load_b64 s[6:7], s[0:1], 0x0
v_mad_u64_u32 v[4:5], null, v3, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[3:4], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v0, v[3:4], off
v_lshlrev_b32_e32 v3, 2, v1
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v3, v2, 7, v3
s_waitcnt vmcnt(0)
ds_store_b32 v3, v0
.LBB0_2:
; __syncthreads(): barrier plus L0 invalidate so the tile is visible.
s_or_b32 exec_lo, exec_lo, s2
v_add_nc_u32_e32 v0, s15, v1
v_add_nc_u32_e32 v3, s14, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
; Recompute coordinates with block roles swapped; guarded store of the
; mirrored tile element temp[threadIdx.x][threadIdx.y] to the output.
v_cmp_gt_i32_e32 vcc_lo, s5, v0
v_cmp_gt_i32_e64 s2, s4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_4
v_lshlrev_b32_e32 v2, 2, v2
s_load_b64 s[0:1], s[0:1], 0x8
v_mad_u64_u32 v[4:5], null, v3, s5, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v0, v1, 7, v2
v_ashrrev_i32_e32 v5, 31, v4
ds_load_b32 v2, v0
v_lshlrev_b64 v[0:1], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
; HSA kernel descriptor (compiler-generated; keep in sync with the code).
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9transposePiS_ii
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9transposePiS_ii, .Lfunc_end0-_Z9transposePiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
; AMDGPU (gfx1100, wave32) device code for KernPrefixSumRows — the
; Blelloch-scan kernel (PrefixSum inlined). Kernarg: out @0x0, in @0x8,
; width @0x14, blockDim @0x24; blockIdx.y in s15 selects the row.
; Dynamic LDS holds the scan array; element addresses are computed as
; index*4 - 4 (the -1 bias of ai/bi in the source).
.protected _Z17KernPrefixSumRowsPiS_ii
.globl _Z17KernPrefixSumRowsPiS_ii
.p2align 8
.type _Z17KernPrefixSumRowsPiS_ii,@function
_Z17KernPrefixSumRowsPiS_ii:
; Load the two elements this lane owns (2*tdx in v1, 2*tdx+1 in v2),
; padding out-of-range slots with 0, and store them into LDS.
s_clause 0x1
s_load_b32 s4, s[0:1], 0x14
s_load_b64 s[8:9], s[0:1], 0x8
v_dual_mov_b32 v4, 0 :: v_dual_lshlrev_b32 v1, 1, v0
v_mov_b32_e32 v5, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s6, s15, s4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e64 s2, s4, v1
s_ashr_i32 s7, s6, 31
s_lshl_b64 s[10:11], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s8, s8, s10
s_addc_u32 s9, s9, s11
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_2
v_lshlrev_b32_e32 v2, 2, v1
global_load_b32 v4, v2, s[8:9]
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s3
s_load_b32 s5, s[0:1], 0x24
v_or_b32_e32 v2, 1, v1
v_lshl_add_u32 v3, v1, 2, 0
s_delay_alu instid0(VALU_DEP_2)
v_cmp_gt_i32_e64 s3, s4, v2
s_waitcnt vmcnt(0)
ds_store_b32 v3, v4
s_and_saveexec_b32 s10, s3
s_cbranch_execz .LBB1_4
v_lshlrev_b32_e32 v4, 2, v2
global_load_b32 v5, v4, s[8:9]
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s10
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s10, s5, 0
v_lshl_add_u32 v4, v2, 2, 0
s_and_b32 s9, 0xffff, s5
s_mov_b32 s8, 1
s_lshl_b32 s5, s9, 1
s_and_b32 vcc_lo, exec_lo, s10
s_waitcnt vmcnt(0)
ds_store_b32 v4, v5
s_cbranch_vccnz .LBB1_10
v_add_nc_u32_e32 v5, 2, v1
s_mov_b32 s10, s5
.p2align 6
; Up-sweep loop: each iteration barriers (s_barrier + buffer_gl0_inv),
; then active lanes (tdx < d) add temp[ai] into temp[bi]; offset in s8.
.LBB1_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_lshr_b32 s11, s10, 1
s_mov_b32 s12, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s11, v0
s_cbranch_execz .LBB1_8
v_mul_lo_u32 v6, s8, v2
v_mul_lo_u32 v7, s8, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v6, 2, v6
v_lshlrev_b32_e32 v7, 2, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v6, v6, 0, -4
v_add3_u32 v7, v7, 0, -4
ds_load_b32 v6, v6
ds_load_b32 v8, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v8, v6
ds_store_b32 v7, v6
.LBB1_8:
s_or_b32 exec_lo, exec_lo, s12
s_lshl_b32 s8, s8, 1
s_cmp_lt_u32 s10, 4
s_cbranch_scc1 .LBB1_10
s_mov_b32 s10, s11
s_branch .LBB1_6
; Between sweeps: every lane reads temp[nextpow2-1] (the total, `last`,
; kept in v5) and lane 0 clears it. NOTE(review): matches the source,
; which has no barrier around this read/clear — see the race note there.
.LBB1_10:
s_lshl_b32 s10, s5, 2
s_mov_b32 s11, exec_lo
s_add_i32 s10, s10, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s10, s10, -4
v_mov_b32_e32 v5, s10
ds_load_b32 v5, v5
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_12
v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v7, s10
ds_store_b32 v7, v6
.LBB1_12:
s_or_b32 exec_lo, exec_lo, s11
s_cmp_eq_u32 s9, 0
s_cbranch_scc1 .LBB1_17
v_add_nc_u32_e32 v6, 2, v1
s_mov_b32 s9, 1
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_15
.p2align 6
.LBB1_14:
s_or_b32 exec_lo, exec_lo, s10
s_lshl_b32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s9, s5
s_cbranch_scc1 .LBB1_17
; Down-sweep body: swap temp[ai]/temp[bi] and accumulate, producing the
; exclusive scan in place.
.LBB1_15:
s_lshr_b32 s8, s8, 1
s_mov_b32 s10, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s9, v0
s_cbranch_execz .LBB1_14
v_mul_lo_u32 v7, s8, v6
v_mul_lo_u32 v9, s8, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v7, 2, v7
v_lshlrev_b32_e32 v9, 2, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v7, v7, 0, -4
v_add3_u32 v9, v9, 0, -4
ds_load_b32 v8, v7
ds_load_b32 v10, v9
s_waitcnt lgkmcnt(1)
ds_store_b32 v9, v8
ds_load_b32 v8, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v8, v10
ds_store_b32 v7, v8
s_branch .LBB1_14
; Final barrier, then guarded global stores: the scan values at
; out + row*width - 1 (the -4/-1 bias below) and the total at [w].
.LBB1_17:
s_set_inst_prefetch_distance 0x2
s_load_b64 s[0:1], s[0:1], 0x0
s_lshl_b64 s[6:7], s[6:7], 2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_add_u32 s0, s0, s6
s_addc_u32 s1, s1, s7
s_add_u32 s0, s0, -4
s_addc_u32 s1, s1, -1
s_and_saveexec_b32 s5, s2
s_cbranch_execz .LBB1_19
ds_load_b32 v0, v3
v_lshlrev_b32_e32 v1, 2, v1
s_waitcnt lgkmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB1_19:
s_or_b32 exec_lo, exec_lo, s5
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB1_21
ds_load_b32 v0, v4
s_mov_b32 s5, 0
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v2
s_lshl_b64 s[2:3], s[4:5], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s2, s0, s2
s_addc_u32 s3, s1, s3
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_store_b32 v1, v0, s[0:1]
global_store_b32 v2, v5, s[2:3]
.LBB1_21:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
; HSA kernel descriptor (compiler-generated; keep in sync with the code).
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17KernPrefixSumRowsPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z17KernPrefixSumRowsPiS_ii, .Lfunc_end1-_Z17KernPrefixSumRowsPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z22KernPrefixSumRowsTransPiS_ii
.globl _Z22KernPrefixSumRowsTransPiS_ii
.p2align 8
.type _Z22KernPrefixSumRowsTransPiS_ii,@function
_Z22KernPrefixSumRowsTransPiS_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x14
s_load_b64 s[6:7], s[0:1], 0x8
v_dual_mov_b32 v4, 0 :: v_dual_lshlrev_b32 v1, 1, v0
v_mov_b32_e32 v5, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s8, s15, s4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e64 s2, s4, v1
s_ashr_i32 s9, s8, 31
s_lshl_b64 s[8:9], s[8:9], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s6, s6, s8
s_addc_u32 s7, s7, s9
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB2_2
v_lshlrev_b32_e32 v2, 2, v1
global_load_b32 v4, v2, s[6:7]
.LBB2_2:
s_or_b32 exec_lo, exec_lo, s3
s_load_b32 s5, s[0:1], 0x24
v_or_b32_e32 v2, 1, v1
v_lshl_add_u32 v3, v1, 2, 0
s_delay_alu instid0(VALU_DEP_2)
v_cmp_gt_i32_e64 s3, s4, v2
s_waitcnt vmcnt(0)
ds_store_b32 v3, v4
s_and_saveexec_b32 s8, s3
s_cbranch_execz .LBB2_4
v_lshlrev_b32_e32 v4, 2, v2
global_load_b32 v5, v4, s[6:7]
.LBB2_4:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s8, s5, 0
v_lshl_add_u32 v4, v2, 2, 0
s_and_b32 s7, 0xffff, s5
s_mov_b32 s6, 1
s_lshl_b32 s5, s7, 1
s_and_b32 vcc_lo, exec_lo, s8
s_waitcnt vmcnt(0)
ds_store_b32 v4, v5
s_cbranch_vccnz .LBB2_10
v_add_nc_u32_e32 v5, 2, v1
s_mov_b32 s8, s5
.p2align 6
.LBB2_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_lshr_b32 s9, s8, 1
s_mov_b32 s10, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s9, v0
s_cbranch_execz .LBB2_8
v_mul_lo_u32 v6, s6, v2
v_mul_lo_u32 v7, s6, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v6, 2, v6
v_lshlrev_b32_e32 v7, 2, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v6, v6, 0, -4
v_add3_u32 v7, v7, 0, -4
ds_load_b32 v6, v6
ds_load_b32 v8, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v8, v6
ds_store_b32 v7, v6
.LBB2_8:
s_or_b32 exec_lo, exec_lo, s10
s_lshl_b32 s6, s6, 1
s_cmp_lt_u32 s8, 4
s_cbranch_scc1 .LBB2_10
s_mov_b32 s8, s9
s_branch .LBB2_6
.LBB2_10:
s_lshl_b32 s8, s5, 2
s_mov_b32 s9, exec_lo
s_add_i32 s8, s8, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s8, s8, -4
v_mov_b32_e32 v5, s8
ds_load_b32 v5, v5
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB2_12
v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v7, s8
ds_store_b32 v7, v6
.LBB2_12:
s_or_b32 exec_lo, exec_lo, s9
s_cmp_eq_u32 s7, 0
s_cbranch_scc1 .LBB2_17
v_add_nc_u32_e32 v6, 2, v1
s_mov_b32 s7, 1
s_set_inst_prefetch_distance 0x1
s_branch .LBB2_15
.p2align 6
.LBB2_14:
s_or_b32 exec_lo, exec_lo, s8
s_lshl_b32 s7, s7, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s7, s5
s_cbranch_scc1 .LBB2_17
.LBB2_15:
s_lshr_b32 s6, s6, 1
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s7, v0
s_cbranch_execz .LBB2_14
v_mul_lo_u32 v7, s6, v6
v_mul_lo_u32 v9, s6, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v7, 2, v7
v_lshlrev_b32_e32 v9, 2, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v7, v7, 0, -4
v_add3_u32 v9, v9, 0, -4
ds_load_b32 v8, v7
ds_load_b32 v10, v9
s_waitcnt lgkmcnt(1)
ds_store_b32 v9, v8
ds_load_b32 v8, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v8, v10
ds_store_b32 v7, v8
s_branch .LBB2_14
.LBB2_17:
s_set_inst_prefetch_distance 0x2
s_load_b64 s[0:1], s[0:1], 0x0
s_add_i32 s6, s4, 1
s_waitcnt lgkmcnt(0)
s_mul_i32 s8, s6, s15
s_barrier
s_ashr_i32 s9, s8, 31
buffer_gl0_inv
s_lshl_b64 s[8:9], s[8:9], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_add_u32 s5, s0, s8
s_addc_u32 s8, s1, s9
s_ashr_i32 s7, s6, 31
s_lshl_b64 s[0:1], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s0, s5, s0
s_addc_u32 s1, s8, s1
s_and_saveexec_b32 s5, s2
s_cbranch_execz .LBB2_19
ds_load_b32 v0, v3
v_lshlrev_b32_e32 v1, 2, v1
s_waitcnt lgkmcnt(0)
global_store_b32 v1, v0, s[0:1]
.LBB2_19:
s_or_b32 exec_lo, exec_lo, s5
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB2_21
ds_load_b32 v0, v4
s_mov_b32 s5, 0
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v2
s_lshl_b64 s[2:3], s[4:5], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s2, s0, s2
s_addc_u32 s3, s1, s3
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_store_b32 v1, v0, s[0:1]
global_store_b32 v2, v5, s[2:3]
.LBB2_21:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z22KernPrefixSumRowsTransPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z22KernPrefixSumRowsTransPiS_ii, .Lfunc_end2-_Z22KernPrefixSumRowsTransPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9transposePiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9transposePiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17KernPrefixSumRowsPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17KernPrefixSumRowsPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z22KernPrefixSumRowsTransPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z22KernPrefixSumRowsTransPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#define WIDTH 7
#define HEIGHT 3
#define THREADS_PER_BLOCK 32
#define CUDA_CHECK(err) if(err != hipSuccess)\
{\
printf("hipMalloc returned error %s (code %d) (file %s) (line %d)\n", hipGetErrorString(err), err, __FILE__, __LINE__);\
}\
// Kernel: tiled matrix transpose.
// Reads a (height x width) row-major int matrix from `input` and writes its
// transpose, a (width x height) row-major matrix, to `output`.
// Each block stages a THREADS_PER_BLOCK x THREADS_PER_BLOCK tile in shared
// memory so both the global read and the global write are coalesced.
__global__ void transpose(int *input, int *output, int width, int height)
{
__shared__ int temp[THREADS_PER_BLOCK][THREADS_PER_BLOCK];
// Global coordinates of the element this thread loads from `input`.
int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
int id_in = yIndex * width + xIndex;
temp[threadIdx.y][threadIdx.x] = input[id_in];
}
// Every load into the tile must complete before any thread reads it back.
__syncthreads();
// Swap block roles: input block (bx,by) becomes output block (by,bx).
// The output matrix has row stride `height`.
xIndex = blockIdx.y * blockDim.y + threadIdx.x;
yIndex = blockIdx.x * blockDim.x + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
int id_out = yIndex * height + xIndex;
// Transposed read of the tile (indices swapped relative to the store above).
output[id_out] = temp[threadIdx.x][threadIdx.y];
}
}
// Device helper: exclusive prefix sum (Blelloch work-efficient scan) of `w`
// ints from `input` into `output`, using the dynamically sized extern shared
// buffer `temp` (must hold `nextpow2` ints, nextpow2 = 2*blockDim.x).
// Each thread owns elements 2*tdx and 2*tdx+1; out-of-range slots are padded
// with 0. On exit output[0..w-1] holds the exclusive scan and output[w] the
// inclusive total.
inline __device__
void PrefixSum(int* output, int* input, int w, int nextpow2)
{
extern __shared__ int temp[];
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
// Load phase: pad the power-of-two tree with zeros beyond `w`.
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
// Up-sweep (reduce): build partial sums up the implicit binary tree.
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
// temp[nextpow2-1] now holds the total of all elements.
// NOTE(review): this read has no __syncthreads() after the final up-sweep
// write (done by thread 0 only) — potential race for other threads; confirm
// on target hardware or add a barrier before reading.
int last = temp[nextpow2 - 1];
if(tdx == 0) temp[nextpow2 - 1] = 0;
// Down-sweep: propagate partial sums down to form the exclusive scan.
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// Store phase; output[w] = inclusive total (same value written by every
// thread whose second element is in range).
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
if(tdx2p < w) output[w] = last;
}
// One block per matrix row (selected by blockIdx.y): exclusive-scan that row.
// The output base is shifted back by one so PrefixSum's `output[w] = total`
// store lands at the row's last slot layout-compatibly with the caller.
__global__ void KernPrefixSumRows(int *out, int *in, int height, int width)
{
const int rowOffset = blockIdx.y * width;
PrefixSum(out + rowOffset - 1, in + rowOffset, width, 2 * blockDim.x);
}
// One block per row of the transposed intermediate (selected by blockIdx.y):
// exclusive-scan that row. Output rows have stride width+1 and start at row 1,
// leaving row 0 for the caller's zero fill.
__global__ void KernPrefixSumRowsTrans(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
const int outStride = width + 1;
PrefixSum(out + (row + 1) * outStride, in + row * width, width, 2 * blockDim.x);
}
// Host driver: builds a (width+1) x (height+1) exclusive integral image on the
// GPU from the height x width int image `in`:
//   1. exclusive-scan each row of `in` into `out` (with per-row totals),
//   2. transpose `out` into `outT`,
//   3. zero row 0, then exclusive-scan each row of the transpose (i.e. each
//      column of the original) back into `out`,
//   4. transpose again; `outT` holds the final integral image.
// `out` and `outT` must each hold (width+1)*(height+1) ints.
void PrefixSumRows(int *out, int *in, int *outT, int height, int width)
{
// One block per row; the scan kernel consumes two elements per thread, so
// blockDim.x is the smallest power of two >= ceil(width / 2).
dim3 blockDim = dim3( 1, 1);
while(blockDim.x < ceil(width/2.0f)) blockDim.x <<= 1;
dim3 gridDim = dim3( 1, height );
KernPrefixSumRows<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,in,height,width);
hipDeviceSynchronize();
dim3 gridSize, blockSize;
gridSize.x = (int)((width + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((height + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, width, height);
hipDeviceSynchronize();
// Zero row 0 of the column-pass output (KernPrefixSumRowsTrans starts
// writing at row 1). Generalized from the hard-coded (HEIGHT+1) macro to the
// `height` parameter — identical at the current call site (height == HEIGHT),
// and correct for any other image height.
memset(out, 0, (height+1)*sizeof(int));
blockDim = dim3( 1, 1);
while(blockDim.x < ceil((height)/2.0f)) blockDim.x <<= 1;
gridDim = dim3( 1, width );
KernPrefixSumRowsTrans<<<gridDim,blockDim,2*sizeof(int)*blockDim.x>>>(out,outT,width,height);
hipDeviceSynchronize();
// Final transpose of the (width+1) x (height+1) intermediate into outT.
gridSize.x = (int)((height+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
gridSize.y = (int)((width+1 + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
blockSize.x = THREADS_PER_BLOCK;
blockSize.y = THREADS_PER_BLOCK;
transpose<<<gridSize, blockSize>>>(out, outT, height+1, width+1);
hipDeviceSynchronize();
}
#ifndef WIDTH
#define WIDTH 7
#endif
#ifndef HEIGHT
#define HEIGHT 3
#endif
// CPU reference: exclusive integral image (summed-area table).
// Img:      WIDTH x HEIGHT row-major 8-bit image.
// Integral: (WIDTH+1) x (HEIGHT+1) row-major int buffer. Row 0 and column 0
//           are zero and Integral[y][x] = sum of Img[0..y-1][0..x-1].
// The original duplicated the yy==1 pass in front of the yy>=2 loop with a
// statement-for-statement identical body; the loop now simply starts at yy=1
// (output is unchanged). The trailing printf("\n\n") is preserved.
void ComputeIntegrals(const unsigned char *Img, int *Integral) {
    const int SUM_WIDTH_STEP = (WIDTH+1);
    int iW = WIDTH;   // image dimensions
    int iH = HEIGHT;
    int sW = WIDTH+1; // integral-image row stride

    // Row 0 of the integral is all zeros.
    memset(Integral, 0, (WIDTH+1)*sizeof(int));

    for (int yy = 1; yy <= iH; yy++) {
        const unsigned char *ImgPtr = Img + WIDTH*(yy-1); // image row yy-1
        int *IntegPtr  = Integral + SUM_WIDTH_STEP*yy;    // integral row yy
        int *IntegPtrA = IntegPtr - 1;                    // left neighbor
        int *IntegPtrB = IntegPtr - sW - 1;               // up-left neighbor
        int *IntegPtrC = IntegPtr - sW;                   // up neighbor
        *IntegPtr++ = 0;                                  // column 0 is zero
        IntegPtrA++;
        IntegPtrB++;
        IntegPtrC++;
        // I[yy][xx] = img[yy-1][xx-1] + I[yy][xx-1] - I[yy-1][xx-1] + I[yy-1][xx]
        for (int xx = 1; xx <= iW; xx++) {
            int fTemp = (int)*(ImgPtr++);
            *IntegPtr++ = fTemp
                        + *IntegPtrA++
                        - *IntegPtrB++
                        + *IntegPtrC++;
        }
    }
    printf("\n\n");
}
// Test harness: computes the exclusive integral image of a 7x3 all-ones image
// on the CPU (ComputeIntegrals) and on the GPU (PrefixSumRows), printing both
// results and rough wall-clock timings for comparison.
int main() {
unsigned char *Img=0;
int *ImgInt=0;
int *Integral=0;
int *IntegralTransposed=0;
clock_t start, end;
// HIP managed allocations: buffers visible to both host and device.
// CUDA_CHECK only reports errors, it does not abort.
CUDA_CHECK( hipMallocManaged((void **) &Img, WIDTH*HEIGHT) );
CUDA_CHECK( hipMallocManaged((void **) &ImgInt, WIDTH*HEIGHT*sizeof(int)) );
CUDA_CHECK( hipMallocManaged((void **) &Integral, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
CUDA_CHECK( hipMallocManaged((void **) &IntegralTransposed, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
// All-ones test image (byte version for CPU, int version for GPU).
for (int i=0; i<WIDTH*HEIGHT; i++) Img[i] = 1;
for (int i=0; i<WIDTH*HEIGHT; i++) ImgInt[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) IntegralTransposed[i] = 1;
// CPU reference pass, timed with clock().
start = clock();
ComputeIntegrals(Img, Integral);
end = clock();
printf("CPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
int *IntegPtr;
unsigned char *ImgPtr;
// input
printf("Input\n\n");
for (int i=0; i<HEIGHT; i++) {
for (int j=0; j<WIDTH; j++) {
ImgPtr = Img + i * WIDTH + j;
printf("%d ", *ImgPtr);
}
printf("\n");
}
printf("\n\n");
printf("Output CPU");
printf("\n\n");
// Print the CPU integral image, (HEIGHT+1) x (WIDTH+1).
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = Integral + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
printf("\n\n");
printf("OUTPUT GPU");
printf("\n\n");
// Clear the CPU result before reusing the buffer as GPU scratch/output.
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 0;
//CudaComputeIntegralImages(ImgInt, Integral, IntegralTransposed);
// GPU pass: final integral image lands in IntegralTransposed.
start = clock();
PrefixSumRows(Integral, ImgInt, IntegralTransposed, HEIGHT, WIDTH);
end = clock();
printf("GPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = IntegralTransposed + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
hipFree(Img);
hipFree(ImgInt);
hipFree(Integral);
hipFree(IntegralTransposed);
return 0;
} | .text
.file "integralImageExclusive.hip"
# Host-side launch stub for the `transpose` HIP kernel.
# SysV AMD64 in: rdi = input ptr, rsi = output ptr, edx = width, ecx = height.
# Copies the four kernel arguments to the stack, builds the void*[4] argument
# array hipLaunchKernel expects, pops the grid/block/shmem/stream configuration
# pushed by the <<<...>>> expansion, and launches the kernel.
.globl _Z24__device_stub__transposePiS_ii # -- Begin function _Z24__device_stub__transposePiS_ii
.p2align 4, 0x90
.type _Z24__device_stub__transposePiS_ii,@function
_Z24__device_stub__transposePiS_ii: # @_Z24__device_stub__transposePiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp # frame: arg copies + void*[4] + pop-config out-params
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp) # spill arg0 (int *input)
movq %rsi, 64(%rsp) # spill arg1 (int *output)
movl %edx, 12(%rsp) # spill arg2 (width)
movl %ecx, 8(%rsp) # spill arg3 (height)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp) # args[0] = &input
leaq 64(%rsp), %rax
movq %rax, 88(%rsp) # args[1] = &output
leaq 12(%rsp), %rax
movq %rax, 96(%rsp) # args[2] = &width
leaq 8(%rsp), %rax
movq %rax, 104(%rsp) # args[3] = &height
leaq 48(%rsp), %rdi # out: grid dim
leaq 32(%rsp), %rsi # out: block dim
leaq 24(%rsp), %rdx # out: shared-mem bytes
leaq 16(%rsp), %rcx # out: stream
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi # grid dim (x,y packed)
movl 56(%rsp), %edx # grid dim z
movq 32(%rsp), %rcx # block dim (x,y packed)
movl 40(%rsp), %r8d # block dim z
leaq 80(%rsp), %r9 # kernel-arg array
movl $_Z9transposePiS_ii, %edi # kernel stub symbol
pushq 16(%rsp) # stack arg: stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp) # stack arg: shared-mem bytes (slot shifted by prior push)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp # 120-byte frame + two 8-byte pushes
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__transposePiS_ii, .Lfunc_end0-_Z24__device_stub__transposePiS_ii
.cfi_endproc
# -- End function
# Host-side launch stub for the `KernPrefixSumRows` HIP kernel.
# Identical shape to the transpose stub: spill args, build void*[4], pop the
# <<<...>>> configuration, call hipLaunchKernel.
# SysV AMD64 in: rdi = out ptr, rsi = in ptr, edx = height, ecx = width.
.globl _Z32__device_stub__KernPrefixSumRowsPiS_ii # -- Begin function _Z32__device_stub__KernPrefixSumRowsPiS_ii
.p2align 4, 0x90
.type _Z32__device_stub__KernPrefixSumRowsPiS_ii,@function
_Z32__device_stub__KernPrefixSumRowsPiS_ii: # @_Z32__device_stub__KernPrefixSumRowsPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp # frame: arg copies + void*[4] + pop-config out-params
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp) # spill arg0 (int *out)
movq %rsi, 64(%rsp) # spill arg1 (int *in)
movl %edx, 12(%rsp) # spill arg2 (height)
movl %ecx, 8(%rsp) # spill arg3 (width)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp) # args[0]
leaq 64(%rsp), %rax
movq %rax, 88(%rsp) # args[1]
leaq 12(%rsp), %rax
movq %rax, 96(%rsp) # args[2]
leaq 8(%rsp), %rax
movq %rax, 104(%rsp) # args[3]
leaq 48(%rsp), %rdi # out: grid dim
leaq 32(%rsp), %rsi # out: block dim
leaq 24(%rsp), %rdx # out: shared-mem bytes
leaq 16(%rsp), %rcx # out: stream
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi # grid dim (x,y packed)
movl 56(%rsp), %edx # grid dim z
movq 32(%rsp), %rcx # block dim (x,y packed)
movl 40(%rsp), %r8d # block dim z
leaq 80(%rsp), %r9 # kernel-arg array
movl $_Z17KernPrefixSumRowsPiS_ii, %edi # kernel stub symbol
pushq 16(%rsp) # stack arg: stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp) # stack arg: shared-mem bytes (slot shifted by prior push)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp # 120-byte frame + two 8-byte pushes
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z32__device_stub__KernPrefixSumRowsPiS_ii, .Lfunc_end1-_Z32__device_stub__KernPrefixSumRowsPiS_ii
.cfi_endproc
# -- End function
# Host-side launch stub for the `KernPrefixSumRowsTrans` HIP kernel.
# Same layout as the other two stubs; only the kernel symbol differs.
# SysV AMD64 in: rdi = out ptr, rsi = in ptr, edx = height, ecx = width.
.globl _Z37__device_stub__KernPrefixSumRowsTransPiS_ii # -- Begin function _Z37__device_stub__KernPrefixSumRowsTransPiS_ii
.p2align 4, 0x90
.type _Z37__device_stub__KernPrefixSumRowsTransPiS_ii,@function
_Z37__device_stub__KernPrefixSumRowsTransPiS_ii: # @_Z37__device_stub__KernPrefixSumRowsTransPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp # frame: arg copies + void*[4] + pop-config out-params
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp) # spill arg0 (int *out)
movq %rsi, 64(%rsp) # spill arg1 (int *in)
movl %edx, 12(%rsp) # spill arg2 (height)
movl %ecx, 8(%rsp) # spill arg3 (width)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp) # args[0]
leaq 64(%rsp), %rax
movq %rax, 88(%rsp) # args[1]
leaq 12(%rsp), %rax
movq %rax, 96(%rsp) # args[2]
leaq 8(%rsp), %rax
movq %rax, 104(%rsp) # args[3]
leaq 48(%rsp), %rdi # out: grid dim
leaq 32(%rsp), %rsi # out: block dim
leaq 24(%rsp), %rdx # out: shared-mem bytes
leaq 16(%rsp), %rcx # out: stream
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi # grid dim (x,y packed)
movl 56(%rsp), %edx # grid dim z
movq 32(%rsp), %rcx # block dim (x,y packed)
movl 40(%rsp), %r8d # block dim z
leaq 80(%rsp), %r9 # kernel-arg array
movl $_Z22KernPrefixSumRowsTransPiS_ii, %edi # kernel stub symbol
pushq 16(%rsp) # stack arg: stream
.cfi_adjust_cfa_offset 8
pushq 32(%rsp) # stack arg: shared-mem bytes (slot shifted by prior push)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp # 120-byte frame + two 8-byte pushes
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z37__device_stub__KernPrefixSumRowsTransPiS_ii, .Lfunc_end2-_Z37__device_stub__KernPrefixSumRowsTransPiS_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z13PrefixSumRowsPiS_S_ii
.LCPI3_0:
.long 0x3f000000 # float 0.5
.LCPI3_1:
.long 0x3f800000 # float 1
.text
.globl _Z13PrefixSumRowsPiS_S_ii
.p2align 4, 0x90
.type _Z13PrefixSumRowsPiS_S_ii,@function
_Z13PrefixSumRowsPiS_S_ii: # @_Z13PrefixSumRowsPiS_S_ii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %ebx
movl %ecx, %r14d
movq %rdx, 112(%rsp) # 8-byte Spill
movq %rsi, %rbp
movq %rdi, %r12
cvtsi2ss %r8d, %xmm0
movabsq $-4294967296, %r15 # imm = 0xFFFFFFFF00000000
movabsq $4294967297, %r13 # imm = 0x100000001
mulss .LCPI3_0(%rip), %xmm0
callq ceilf@PLT
ucomiss .LCPI3_1(%rip), %xmm0
movq %r13, %rdx
jbe .LBB3_3
# %bb.1: # %.lr.ph.preheader
cvtss2sd %xmm0, %xmm0
movq %r13, %rdx
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
leal (%rdx,%rdx), %eax
andq %r15, %rdx
orq %rax, %rdx
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
ucomisd %xmm1, %xmm0
ja .LBB3_2
.LBB3_3: # %._crit_edge
movq %r14, %rdi
shlq $32, %rdi
orq $1, %rdi
movl %edx, %r8d
shlq $3, %r8
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_5
# %bb.4:
movq %r12, 72(%rsp)
movq %rbp, 64(%rsp)
movl %r14d, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17KernPrefixSumRowsPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_5:
movabsq $137438953504, %rbp # imm = 0x2000000020
callq hipDeviceSynchronize
leal 31(%rbx), %eax
leal 62(%rbx), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $5, %ecx
leal 31(%r14), %eax
leal 62(%r14), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $5, %edi
shlq $32, %rdi
orq %rcx, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_7
# %bb.6:
movq %r12, 72(%rsp)
movq 112(%rsp), %rax # 8-byte Reload
movq %rax, 64(%rsp)
movl %ebx, 12(%rsp)
movl %r14d, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9transposePiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_7:
movabsq $34359738360, %rbp # imm = 0x7FFFFFFF8
callq hipDeviceSynchronize
xorps %xmm0, %xmm0
cvtsi2ss %r14d, %xmm0
xorpd %xmm1, %xmm1
movupd %xmm1, (%r12)
mulss .LCPI3_0(%rip), %xmm0
callq ceilf@PLT
ucomiss .LCPI3_1(%rip), %xmm0
jbe .LBB3_10
# %bb.8: # %.lr.ph150.preheader
cvtss2sd %xmm0, %xmm0
.p2align 4, 0x90
.LBB3_9: # %.lr.ph150
# =>This Inner Loop Header: Depth=1
leal (,%r13,2), %eax
andq %r15, %r13
orq %rax, %r13
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
ucomisd %xmm1, %xmm0
ja .LBB3_9
.LBB3_10: # %._crit_edge151
movq %rbx, %rdi
shlq $32, %rdi
orq $1, %rdi
leaq (,%r13,8), %r8
andq %rbp, %r8
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_12
# %bb.11:
movq %r12, 72(%rsp)
movq 112(%rsp), %rax # 8-byte Reload
movq %rax, 64(%rsp)
movl %ebx, 12(%rsp)
movl %r14d, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z22KernPrefixSumRowsTransPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_12:
callq hipDeviceSynchronize
leal 32(%r14), %eax
leal 63(%r14), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $5, %ecx
leal 32(%rbx), %eax
leal 63(%rbx), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $5, %edi
shlq $32, %rdi
orq %rcx, %rdi
movl $1, %esi
movabsq $137438953504, %rdx # imm = 0x2000000020
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_14
# %bb.13:
incl %ebx
incl %r14d
movq %r12, 72(%rsp)
movq 112(%rsp), %rax # 8-byte Reload
movq %rax, 64(%rsp)
movl %r14d, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9transposePiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_14:
callq hipDeviceSynchronize
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z13PrefixSumRowsPiS_S_ii, .Lfunc_end3-_Z13PrefixSumRowsPiS_S_ii
.cfi_endproc
# -- End function
# CPU integral image, compiled from ComputeIntegrals(const unsigned char*, int*).
# SysV AMD64 in: rdi = Img (7x3 bytes), rsi = Integral (8x4 ints).
# Recurrence per cell: I[y][x] = img[y-1][x-1] + I[y][x-1] - I[y-1][x-1] + I[y-1][x].
# The compiler fully unrolled the yy==1 row into .LBB4_1/.LBB4_2 and kept rows
# 2..3 in the .LBB4_3/.LBB4_4 nest; the trailing printf("\n\n") was lowered to
# a tail call to puts.
.globl _Z16ComputeIntegralsPKhPi # -- Begin function _Z16ComputeIntegralsPKhPi
.p2align 4, 0x90
.type _Z16ComputeIntegralsPKhPi,@function
_Z16ComputeIntegralsPKhPi: # @_Z16ComputeIntegralsPKhPi
.cfi_startproc
# %bb.0:
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsi) # memset: zero Integral[4..7]
movups %xmm0, (%rsi) # memset: zero Integral[0..3]
movl $0, 32(%rsi) # Integral[8] = 0 (row 1, col 0)
xorl %eax, %eax # rax = xx-1 (byte/elem index)
xorl %ecx, %ecx # ecx = running left value I[1][xx-1]
.p2align 4, 0x90
.LBB4_1: # =>This Inner Loop Header: Depth=1
movzbl (%rdi,%rax), %edx # edx = img[0][xx-1]
addl %ecx, %edx # + left neighbor
subl (%rsi,%rax,4), %edx # - up-left neighbor (row 0)
addl 4(%rsi,%rax,4), %edx # + up neighbor (row 0)
movl %edx, 36(%rsi,%rax,4) # store I[1][xx]
incq %rax
movl %edx, %ecx # carry as next left value
cmpl $6, %eax
jne .LBB4_1
# %bb.2: # last column of row 1 (peeled iteration)
movzbl (%rdi,%rax), %ecx
addl 32(%rsi,%rax,4), %ecx # + left neighbor I[1][6]
subl (%rsi,%rax,4), %ecx # - up-left
addl 4(%rsi,%rax,4), %ecx # + up
movl %ecx, 36(%rsi,%rax,4) # store I[1][7]
leaq 68(%rsi), %rax # rax -> I[2][1] (write cursor)
addq $7, %rdi # advance to image row 1
movl $2, %ecx # rcx = yy (current output row)
xorl %edx, %edx # rdx = yy-2 (previous-row index)
.p2align 4, 0x90
.LBB4_3: # =>This Loop Header: Depth=1
# Child Loop BB4_4 Depth 2
movq %rdx, %r8
shlq $5, %r8 # r8 = byte offset of row yy-1 region
movq %rcx, %r9
shlq $5, %r9 # r9 = byte offset of row yy
movl $0, (%rsi,%r9) # I[yy][0] = 0
movl 64(%rsi,%r8), %r9d # r9d = running left value (starts at I[yy][0])
xorl %r8d, %r8d # r8 = xx-1
.p2align 4, 0x90
.LBB4_4: # Parent Loop BB4_3 Depth=1
# => This Inner Loop Header: Depth=2
movzbl (%rdi,%r8), %r10d # img[yy-1][xx-1]
addl %r9d, %r10d # + left
subl -36(%rax,%r8,4), %r10d # - up-left
addl -32(%rax,%r8,4), %r10d # + up
movl %r10d, (%rax,%r8,4) # store I[yy][xx]
incq %r8
movl %r10d, %r9d # carry as next left value
cmpl $6, %r8d
jne .LBB4_4
# %bb.5: # in Loop: Header=BB4_3 Depth=1
movzbl (%rdi,%r8), %r9d # last column of this row (peeled)
addl -4(%rax,%r8,4), %r9d # + left neighbor
subl -36(%rax,%r8,4), %r9d # - up-left
addl -32(%rax,%r8,4), %r9d # + up
movl %r9d, (%rax,%r8,4) # store I[yy][7]
incq %rcx
incq %rdx
addq $32, %rax # next output row (stride 8 ints)
addq $7, %rdi # next image row
cmpq $4, %rcx # rows 2..3 done?
jne .LBB4_3
# %bb.6: # printf("\n\n") folded into puts
movl $.Lstr.5, %edi
jmp puts@PLT # TAILCALL
.Lfunc_end4:
.size _Z16ComputeIntegralsPKhPi, .Lfunc_end4-_Z16ComputeIntegralsPKhPi
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI5_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $32, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movq $0, (%rsp)
movq $0, 8(%rsp)
leaq 16(%rsp), %rdi
movl $21, %esi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB5_2
# %bb.1:
leaq 16(%rsp), %rbx
movl $21, %esi
movq %rbx, %rdi
movl $1, %edx
callq hipMallocManaged
movl %eax, %edi
callq hipGetErrorString
movq %rax, %r14
movl $21, %esi
movq %rbx, %rdi
movl $1, %edx
callq hipMallocManaged
movl $.L.str.1, %edi
movl $.L.str.2, %ecx
movq %r14, %rsi
movl %eax, %edx
movl $231, %r8d
xorl %eax, %eax
callq printf
.LBB5_2:
leaq 24(%rsp), %rdi
movl $84, %esi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB5_4
# %bb.3:
leaq 24(%rsp), %rbx
movl $84, %esi
movq %rbx, %rdi
movl $1, %edx
callq hipMallocManaged
movl %eax, %edi
callq hipGetErrorString
movq %rax, %r14
movl $84, %esi
movq %rbx, %rdi
movl $1, %edx
callq hipMallocManaged
movl $.L.str.1, %edi
movl $.L.str.2, %ecx
movq %r14, %rsi
movl %eax, %edx
movl $232, %r8d
xorl %eax, %eax
callq printf
.LBB5_4:
movq %rsp, %rdi
movl $128, %esi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB5_6
# %bb.5:
movq %rsp, %rbx
movl $128, %esi
movq %rbx, %rdi
movl $1, %edx
callq hipMallocManaged
movl %eax, %edi
callq hipGetErrorString
movq %rax, %r14
movl $128, %esi
movq %rbx, %rdi
movl $1, %edx
callq hipMallocManaged
movl $.L.str.1, %edi
movl $.L.str.2, %ecx
movq %r14, %rsi
movl %eax, %edx
movl $233, %r8d
xorl %eax, %eax
callq printf
.LBB5_6:
leaq 8(%rsp), %rdi
movl $128, %esi
movl $1, %edx
callq hipMallocManaged
testl %eax, %eax
je .LBB5_7
# %bb.34:
leaq 8(%rsp), %rbx
movl $128, %esi
movq %rbx, %rdi
movl $1, %edx
callq hipMallocManaged
movl %eax, %edi
callq hipGetErrorString
movq %rax, %r14
movl $128, %esi
movq %rbx, %rdi
movl $1, %edx
callq hipMallocManaged
movl $.L.str.1, %edi
movl $.L.str.2, %ecx
movq %r14, %rsi
movl %eax, %edx
movl $234, %r8d
xorl %eax, %eax
callq printf
.LBB5_7: # %.preheader148
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_8: # =>This Inner Loop Header: Depth=1
movq 16(%rsp), %rcx
movb $1, (%rcx,%rax)
incq %rax
cmpq $21, %rax
jne .LBB5_8
# %bb.9: # %.preheader66
movq 24(%rsp), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_10: # =>This Inner Loop Header: Depth=1
movl $1, (%rax,%rcx,4)
incq %rcx
cmpq $21, %rcx
jne .LBB5_10
# %bb.11: # %.preheader65
movq (%rsp), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_12: # =>This Inner Loop Header: Depth=1
movl $1, (%rax,%rcx,4)
incq %rcx
cmpq $32, %rcx
jne .LBB5_12
# %bb.13: # %.preheader64
movq 8(%rsp), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_14: # =>This Inner Loop Header: Depth=1
movl $1, (%rax,%rcx,4)
incq %rcx
cmpq $32, %rcx
jne .LBB5_14
# %bb.15:
callq clock
movq %rax, %rbx
movq 16(%rsp), %rax
movq (%rsp), %rcx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rcx)
movups %xmm0, (%rcx)
movl $0, 32(%rcx)
movl 32(%rcx), %esi
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_16: # =>This Inner Loop Header: Depth=1
movzbl (%rax,%rdx), %edi
addl %esi, %edi
subl (%rcx,%rdx,4), %edi
addl 4(%rcx,%rdx,4), %edi
movl %edi, 36(%rcx,%rdx,4)
incq %rdx
movl %edi, %esi
cmpl $6, %edx
jne .LBB5_16
# %bb.17:
movzbl (%rax,%rdx), %esi
addl 32(%rcx,%rdx,4), %esi
subl (%rcx,%rdx,4), %esi
addl 4(%rcx,%rdx,4), %esi
movl %esi, 36(%rcx,%rdx,4)
leaq 68(%rcx), %rdx
addq $7, %rax
movl $2, %esi
xorl %edi, %edi
.p2align 4, 0x90
.LBB5_18: # =>This Loop Header: Depth=1
# Child Loop BB5_19 Depth 2
movq %rdi, %r8
shlq $5, %r8
movq %rsi, %r9
shlq $5, %r9
movl $0, (%rcx,%r9)
movl 64(%rcx,%r8), %r9d
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB5_19: # Parent Loop BB5_18 Depth=1
# => This Inner Loop Header: Depth=2
movzbl (%rax,%r8), %r10d
addl %r9d, %r10d
subl -36(%rdx,%r8,4), %r10d
addl -32(%rdx,%r8,4), %r10d
movl %r10d, (%rdx,%r8,4)
incq %r8
movl %r10d, %r9d
cmpl $6, %r8d
jne .LBB5_19
# %bb.20: # in Loop: Header=BB5_18 Depth=1
movzbl (%rax,%r8), %r9d
addl -4(%rdx,%r8,4), %r9d
subl -36(%rdx,%r8,4), %r9d
addl -32(%rdx,%r8,4), %r9d
movl %r9d, (%rdx,%r8,4)
incq %rsi
incq %rdi
addq $32, %rdx
addq $7, %rax
cmpq $4, %rsi
jne .LBB5_18
# %bb.21: # %_Z16ComputeIntegralsPKhPi.exit
movl $.Lstr.5, %edi
callq puts@PLT
callq clock
subq %rbx, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI5_0(%rip), %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_22: # %.preheader63
# =>This Loop Header: Depth=1
# Child Loop BB5_23 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB5_23: # Parent Loop BB5_22 Depth=1
# => This Inner Loop Header: Depth=2
movq 16(%rsp), %rax
addq %rbx, %rax
movzbl (%r15,%rax), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
incq %r15
cmpq $7, %r15
jne .LBB5_23
# %bb.24: # in Loop: Header=BB5_22 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $7, %rbx
cmpq $3, %r14
jne .LBB5_22
# %bb.25:
movl $.Lstr.5, %edi
callq puts@PLT
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.5, %edi
callq puts@PLT
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_26: # %.preheader62
# =>This Loop Header: Depth=1
# Child Loop BB5_27 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB5_27: # Parent Loop BB5_26 Depth=1
# => This Inner Loop Header: Depth=2
movq (%rsp), %rax
addq %rbx, %rax
movl (%rax,%r15,4), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
incq %r15
cmpq $8, %r15
jne .LBB5_27
# %bb.28: # in Loop: Header=BB5_26 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $32, %rbx
cmpq $4, %r14
jne .LBB5_26
# %bb.29:
movl $.Lstr.5, %edi
callq puts@PLT
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.5, %edi
callq puts@PLT
movq (%rsp), %rax
xorpd %xmm0, %xmm0
movupd %xmm0, (%rax)
movupd %xmm0, 16(%rax)
movupd %xmm0, 32(%rax)
movupd %xmm0, 48(%rax)
movupd %xmm0, 64(%rax)
movupd %xmm0, 80(%rax)
movupd %xmm0, 96(%rax)
movupd %xmm0, 112(%rax)
callq clock
movq %rax, %rbx
movq (%rsp), %rdi
movq 24(%rsp), %rsi
movq 8(%rsp), %rdx
movl $3, %ecx
movl $7, %r8d
callq _Z13PrefixSumRowsPiS_S_ii
callq clock
subq %rbx, %rax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
divsd .LCPI5_0(%rip), %xmm0
movl $.L.str.9, %edi
movb $1, %al
callq printf
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB5_30: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_31 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB5_31: # Parent Loop BB5_30 Depth=1
# => This Inner Loop Header: Depth=2
movq 8(%rsp), %rax
addq %rbx, %rax
movl (%rax,%r15,4), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
incq %r15
cmpq $8, %r15
jne .LBB5_31
# %bb.32: # in Loop: Header=BB5_30 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $32, %rbx
cmpq $4, %r14
jne .LBB5_30
# %bb.33:
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $32, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
	.p2align 4, 0x90 # -- Begin function __hip_module_ctor
	.type __hip_module_ctor,@function
# Module constructor, run from .init_array before main.
# Registers the embedded GPU fat binary exactly once (handle cached in
# __hip_gpubin_handle) and then registers the three kernels of this TU
# with the HIP runtime, mapping each host-side stub address to its
# mangled device-kernel name.  Finally schedules __hip_module_dtor via
# atexit (tail call).
__hip_module_ctor: # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq %rbx
	.cfi_def_cfa_offset 16
	subq $32, %rsp # 32-byte scratch: zeroed trailing args for __hipRegisterFunction
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq $0, __hip_gpubin_handle(%rip) # already registered? (non-null handle)
	jne .LBB6_2
# %bb.1: # first call: register the fat binary wrapper once
	movl $__hip_fatbin_wrapper, %edi
	callq __hipRegisterFatBinary
	movq %rax, __hip_gpubin_handle(%rip) # cache the module handle
.LBB6_2:
	movq __hip_gpubin_handle(%rip), %rbx # rbx = module handle, reused for all 3 registrations
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp) # zero the 4 optional pointer args (tid/bid/dims/wsize)
	movups %xmm0, (%rsp)
	movl $_Z9transposePiS_ii, %esi # host stub handle for kernel transpose(int*,int*,int,int)
	movl $.L__unnamed_1, %edx # device-side mangled name (same string for both name args)
	movl $.L__unnamed_1, %ecx
	movq %rbx, %rdi
	movl $-1, %r8d # thread limit: -1 = unlimited
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp)
	movups %xmm0, (%rsp)
	movl $_Z17KernPrefixSumRowsPiS_ii, %esi # kernel KernPrefixSumRows(int*,int*,int,int)
	movl $.L__unnamed_2, %edx
	movl $.L__unnamed_2, %ecx
	movq %rbx, %rdi
	movl $-1, %r8d
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp)
	movups %xmm0, (%rsp)
	movl $_Z22KernPrefixSumRowsTransPiS_ii, %esi # kernel KernPrefixSumRowsTrans(int*,int*,int,int)
	movl $.L__unnamed_3, %edx
	movl $.L__unnamed_3, %ecx
	movq %rbx, %rdi
	movl $-1, %r8d
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	movl $__hip_module_dtor, %edi # register the unregister hook to run at exit
	addq $32, %rsp
	.cfi_def_cfa_offset 16
	popq %rbx
	.cfi_def_cfa_offset 8
	jmp atexit # TAILCALL
.Lfunc_end6:
	.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align 4, 0x90 # -- Begin function __hip_module_dtor
	.type __hip_module_dtor,@function
# Module destructor (installed via atexit by __hip_module_ctor).
# Unregisters the fat binary if it was registered and clears the cached
# handle so the teardown is idempotent.
__hip_module_dtor: # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq __hip_gpubin_handle(%rip), %rdi
	testq %rdi, %rdi # handle null -> nothing was registered, plain return
	je .LBB7_2
# %bb.1:
	pushq %rax # dummy push: realigns rsp to 16 before the call
	.cfi_def_cfa_offset 16
	callq __hipUnregisterFatBinary
	movq $0, __hip_gpubin_handle(%rip) # mark as unregistered
	addq $8, %rsp
	.cfi_def_cfa_offset 8
.LBB7_2:
	retq
.Lfunc_end7:
	.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# Host-side kernel "handle" objects. Each is an 8-byte read-only object
# holding the address of the corresponding __device_stub__ launch
# function; the kernel<<<>>> launch machinery and __hipRegisterFunction
# both key off these addresses.
	.type _Z9transposePiS_ii,@object # @_Z9transposePiS_ii
	.section .rodata,"a",@progbits
	.globl _Z9transposePiS_ii
	.p2align 3, 0x0
_Z9transposePiS_ii:
	.quad _Z24__device_stub__transposePiS_ii # handle -> stub for transpose
	.size _Z9transposePiS_ii, 8
	.type _Z17KernPrefixSumRowsPiS_ii,@object # @_Z17KernPrefixSumRowsPiS_ii
	.globl _Z17KernPrefixSumRowsPiS_ii
	.p2align 3, 0x0
_Z17KernPrefixSumRowsPiS_ii:
	.quad _Z32__device_stub__KernPrefixSumRowsPiS_ii # handle -> stub for KernPrefixSumRows
	.size _Z17KernPrefixSumRowsPiS_ii, 8
	.type _Z22KernPrefixSumRowsTransPiS_ii,@object # @_Z22KernPrefixSumRowsTransPiS_ii
	.globl _Z22KernPrefixSumRowsTransPiS_ii
	.p2align 3, 0x0
_Z22KernPrefixSumRowsTransPiS_ii:
	.quad _Z37__device_stub__KernPrefixSumRowsTransPiS_ii # handle -> stub for KernPrefixSumRowsTrans
	.size _Z22KernPrefixSumRowsTransPiS_ii, 8
# NUL-terminated string constants referenced by main (printf formats and
# the hipMalloc error message). Bytes must stay exactly as-is: they are
# runtime-visible program output.
	.type .L.str.1,@object # @.str.1
	.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
	.asciz "hipMalloc returned error %s (code %d) (file %s) (line %d)\n"
	.size .L.str.1, 59
	.type .L.str.2,@object # @.str.2
.L.str.2:
	.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/r-abishek/standaloneCodeTrials/main/integralImage/integralImageExclusive.hip"
	.size .L.str.2, 134
	.type .L.str.3,@object # @.str.3
.L.str.3:
	.asciz "CPU Time Taken: %f\n"
	.size .L.str.3, 20
	.type .L.str.5,@object # @.str.5
.L.str.5:
	.asciz "%d " # per-element matrix print format
	.size .L.str.5, 4
	.type .L.str.7,@object # @.str.7
.L.str.7:
	.asciz "Output CPU"
	.size .L.str.7, 11
	.type .L.str.8,@object # @.str.8
.L.str.8:
	.asciz "OUTPUT GPU"
	.size .L.str.8, 11
	.type .L.str.9,@object # @.str.9
.L.str.9:
	.asciz "GPU Time Taken: %f\n"
	.size .L.str.9, 20
# Mangled device-kernel name strings, passed to __hipRegisterFunction by
# __hip_module_ctor to look each kernel up inside the fat binary.
	.type .L__unnamed_1,@object # @0
.L__unnamed_1:
	.asciz "_Z9transposePiS_ii"
	.size .L__unnamed_1, 19
	.type .L__unnamed_2,@object # @1
.L__unnamed_2:
	.asciz "_Z17KernPrefixSumRowsPiS_ii"
	.size .L__unnamed_2, 28
	.type .L__unnamed_3,@object # @2
.L__unnamed_3:
	.asciz "_Z22KernPrefixSumRowsTransPiS_ii"
	.size .L__unnamed_3, 33
# Fat binary wrapper descriptor handed to __hipRegisterFatBinary:
# { magic 0x48495046 ("HIPF"-style tag), version 1, pointer to the
# embedded __hip_fatbin blob, reserved 0 }.
	.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
	.section .hipFatBinSegment,"a",@progbits
	.p2align 3, 0x0
__hip_fatbin_wrapper:
	.long 1212764230 # 0x48495046
	.long 1 # 0x1
	.quad __hip_fatbin # device-code blob, defined elsewhere (see .addrsig_sym)
	.quad 0
	.size __hip_fatbin_wrapper, 24
# Cached module handle returned by __hipRegisterFatBinary; zero until the
# ctor runs, reset to zero by the dtor.
	.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
	.local __hip_gpubin_handle
	.comm __hip_gpubin_handle,8,8
	.section .init_array,"aw",@init_array
	.p2align 3, 0x0
	.quad __hip_module_ctor # run registration before main
# Compilation-unit id byte used by the HIP toolchain to tie host and
# device objects together.
	.type __hip_cuid_,@object # @__hip_cuid_
	.bss
	.globl __hip_cuid_
__hip_cuid_:
	.byte 0 # 0x0
	.size __hip_cuid_, 1
# Strings printed via puts (clang folded printf of constant strings into
# puts, which appends the trailing newline itself).
	.type .Lstr.1,@object # @str.1
	.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.1:
	.asciz "Input\n"
	.size .Lstr.1, 7
	.type .Lstr.5,@object # @str.5
.Lstr.5:
	.asciz "\n" # blank-line separator (puts adds a second newline)
	.size .Lstr.5, 2
	.section ".linker-options","e",@llvm_linker_options
	.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section ".note.GNU-stack","",@progbits # mark stack non-executable
# Address-significance table: symbols whose addresses are compared or
# stored (keeps ICF from merging them at link time).
	.addrsig
	.addrsig_sym _Z24__device_stub__transposePiS_ii
	.addrsig_sym _Z32__device_stub__KernPrefixSumRowsPiS_ii
	.addrsig_sym _Z37__device_stub__KernPrefixSumRowsTransPiS_ii
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym _Z9transposePiS_ii
	.addrsig_sym _Z17KernPrefixSumRowsPiS_ii
	.addrsig_sym _Z22KernPrefixSumRowsTransPiS_ii
	.addrsig_sym __hip_fatbin # device-code blob referenced by the wrapper; defined in another TU/section
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00139c64_00000000-6_integralImageExclusive.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\n\n"
.text
.globl _Z16ComputeIntegralsPKhPi
.type _Z16ComputeIntegralsPKhPi, @function
_Z16ComputeIntegralsPKhPi:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq %rdi, %r8
pxor %xmm0, %xmm0
movups %xmm0, (%rsi)
movups %xmm0, 16(%rsi)
leaq 32(%rsi), %rdx
movl $0, 32(%rsi)
leaq 6(%rdi), %rdi
movq %r8, %rcx
.L4:
addq $1, %rcx
addq $4, %rdx
movzbl -1(%rcx), %eax
addl -4(%rdx), %eax
subl -36(%rdx), %eax
addl -32(%rdx), %eax
movl %eax, (%rdx)
cmpq %rdi, %rcx
jne .L4
movzbl 6(%r8), %eax
addl 56(%rsi), %eax
subl 24(%rsi), %eax
addl 28(%rsi), %eax
movl %eax, 60(%rsi)
leaq 13(%r8), %rdi
addq $64, %rsi
addq $27, %r8
.L6:
leaq -6(%rdi), %rcx
movq %rsi, %r9
movl $0, (%rsi)
movq %rsi, %rdx
.L5:
addq $1, %rcx
addq $4, %rdx
movzbl -1(%rcx), %eax
addl -4(%rdx), %eax
subl -36(%rdx), %eax
addl -32(%rdx), %eax
movl %eax, (%rdx)
cmpq %rdi, %rcx
jne .L5
movzbl (%rcx), %eax
addl 24(%r9), %eax
subl -8(%r9), %eax
addl -4(%r9), %eax
movl %eax, 28(%r9)
addq $7, %rdi
addq $32, %rsi
cmpq %rdi, %r8
jne .L6
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z16ComputeIntegralsPKhPi, .-_Z16ComputeIntegralsPKhPi
.globl _Z32__device_stub__Z9transposePiS_iiPiS_ii
.type _Z32__device_stub__Z9transposePiS_iiPiS_ii, @function
_Z32__device_stub__Z9transposePiS_iiPiS_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9transposePiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z32__device_stub__Z9transposePiS_iiPiS_ii, .-_Z32__device_stub__Z9transposePiS_iiPiS_ii
.globl _Z9transposePiS_ii
.type _Z9transposePiS_ii, @function
_Z9transposePiS_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9transposePiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z9transposePiS_ii, .-_Z9transposePiS_ii
.globl _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii
.type _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii, @function
_Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17KernPrefixSumRowsPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii, .-_Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii
.globl _Z17KernPrefixSumRowsPiS_ii
.type _Z17KernPrefixSumRowsPiS_ii, @function
_Z17KernPrefixSumRowsPiS_ii:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z17KernPrefixSumRowsPiS_ii, .-_Z17KernPrefixSumRowsPiS_ii
.globl _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii
.type _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii, @function
_Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z22KernPrefixSumRowsTransPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii, .-_Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii
.globl _Z22KernPrefixSumRowsTransPiS_ii
.type _Z22KernPrefixSumRowsTransPiS_ii, @function
_Z22KernPrefixSumRowsTransPiS_ii:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z22KernPrefixSumRowsTransPiS_ii, .-_Z22KernPrefixSumRowsTransPiS_ii
.globl _Z13PrefixSumRowsPiS_S_ii
.type _Z13PrefixSumRowsPiS_S_ii, @function
_Z13PrefixSumRowsPiS_S_ii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $48, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r12
movq %rsi, %r14
movq %rdx, %r13
movl %ecx, %ebx
movl %r8d, %ebp
movl $1, 4(%rsp)
movl $1, 8(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl %r8d, %xmm0
mulss .LC1(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC5(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC2(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L36
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC4(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L36:
movl $1, %eax
jmp .L37
.L38:
addl %eax, %eax
.L37:
movl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
comiss %xmm0, %xmm1
ja .L38
movl $1, 12(%rsp)
movl %ebx, 16(%rsp)
movl $1, 20(%rsp)
movl %eax, (%rsp)
movl %eax, %eax
movl 8(%rsp), %ecx
movl $0, %r9d
leaq 0(,%rax,8), %r8
movq (%rsp), %rdx
movq 12(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L51
.L41:
call cudaDeviceSynchronize@PLT
movl $1, 32(%rsp)
movl $1, 44(%rsp)
leal 62(%rbp), %eax
movl %ebp, %edx
addl $31, %edx
cmovns %edx, %eax
sarl $5, %eax
movl %eax, 24(%rsp)
leal 62(%rbx), %eax
movl %ebx, %edx
addl $31, %edx
cmovns %edx, %eax
sarl $5, %eax
movl %eax, 28(%rsp)
movl $32, 36(%rsp)
movl $32, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L42:
call cudaDeviceSynchronize@PLT
pxor %xmm0, %xmm0
movups %xmm0, (%r12)
pxor %xmm0, %xmm0
cvtsi2ssl %ebx, %xmm0
mulss .LC1(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC5(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC2(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L43
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC4(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L43:
movl $1, %eax
jmp .L44
.L51:
movl %ebp, %ecx
movl %ebx, %edx
movq %r14, %rsi
movq %r12, %rdi
call _Z41__device_stub__Z17KernPrefixSumRowsPiS_iiPiS_ii
jmp .L41
.L52:
movl %ebx, %ecx
movl %ebp, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z32__device_stub__Z9transposePiS_iiPiS_ii
jmp .L42
.L45:
addl %eax, %eax
.L44:
movl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
comiss %xmm0, %xmm1
ja .L45
movl %ebp, 16(%rsp)
movl %eax, (%rsp)
movl %eax, %eax
movl 8(%rsp), %ecx
movl $0, %r9d
leaq 0(,%rax,8), %r8
movq (%rsp), %rdx
movq 12(%rsp), %rdi
movl 20(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L48:
call cudaDeviceSynchronize@PLT
leal 63(%rbx), %eax
movl %ebx, %edx
addl $32, %edx
cmovns %edx, %eax
sarl $5, %eax
movl %eax, 24(%rsp)
leal 63(%rbp), %eax
movl %ebp, %edx
addl $32, %edx
cmovns %edx, %eax
sarl $5, %eax
movl %eax, 28(%rsp)
movl 44(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movq 24(%rsp), %rdi
movl 32(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L49:
call cudaDeviceSynchronize@PLT
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movl %ebx, %ecx
movl %ebp, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z46__device_stub__Z22KernPrefixSumRowsTransPiS_iiPiS_ii
jmp .L48
.L54:
leal 1(%rbp), %ecx
leal 1(%rbx), %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z32__device_stub__Z9transposePiS_iiPiS_ii
jmp .L49
.cfi_endproc
.LFE2058:
.size _Z13PrefixSumRowsPiS_S_ii, .-_Z13PrefixSumRowsPiS_S_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "/home/ubuntu/Datasets/stackv2/train-structured/r-abishek/standaloneCodeTrials/main/integralImage/integralImageExclusive.cu"
.align 8
.LC8:
.string "cudaMalloc returned error %s (code %d) (file %s) (line %d)\n"
.section .rodata.str1.1
.LC10:
.string "CPU Time Taken: %f\n"
.LC11:
.string "Input\n\n"
.LC12:
.string "%d "
.LC13:
.string "\n"
.LC14:
.string "Output CPU"
.LC15:
.string "OUTPUT GPU"
.LC16:
.string "GPU Time Taken: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $48, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movq $0, 32(%rsp)
leaq 8(%rsp), %rdi
movl $1, %edx
movl $21, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L84
.L56:
leaq 16(%rsp), %rdi
movl $1, %edx
movl $84, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L85
.L57:
leaq 24(%rsp), %rdi
movl $1, %edx
movl $128, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L86
.L58:
leaq 32(%rsp), %rdi
movl $1, %edx
movl $128, %esi
call cudaMallocManaged@PLT
testl %eax, %eax
jne .L87
.L59:
movl $0, %eax
.L60:
movq 8(%rsp), %rdx
movb $1, (%rdx,%rax)
addq $1, %rax
cmpq $21, %rax
jne .L60
movl $0, %eax
.L61:
movq 16(%rsp), %rdx
movl $1, (%rdx,%rax)
addq $4, %rax
cmpq $84, %rax
jne .L61
movl $0, %eax
.L62:
movq 24(%rsp), %rdx
movl $1, (%rdx,%rax)
addq $4, %rax
cmpq $128, %rax
jne .L62
movl $0, %eax
.L63:
movq 32(%rsp), %rdx
movl $1, (%rdx,%rax)
addq $4, %rax
cmpq $128, %rax
jne .L63
call clock@PLT
movq %rax, %rbx
movq 24(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z16ComputeIntegralsPKhPi
call clock@PLT
subq %rbx, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC9(%rip), %xmm0
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $7, %ebp
leaq .LC12(%rip), %r12
leaq .LC13(%rip), %r13
.L64:
leaq -7(%rbp), %rbx
.L65:
movq 8(%rsp), %rax
movzbl (%rax,%rbx), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq %rbp, %rbx
jne .L65
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $7, %rbp
cmpq $28, %rbp
jne .L64
leaq .LC0(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $32, %r12d
movl $0, %ebp
leaq .LC12(%rip), %r13
leaq .LC13(%rip), %r14
.L67:
leaq 0(,%rbp,4), %rbx
.L68:
movq 24(%rsp), %rax
movl (%rax,%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L68
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rbp
addq $32, %r12
cmpq $32, %rbp
jne .L67
leaq .LC0(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L70:
movq 24(%rsp), %rdx
movl $0, (%rdx,%rax)
addq $4, %rax
cmpq $128, %rax
jne .L70
call clock@PLT
movq %rax, %rbx
movl $7, %r8d
movl $3, %ecx
movq 32(%rsp), %rdx
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z13PrefixSumRowsPiS_S_ii
call clock@PLT
subq %rbx, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC9(%rip), %xmm0
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0, %r13d
leaq .LC12(%rip), %r12
leaq .LC13(%rip), %r14
.L71:
leaq 0(,%r13,4), %rbx
.L72:
movq 32(%rsp), %rax
movl (%rax,%rbx), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L72
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %r13
addq $32, %rbp
cmpq $32, %r13
jne .L71
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L88
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L84:
.cfi_restore_state
leaq 8(%rsp), %rbp
movl $1, %edx
movl $21, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %ebx
movl $1, %edx
movl $21, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $231, %r9d
leaq .LC7(%rip), %r8
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L56
.L85:
leaq 16(%rsp), %rbp
movl $1, %edx
movl $84, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %ebx
movl $1, %edx
movl $84, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $232, %r9d
leaq .LC7(%rip), %r8
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L57
.L86:
leaq 24(%rsp), %rbp
movl $1, %edx
movl $128, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %ebx
movl $1, %edx
movl $128, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $233, %r9d
leaq .LC7(%rip), %r8
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L58
.L87:
leaq 32(%rsp), %rbp
movl $1, %edx
movl $128, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %ebx
movl $1, %edx
movl $128, %esi
movq %rbp, %rdi
call cudaMallocManaged@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $234, %r9d
leaq .LC7(%rip), %r8
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L59
.L88:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC17:
.string "_Z22KernPrefixSumRowsTransPiS_ii"
.section .rodata.str1.1
.LC18:
.string "_Z17KernPrefixSumRowsPiS_ii"
.LC19:
.string "_Z9transposePiS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _Z22KernPrefixSumRowsTransPiS_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z17KernPrefixSumRowsPiS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z9transposePiS_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1056964608
.align 4
.LC2:
.long 1258291200
.align 4
.LC4:
.long 1065353216
.align 4
.LC5:
.long 2147483647
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC9:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "integralImageExclusive.hip"
.globl _Z24__device_stub__transposePiS_ii # -- Begin function _Z24__device_stub__transposePiS_ii
.p2align 4, 0x90
.type _Z24__device_stub__transposePiS_ii,@function
_Z24__device_stub__transposePiS_ii: # @_Z24__device_stub__transposePiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9transposePiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__transposePiS_ii, .Lfunc_end0-_Z24__device_stub__transposePiS_ii
.cfi_endproc
# -- End function
.globl _Z32__device_stub__KernPrefixSumRowsPiS_ii # -- Begin function _Z32__device_stub__KernPrefixSumRowsPiS_ii
.p2align 4, 0x90
.type _Z32__device_stub__KernPrefixSumRowsPiS_ii,@function
_Z32__device_stub__KernPrefixSumRowsPiS_ii: # @_Z32__device_stub__KernPrefixSumRowsPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17KernPrefixSumRowsPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z32__device_stub__KernPrefixSumRowsPiS_ii, .Lfunc_end1-_Z32__device_stub__KernPrefixSumRowsPiS_ii
.cfi_endproc
# -- End function
.globl _Z37__device_stub__KernPrefixSumRowsTransPiS_ii # -- Begin function _Z37__device_stub__KernPrefixSumRowsTransPiS_ii
.p2align 4, 0x90
.type _Z37__device_stub__KernPrefixSumRowsTransPiS_ii,@function
_Z37__device_stub__KernPrefixSumRowsTransPiS_ii: # @_Z37__device_stub__KernPrefixSumRowsTransPiS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z22KernPrefixSumRowsTransPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z37__device_stub__KernPrefixSumRowsTransPiS_ii, .Lfunc_end2-_Z37__device_stub__KernPrefixSumRowsTransPiS_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z13PrefixSumRowsPiS_S_ii
.LCPI3_0:
.long 0x3f000000 # float 0.5
.LCPI3_1:
.long 0x3f800000 # float 1
.text
.globl _Z13PrefixSumRowsPiS_S_ii
.p2align 4, 0x90
.type _Z13PrefixSumRowsPiS_S_ii,@function
_Z13PrefixSumRowsPiS_S_ii: # @_Z13PrefixSumRowsPiS_S_ii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %ebx
movl %ecx, %r14d
movq %rdx, 112(%rsp) # 8-byte Spill
movq %rsi, %rbp
movq %rdi, %r12
cvtsi2ss %r8d, %xmm0
movabsq $-4294967296, %r15 # imm = 0xFFFFFFFF00000000
movabsq $4294967297, %r13 # imm = 0x100000001
mulss .LCPI3_0(%rip), %xmm0
callq ceilf@PLT
ucomiss .LCPI3_1(%rip), %xmm0
movq %r13, %rdx
jbe .LBB3_3
# %bb.1: # %.lr.ph.preheader
cvtss2sd %xmm0, %xmm0
movq %r13, %rdx
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
leal (%rdx,%rdx), %eax
andq %r15, %rdx
orq %rax, %rdx
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
ucomisd %xmm1, %xmm0
ja .LBB3_2
.LBB3_3: # %._crit_edge
movq %r14, %rdi
shlq $32, %rdi
orq $1, %rdi
movl %edx, %r8d
shlq $3, %r8
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_5
# %bb.4:
movq %r12, 72(%rsp)
movq %rbp, 64(%rsp)
movl %r14d, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17KernPrefixSumRowsPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_5:
movabsq $137438953504, %rbp # imm = 0x2000000020
callq hipDeviceSynchronize
leal 31(%rbx), %eax
leal 62(%rbx), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $5, %ecx
leal 31(%r14), %eax
leal 62(%r14), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $5, %edi
shlq $32, %rdi
orq %rcx, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_7
# %bb.6:
movq %r12, 72(%rsp)
movq 112(%rsp), %rax # 8-byte Reload
movq %rax, 64(%rsp)
movl %ebx, 12(%rsp)
movl %r14d, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9transposePiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_7:
movabsq $34359738360, %rbp # imm = 0x7FFFFFFF8
callq hipDeviceSynchronize
xorps %xmm0, %xmm0
cvtsi2ss %r14d, %xmm0
xorpd %xmm1, %xmm1
movupd %xmm1, (%r12)
mulss .LCPI3_0(%rip), %xmm0
callq ceilf@PLT
ucomiss .LCPI3_1(%rip), %xmm0
jbe .LBB3_10
# %bb.8: # %.lr.ph150.preheader
cvtss2sd %xmm0, %xmm0
.p2align 4, 0x90
.LBB3_9: # %.lr.ph150
# =>This Inner Loop Header: Depth=1
leal (,%r13,2), %eax
andq %r15, %r13
orq %rax, %r13
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
ucomisd %xmm1, %xmm0
ja .LBB3_9
.LBB3_10: # %._crit_edge151
movq %rbx, %rdi
shlq $32, %rdi
orq $1, %rdi
leaq (,%r13,8), %r8
andq %rbp, %r8
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_12
# %bb.11:
movq %r12, 72(%rsp)
movq 112(%rsp), %rax # 8-byte Reload
movq %rax, 64(%rsp)
movl %ebx, 12(%rsp)
movl %r14d, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z22KernPrefixSumRowsTransPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_12:
callq hipDeviceSynchronize
leal 32(%r14), %eax
leal 63(%r14), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $5, %ecx
leal 32(%rbx), %eax
leal 63(%rbx), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $5, %edi
shlq $32, %rdi
orq %rcx, %rdi
movl $1, %esi
movabsq $137438953504, %rdx # imm = 0x2000000020
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_14
# %bb.13:
incl %ebx
incl %r14d
movq %r12, 72(%rsp)
movq 112(%rsp), %rax # 8-byte Reload
movq %rax, 64(%rsp)
movl %r14d, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9transposePiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_14:
callq hipDeviceSynchronize
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z13PrefixSumRowsPiS_S_ii, .Lfunc_end3-_Z13PrefixSumRowsPiS_S_ii
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# void ComputeIntegrals(const unsigned char *in, int *out)
# ABI: SysV AMD64.  rdi = in (byte image), rsi = out (int integral image).
# Computes an exclusive integral image over a fixed-size image whose
# dimensions are baked in by the compiler (inner loop runs 7 columns,
# outer loop 3 more rows after the unrolled first row), then prints a
# newline (.Lstr.5) via a tail-call to puts.
# Clobbers: rax, rcx, rdx, rdi, r8-r10, xmm0, flags.  Leaf except puts.
#-----------------------------------------------------------------------
	.globl	_Z16ComputeIntegralsPKhPi       # -- Begin function _Z16ComputeIntegralsPKhPi
	.p2align	4, 0x90
	.type	_Z16ComputeIntegralsPKhPi,@function
_Z16ComputeIntegralsPKhPi:              # @_Z16ComputeIntegralsPKhPi
	.cfi_startproc
# %bb.0:
	# Zero the first output row: 32 bytes via two unaligned SSE stores
	# plus the trailing dword at offset 32.
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsi)
	movups	%xmm0, (%rsi)
	movl	$0, 32(%rsi)
	xorl	%eax, %eax
	xorl	%ecx, %ecx
	.p2align	4, 0x90
.LBB4_1:                                # =>This Inner Loop Header: Depth=1
	# First data row: out[i+9] = in[i] + rowsum - out[i] + out[i+1]
	# (indices relative to the zeroed row above); ecx carries the
	# running value from the previous column.
	movzbl	(%rdi,%rax), %edx
	addl	%ecx, %edx
	subl	(%rsi,%rax,4), %edx
	addl	4(%rsi,%rax,4), %edx
	movl	%edx, 36(%rsi,%rax,4)
	incq	%rax
	movl	%edx, %ecx
	cmpl	$6, %eax
	jne	.LBB4_1
# %bb.2:
	# Last column of the first data row uses the row-end element
	# 32(%rsi,...) instead of the carried register.
	movzbl	(%rdi,%rax), %ecx
	addl	32(%rsi,%rax,4), %ecx
	subl	(%rsi,%rax,4), %ecx
	addl	4(%rsi,%rax,4), %ecx
	movl	%ecx, 36(%rsi,%rax,4)
	leaq	68(%rsi), %rax                  # rax -> start of next output row
	addq	$7, %rdi                        # advance input by one 7-byte row
	movl	$2, %ecx
	xorl	%edx, %edx
	.p2align	4, 0x90
.LBB4_3:                                # =>This Loop Header: Depth=1
                                        #     Child Loop BB4_4 Depth 2
	# Remaining rows: rcx indexes the row whose first element is zeroed,
	# rdx indexes the previous row (rows are 32 bytes = 8 ints apart).
	movq	%rdx, %r8
	shlq	$5, %r8
	movq	%rcx, %r9
	shlq	$5, %r9
	movl	$0, (%rsi,%r9)
	movl	64(%rsi,%r8), %r9d              # r9d = carry-in from previous row end
	xorl	%r8d, %r8d
	.p2align	4, 0x90
.LBB4_4:                                #   Parent Loop BB4_3 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
	movzbl	(%rdi,%r8), %r10d
	addl	%r9d, %r10d
	subl	-36(%rax,%r8,4), %r10d
	addl	-32(%rax,%r8,4), %r10d
	movl	%r10d, (%rax,%r8,4)
	incq	%r8
	movl	%r10d, %r9d
	cmpl	$6, %r8d
	jne	.LBB4_4
# %bb.5:                                #   in Loop: Header=BB4_3 Depth=1
	# Last column of the row, carry taken from -4(%rax,...).
	movzbl	(%rdi,%r8), %r9d
	addl	-4(%rax,%r8,4), %r9d
	subl	-36(%rax,%r8,4), %r9d
	addl	-32(%rax,%r8,4), %r9d
	movl	%r9d, (%rax,%r8,4)
	incq	%rcx
	incq	%rdx
	addq	$32, %rax
	addq	$7, %rdi
	cmpq	$4, %rcx
	jne	.LBB4_3
# %bb.6:
	# Tail-call puts("\n"); puts supplies our ret.
	movl	$.Lstr.5, %edi
	jmp	puts@PLT                        # TAILCALL
.Lfunc_end4:
	.size	_Z16ComputeIntegralsPKhPi, .Lfunc_end4-_Z16ComputeIntegralsPKhPi
	.cfi_endproc
                                        # -- End function
	.section	.rodata.cst8,"aM",@progbits,8
	.p2align	3, 0x0                          # -- Begin function main
.LCPI5_0:
	.quad	0x412e848000000000              # double 1.0E+6 (CLOCKS_PER_SEC divisor)
	.text
	.globl	main
	.p2align	4, 0x90
	.type	main,@function
main:                                   # @main
	.cfi_startproc
# %bb.0:
	# Stack layout (after sub $32):
	#   (%rsp)    = CPU result buffer pointer (128 B, 4x8 ints)
	#   8(%rsp)   = GPU result buffer pointer (128 B)
	#   16(%rsp)  = byte input image pointer  (21 B, 3x7)
	#   24(%rsp)  = int  input image pointer  (84 B, 21 ints)
	pushq	%r15
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	subq	$32, %rsp
	.cfi_def_cfa_offset 64
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movq	$0, 16(%rsp)
	movq	$0, 24(%rsp)
	movq	$0, (%rsp)
	movq	$0, 8(%rsp)
	# hipMallocManaged(&p, 21, hipMemAttachGlobal); on failure the source's
	# error macro re-invokes the call twice while formatting the message
	# (macro argument re-evaluation), hence the repeated calls below.
	leaq	16(%rsp), %rdi
	movl	$21, %esi
	movl	$1, %edx
	callq	hipMallocManaged
	testl	%eax, %eax
	je	.LBB5_2
# %bb.1:
	leaq	16(%rsp), %rbx
	movl	$21, %esi
	movq	%rbx, %rdi
	movl	$1, %edx
	callq	hipMallocManaged
	movl	%eax, %edi
	callq	hipGetErrorString
	movq	%rax, %r14
	movl	$21, %esi
	movq	%rbx, %rdi
	movl	$1, %edx
	callq	hipMallocManaged
	movl	$.L.str.1, %edi
	movl	$.L.str.2, %ecx
	movq	%r14, %rsi
	movl	%eax, %edx
	movl	$231, %r8d                      # source line number for the report
	xorl	%eax, %eax
	callq	printf
.LBB5_2:
	# Second allocation: 84 bytes at 24(%rsp).
	leaq	24(%rsp), %rdi
	movl	$84, %esi
	movl	$1, %edx
	callq	hipMallocManaged
	testl	%eax, %eax
	je	.LBB5_4
# %bb.3:
	leaq	24(%rsp), %rbx
	movl	$84, %esi
	movq	%rbx, %rdi
	movl	$1, %edx
	callq	hipMallocManaged
	movl	%eax, %edi
	callq	hipGetErrorString
	movq	%rax, %r14
	movl	$84, %esi
	movq	%rbx, %rdi
	movl	$1, %edx
	callq	hipMallocManaged
	movl	$.L.str.1, %edi
	movl	$.L.str.2, %ecx
	movq	%r14, %rsi
	movl	%eax, %edx
	movl	$232, %r8d
	xorl	%eax, %eax
	callq	printf
.LBB5_4:
	# Third allocation: 128 bytes at (%rsp).
	movq	%rsp, %rdi
	movl	$128, %esi
	movl	$1, %edx
	callq	hipMallocManaged
	testl	%eax, %eax
	je	.LBB5_6
# %bb.5:
	movq	%rsp, %rbx
	movl	$128, %esi
	movq	%rbx, %rdi
	movl	$1, %edx
	callq	hipMallocManaged
	movl	%eax, %edi
	callq	hipGetErrorString
	movq	%rax, %r14
	movl	$128, %esi
	movq	%rbx, %rdi
	movl	$1, %edx
	callq	hipMallocManaged
	movl	$.L.str.1, %edi
	movl	$.L.str.2, %ecx
	movq	%r14, %rsi
	movl	%eax, %edx
	movl	$233, %r8d
	xorl	%eax, %eax
	callq	printf
.LBB5_6:
	# Fourth allocation: 128 bytes at 8(%rsp).
	leaq	8(%rsp), %rdi
	movl	$128, %esi
	movl	$1, %edx
	callq	hipMallocManaged
	testl	%eax, %eax
	je	.LBB5_7
# %bb.34:
	leaq	8(%rsp), %rbx
	movl	$128, %esi
	movq	%rbx, %rdi
	movl	$1, %edx
	callq	hipMallocManaged
	movl	%eax, %edi
	callq	hipGetErrorString
	movq	%rax, %r14
	movl	$128, %esi
	movq	%rbx, %rdi
	movl	$1, %edx
	callq	hipMallocManaged
	movl	$.L.str.1, %edi
	movl	$.L.str.2, %ecx
	movq	%r14, %rsi
	movl	%eax, %edx
	movl	$234, %r8d
	xorl	%eax, %eax
	callq	printf
.LBB5_7:                                # %.preheader148
	# Initialize all buffers: bytes to 1, ints to 1.
	xorl	%eax, %eax
	.p2align	4, 0x90
.LBB5_8:                                # =>This Inner Loop Header: Depth=1
	movq	16(%rsp), %rcx
	movb	$1, (%rcx,%rax)
	incq	%rax
	cmpq	$21, %rax
	jne	.LBB5_8
# %bb.9:                                # %.preheader66
	movq	24(%rsp), %rax
	xorl	%ecx, %ecx
	.p2align	4, 0x90
.LBB5_10:                               # =>This Inner Loop Header: Depth=1
	movl	$1, (%rax,%rcx,4)
	incq	%rcx
	cmpq	$21, %rcx
	jne	.LBB5_10
# %bb.11:                               # %.preheader65
	movq	(%rsp), %rax
	xorl	%ecx, %ecx
	.p2align	4, 0x90
.LBB5_12:                               # =>This Inner Loop Header: Depth=1
	movl	$1, (%rax,%rcx,4)
	incq	%rcx
	cmpq	$32, %rcx
	jne	.LBB5_12
# %bb.13:                               # %.preheader64
	movq	8(%rsp), %rax
	xorl	%ecx, %ecx
	.p2align	4, 0x90
.LBB5_14:                               # =>This Inner Loop Header: Depth=1
	movl	$1, (%rax,%rcx,4)
	incq	%rcx
	cmpq	$32, %rcx
	jne	.LBB5_14
# %bb.15:
	# CPU reference pass: ComputeIntegrals inlined below, timed with clock().
	callq	clock
	movq	%rax, %rbx
	movq	16(%rsp), %rax
	movq	(%rsp), %rcx
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rcx)
	movups	%xmm0, (%rcx)
	movl	$0, 32(%rcx)
	movl	32(%rcx), %esi
	xorl	%edx, %edx
	.p2align	4, 0x90
.LBB5_16:                               # =>This Inner Loop Header: Depth=1
	movzbl	(%rax,%rdx), %edi
	addl	%esi, %edi
	subl	(%rcx,%rdx,4), %edi
	addl	4(%rcx,%rdx,4), %edi
	movl	%edi, 36(%rcx,%rdx,4)
	incq	%rdx
	movl	%edi, %esi
	cmpl	$6, %edx
	jne	.LBB5_16
# %bb.17:
	movzbl	(%rax,%rdx), %esi
	addl	32(%rcx,%rdx,4), %esi
	subl	(%rcx,%rdx,4), %esi
	addl	4(%rcx,%rdx,4), %esi
	movl	%esi, 36(%rcx,%rdx,4)
	leaq	68(%rcx), %rdx
	addq	$7, %rax
	movl	$2, %esi
	xorl	%edi, %edi
	.p2align	4, 0x90
.LBB5_18:                               # =>This Loop Header: Depth=1
                                        #     Child Loop BB5_19 Depth 2
	movq	%rdi, %r8
	shlq	$5, %r8
	movq	%rsi, %r9
	shlq	$5, %r9
	movl	$0, (%rcx,%r9)
	movl	64(%rcx,%r8), %r9d
	xorl	%r8d, %r8d
	.p2align	4, 0x90
.LBB5_19:                               #   Parent Loop BB5_18 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
	movzbl	(%rax,%r8), %r10d
	addl	%r9d, %r10d
	subl	-36(%rdx,%r8,4), %r10d
	addl	-32(%rdx,%r8,4), %r10d
	movl	%r10d, (%rdx,%r8,4)
	incq	%r8
	movl	%r10d, %r9d
	cmpl	$6, %r8d
	jne	.LBB5_19
# %bb.20:                               #   in Loop: Header=BB5_18 Depth=1
	movzbl	(%rax,%r8), %r9d
	addl	-4(%rdx,%r8,4), %r9d
	subl	-36(%rdx,%r8,4), %r9d
	addl	-32(%rdx,%r8,4), %r9d
	movl	%r9d, (%rdx,%r8,4)
	incq	%rsi
	incq	%rdi
	addq	$32, %rdx
	addq	$7, %rax
	cmpq	$4, %rsi
	jne	.LBB5_18
# %bb.21:                               # %_Z16ComputeIntegralsPKhPi.exit
	movl	$.Lstr.5, %edi
	callq	puts@PLT
	# Elapsed CPU time = (clock() - start) / 1e6, printed with one vector
	# arg (al = 1 per SysV variadic convention).
	callq	clock
	subq	%rbx, %rax
	xorps	%xmm0, %xmm0
	cvtsi2sd	%rax, %xmm0
	divsd	.LCPI5_0(%rip), %xmm0
	movl	$.L.str.3, %edi
	movb	$1, %al
	callq	printf
	movl	$.Lstr.1, %edi
	callq	puts@PLT
	# Print the 3x7 byte input image.
	xorl	%ebx, %ebx
	xorl	%r14d, %r14d
	.p2align	4, 0x90
.LBB5_22:                               # %.preheader63
                                        # =>This Loop Header: Depth=1
                                        #     Child Loop BB5_23 Depth 2
	xorl	%r15d, %r15d
	.p2align	4, 0x90
.LBB5_23:                               #   Parent Loop BB5_22 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
	movq	16(%rsp), %rax
	addq	%rbx, %rax
	movzbl	(%r15,%rax), %esi
	movl	$.L.str.5, %edi
	xorl	%eax, %eax
	callq	printf
	incq	%r15
	cmpq	$7, %r15
	jne	.LBB5_23
# %bb.24:                               #   in Loop: Header=BB5_22 Depth=1
	movl	$10, %edi
	callq	putchar@PLT
	incq	%r14
	addq	$7, %rbx
	cmpq	$3, %r14
	jne	.LBB5_22
# %bb.25:
	movl	$.Lstr.5, %edi
	callq	puts@PLT
	movl	$.L.str.7, %edi
	xorl	%eax, %eax
	callq	printf
	movl	$.Lstr.5, %edi
	callq	puts@PLT
	# Print the 4x8 CPU output matrix (rows are 32 bytes apart).
	xorl	%ebx, %ebx
	xorl	%r14d, %r14d
	.p2align	4, 0x90
.LBB5_26:                               # %.preheader62
                                        # =>This Loop Header: Depth=1
                                        #     Child Loop BB5_27 Depth 2
	xorl	%r15d, %r15d
	.p2align	4, 0x90
.LBB5_27:                               #   Parent Loop BB5_26 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
	movq	(%rsp), %rax
	addq	%rbx, %rax
	movl	(%rax,%r15,4), %esi
	movl	$.L.str.5, %edi
	xorl	%eax, %eax
	callq	printf
	incq	%r15
	cmpq	$8, %r15
	jne	.LBB5_27
# %bb.28:                               #   in Loop: Header=BB5_26 Depth=1
	movl	$10, %edi
	callq	putchar@PLT
	incq	%r14
	addq	$32, %rbx
	cmpq	$4, %r14
	jne	.LBB5_26
# %bb.29:
	movl	$.Lstr.5, %edi
	callq	puts@PLT
	movl	$.L.str.8, %edi
	xorl	%eax, %eax
	callq	printf
	movl	$.Lstr.5, %edi
	callq	puts@PLT
	# Zero the CPU buffer (8 x 16-byte stores = 128 B), then run the GPU
	# pipeline PrefixSumRows(out, in, gpu_out, 3, 7) under clock() timing.
	movq	(%rsp), %rax
	xorpd	%xmm0, %xmm0
	movupd	%xmm0, (%rax)
	movupd	%xmm0, 16(%rax)
	movupd	%xmm0, 32(%rax)
	movupd	%xmm0, 48(%rax)
	movupd	%xmm0, 64(%rax)
	movupd	%xmm0, 80(%rax)
	movupd	%xmm0, 96(%rax)
	movupd	%xmm0, 112(%rax)
	callq	clock
	movq	%rax, %rbx
	movq	(%rsp), %rdi
	movq	24(%rsp), %rsi
	movq	8(%rsp), %rdx
	movl	$3, %ecx
	movl	$7, %r8d
	callq	_Z13PrefixSumRowsPiS_S_ii
	callq	clock
	subq	%rbx, %rax
	xorps	%xmm0, %xmm0
	cvtsi2sd	%rax, %xmm0
	divsd	.LCPI5_0(%rip), %xmm0
	movl	$.L.str.9, %edi
	movb	$1, %al
	callq	printf
	# Print the 4x8 GPU output matrix.
	xorl	%ebx, %ebx
	xorl	%r14d, %r14d
	.p2align	4, 0x90
.LBB5_30:                               # %.preheader
                                        # =>This Loop Header: Depth=1
                                        #     Child Loop BB5_31 Depth 2
	xorl	%r15d, %r15d
	.p2align	4, 0x90
.LBB5_31:                               #   Parent Loop BB5_30 Depth=1
                                        # =>  This Inner Loop Header: Depth=2
	movq	8(%rsp), %rax
	addq	%rbx, %rax
	movl	(%rax,%r15,4), %esi
	movl	$.L.str.5, %edi
	xorl	%eax, %eax
	callq	printf
	incq	%r15
	cmpq	$8, %r15
	jne	.LBB5_31
# %bb.32:                               #   in Loop: Header=BB5_30 Depth=1
	movl	$10, %edi
	callq	putchar@PLT
	incq	%r14
	addq	$32, %rbx
	cmpq	$4, %r14
	jne	.LBB5_30
# %bb.33:
	# Release the four managed buffers and return 0.
	movq	16(%rsp), %rdi
	callq	hipFree
	movq	24(%rsp), %rdi
	callq	hipFree
	movq	(%rsp), %rdi
	callq	hipFree
	movq	8(%rsp), %rdi
	callq	hipFree
	xorl	%eax, %eax
	addq	$32, %rsp
	.cfi_def_cfa_offset 32
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end5:
	.size	main, .Lfunc_end5-main
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __hip_module_ctor — runs from .init_array before main.
# Registers the fat binary once (guarded by __hip_gpubin_handle) and then
# registers the three kernel stubs with the HIP runtime, finally queueing
# __hip_module_dtor via atexit (tail call supplies our ret).
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp                       # scratch for the two zeroed out-params
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB6_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB6_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	# __hipRegisterFunction(handle, hostFn, name, name, -1, 0, 0, 0, 0, 0)
	# — the zeroed 32 bytes on the stack are the trailing null pointers.
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z9transposePiS_ii, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z17KernPrefixSumRowsPiS_ii, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z22KernPrefixSumRowsTransPiS_ii, %esi
	movl	$.L__unnamed_3, %edx
	movl	$.L__unnamed_3, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end6:
	.size	__hip_module_ctor, .Lfunc_end6-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
#-----------------------------------------------------------------------
# __hip_module_dtor — atexit handler.  Unregisters the fat binary if it
# was registered and clears the cached handle (idempotent).
#-----------------------------------------------------------------------
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB7_2
# %bb.1:
	pushq	%rax                            # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB7_2:
	retq
.Lfunc_end7:
	.size	__hip_module_dtor, .Lfunc_end7-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# Kernel handle objects: each host-side kernel symbol is a pointer to its
# __device_stub__ launch wrapper, as expected by hipLaunchKernel.
	.type	_Z9transposePiS_ii,@object      # @_Z9transposePiS_ii
	.section	.rodata,"a",@progbits
	.globl	_Z9transposePiS_ii
	.p2align	3, 0x0
_Z9transposePiS_ii:
	.quad	_Z24__device_stub__transposePiS_ii
	.size	_Z9transposePiS_ii, 8

	.type	_Z17KernPrefixSumRowsPiS_ii,@object # @_Z17KernPrefixSumRowsPiS_ii
	.globl	_Z17KernPrefixSumRowsPiS_ii
	.p2align	3, 0x0
_Z17KernPrefixSumRowsPiS_ii:
	.quad	_Z32__device_stub__KernPrefixSumRowsPiS_ii
	.size	_Z17KernPrefixSumRowsPiS_ii, 8

	.type	_Z22KernPrefixSumRowsTransPiS_ii,@object # @_Z22KernPrefixSumRowsTransPiS_ii
	.globl	_Z22KernPrefixSumRowsTransPiS_ii
	.p2align	3, 0x0
_Z22KernPrefixSumRowsTransPiS_ii:
	.quad	_Z37__device_stub__KernPrefixSumRowsTransPiS_ii
	.size	_Z22KernPrefixSumRowsTransPiS_ii, 8

# String constants used by main (format strings and messages).
	.type	.L.str.1,@object                # @.str.1
	.section	.rodata.str1.1,"aMS",@progbits,1
.L.str.1:
	.asciz	"hipMalloc returned error %s (code %d) (file %s) (line %d)\n"
	.size	.L.str.1, 59

	.type	.L.str.2,@object                # @.str.2
.L.str.2:
	.asciz	"/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/r-abishek/standaloneCodeTrials/main/integralImage/integralImageExclusive.hip"
	.size	.L.str.2, 134

	.type	.L.str.3,@object                # @.str.3
.L.str.3:
	.asciz	"CPU Time Taken: %f\n"
	.size	.L.str.3, 20

	.type	.L.str.5,@object                # @.str.5
.L.str.5:
	.asciz	"%d "
	.size	.L.str.5, 4

	.type	.L.str.7,@object                # @.str.7
.L.str.7:
	.asciz	"Output CPU"
	.size	.L.str.7, 11

	.type	.L.str.8,@object                # @.str.8
.L.str.8:
	.asciz	"OUTPUT GPU"
	.size	.L.str.8, 11

	.type	.L.str.9,@object                # @.str.9
.L.str.9:
	.asciz	"GPU Time Taken: %f\n"
	.size	.L.str.9, 20

# Mangled kernel names handed to __hipRegisterFunction.
	.type	.L__unnamed_1,@object           # @0
.L__unnamed_1:
	.asciz	"_Z9transposePiS_ii"
	.size	.L__unnamed_1, 19

	.type	.L__unnamed_2,@object           # @1
.L__unnamed_2:
	.asciz	"_Z17KernPrefixSumRowsPiS_ii"
	.size	.L__unnamed_2, 28

	.type	.L__unnamed_3,@object           # @2
.L__unnamed_3:
	.asciz	"_Z22KernPrefixSumRowsTransPiS_ii"
	.size	.L__unnamed_3, 33

# Fat-binary wrapper: magic 0x48495046 ("HIPF"), version 1, pointer to the
# embedded device code, reserved 0.
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046
	.long	1                               # 0x1
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24

# One-shot guard written by __hip_module_ctor / cleared by the dtor.
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1

	.type	.Lstr.1,@object                 # @str.1
	.section	.rodata.str1.1,"aMS",@progbits,1
.Lstr.1:
	.asciz	"Input\n"
	.size	.Lstr.1, 7

	.type	.Lstr.5,@object                 # @str.5
.Lstr.5:
	.asciz	"\n"
	.size	.Lstr.5, 2
	.section	".linker-options","e",@llvm_linker_options
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym _Z24__device_stub__transposePiS_ii
	.addrsig_sym _Z32__device_stub__KernPrefixSumRowsPiS_ii
	.addrsig_sym _Z37__device_stub__KernPrefixSumRowsTransPiS_ii
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym _Z9transposePiS_ii
	.addrsig_sym _Z17KernPrefixSumRowsPiS_ii
	.addrsig_sym _Z22KernPrefixSumRowsTransPiS_ii
	.addrsig_sym __hip_fatbin
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // in this code we do not use sparsity because the matrices are small
// all matrices therefore are full
// we assume that all the matrices are stored in a linear array, in a column major form
// the maximum graph size is set to 12 in the variable MAX_N_PERM but it can be increased if the GPU is more powerfull
// TO-DO: change the code so that we can scan just a fraction of all the n! possible permutations.
// This will allow using multiple GPUs in parallel
#include <stdio.h>
#include <time.h>
#include <float.h>
#include <math.h>
#define MAX_N_PERM 12
typedef unsigned int lint;
#define gerror(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA API call and (optionally) terminate the process.
// Used through the gerror() macro, which supplies __FILE__/__LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess)
      return;                               // fast path: nothing to report
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort)
      exit(code);                           // exit status = CUDA error code
}
// The code will allow us to use different norms.  Matrices are assumed
// square, stored column-major in a flat array.  A norm takes a permutation
// (1-based entries) plus two matrices; indices into the arrays are 0-based.
//
// Squared Frobenius norm of A - P^T B P for the given permutation.
__device__ __host__ float fro_norm_square(int n, float *A , float *B, int *perm){
	float acc = 0;
	for (int row = 0; row < n; row++){       // column-major: row varies fastest
		int prow = perm[row] - 1;            // permuted row (perm is 1-based)
		for (int col = 0; col < n; col++){
			int pcol = perm[col] - 1;        // permuted column
			float diff = A[row + n*col] - B[prow + n*pcol];
			acc = acc + diff*diff;
		}
	}
	return acc;
}
// Device-side pointer so the host can fetch this metric via cudaMemcpyFromSymbol.
__device__ float (*d_ptr_fro_norm_square)(int , float * , float *, int *) = fro_norm_square;
// notice that if the matrices are just adjacency matrices where all the weights are 1
// then there is no difference between having this L1_norm or the fro_norm_square above
//
// L1 norm of A - P^T B P for the given 1-based permutation.
__device__ __host__ float L1_norm(int n, float *A , float *B, int *perm){
	float total = 0;
	for (int i = 0; i < n; i++){ //go along column
		for (int j = 0; j < n; j++){ // go along row
			float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
			// BUG FIX: plain abs() binds to the integer overload on the host
			// path and truncates fractional differences to 0; fabsf keeps
			// float semantics on both host and device.
			total = total + fabsf(value);
		}
	}
	return total;
}
// Device-side pointer so the host can fetch this metric via cudaMemcpyFromSymbol.
__device__ float (*d_ptr_L1_norm)(int , float * , float *, int *) = L1_norm;
// Fill v[0..n-1] with the consecutive values 1..n (the identity permutation).
__host__ __device__ inline void settoconsec(int *v, int n){
	int k = 0;
	while (k < n){
		v[k] = k + 1;
		++k;
	}
}
// Copy n ints from v_source into v_dest (forward order, no overlap handling).
inline void copyvec(int *v_dest, int * v_source, int n){
	int k = 0;
	while (k < n){
		v_dest[k] = v_source[k];
		++k;
	}
}
// Exchange the elements of v at positions ix1 and ix2.
__host__ __device__ inline void swap(int *v, int ix1, int ix2){
	const int held = v[ix2];
	v[ix2] = v[ix1];
	v[ix1] = held;
}
// Print an int vector, space-separated, terminated by a newline.
__host__ __device__ void printvec(int *v, int n){
	int k = 0;
	while (k < n){
		printf("%d ", v[k]);
		++k;
	}
	printf("\n");
}
// Print a float vector, space-separated, terminated by a newline.
__host__ __device__ void printfloatvec(float *v, int n){
	int k = 0;
	while (k < n){
		printf("%f ", v[k]);
		++k;
	}
	printf("\n");
}
// first we have a piece of code that computes the thing in serial form
// we assume that the matrices A and B are square and of the same dimension
// we do not use sparse matrices because the matrices are small anyway
// the norm we are computing is min_P || A - P^T B P ||
// we will also return the best permutation
//
// Enumerates all n! permutations with a mixed-radix counter v (digit i
// runs i+1 .. n, inclusive) and keeps the metric-minimizing one in
// bestperm.  Returns the minimal metric value; FLT_MAX if n == 0 digits
// never terminate the counter (caller is expected to pass n >= 1).
// NOTE(review): the counter/swap scheme here deliberately differs from
// the GPU-side index_to_perm ordering — see the comment below.
float compute_optimal_match(int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), int * bestperm ){
	float opt_val = FLT_MAX;
	int * v = (int *) malloc(n * sizeof(int) );
	int * output = (int *) malloc(n * sizeof(int) );
	settoconsec(v, n);
	// Terminate once the most-significant digit overflows past n.
	while( v[n-1] <= n ){
		// note that the way we are going the swap here is a bit different because
		// the elements from v are already in increasing form. Like
		// 1 2 3 , 2 2 3 , 3 2 3, 1 3 3 , 2 3 3 , 3 3 3
		// while in the parallel code the v is in the form
		// 1 1 1 , 2 1 1, 3 1 1, 1 2 1, 2 2 1 , 3 2 1
		settoconsec(output, n);
		// Convert counter digits into a permutation by successive swaps
		// (a variant of the Fisher-Yates decoding).
		for( int i = 0; i < n ; i++){
			swap(output, i, v[i]-1);
		}
		// at this point the vector output contains a permutation and we can compute a distance
		float val = (*metric)( n , A , B , output );
		if ( val < opt_val ){
			opt_val = val;
			copyvec( bestperm , output , n );
		}
		// Increment the mixed-radix counter with carry propagation.
		v[ 0 ] = v[ 0 ] + 1;
		for (int i = 0; i < n-1 ; i++){
			if( v[i] > n ){
				v[i] = i+1;                      // digit i wraps back to its minimum
				v[ i + 1 ] = v[ i + 1 ] + 1;     // carry into the next digit
			}
		}
	}
	free(output);
	free(v);
	return opt_val;
}
// this function transforms and index into a permutation
// the function requires a bit of scrap space
//
// Decodes permutation index r (0 <= r < n!) into a concrete permutation
// in perm[0..n-1] using the factorial number system: digit i has radix
// n-i, stored 1-based in scrap.  scrap must hold at least n ints.
__device__ __host__ void index_to_perm(lint r, int n, int *perm, int * scrap){
	// Extract factorial-base digits, least-significant radix first is n.
	for (int i = n ; i >=1; i--){
		scrap[n - i] = (r % i) + 1;
		r = r/i;
	}
	// note that the way we are going the swap here is a bit different because
	// the elements from v are not in increasing form like in the cpu code.
	// In the parallel code the scrap is in the form
	// 1 1 1 , 2 1 1, 3 1 1, 1 2 1, 2 2 1 , 3 2 1
	// but in the serial code it is
	// 1 2 3 , 2 2 3 , 3 2 3, 1 3 3 , 2 3 3 , 3 3 3
	settoconsec(perm, n);
	// Fisher-Yates style decoding: swap position i with i + digit - 1.
	for( int i = 0; i < n ; i++){
		swap(perm, i, i + scrap[i]-1);
	}
}
// n! as an int; returns 1 for n <= 1.  Overflows past n = 12 — callers
// guard the range via MAX_N_PERM.
inline int fact(int n){
	int result = 1;
	for (int k = 2; k <= n; k++){
		result *= k;
	}
	return result;
}
// this computes the optimal matching by testing different permutations in parallel
// we pass the nfact from outside to save time
// we cannot store the result of all evaluations in memory and then do a parallel max.
// there is just too much stuff to try. So each thread needs to keep a local max of several trials
//
// Each thread evaluates permutation indices baseix, baseix + chunck_per_cycle,
// baseix + 2*chunck_per_cycle, ... and records its local best value/index in
// obj_vals[baseix] / obj_perms[baseix]; the host reduces those arrays.
// Dynamic shared memory must hold 2*n*n floats (A followed by B).
__global__ void kernel_to_compute_optimal_match(int chunck_per_cycle, int num_perm_per_thread, lint nfact, int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), float * obj_vals, lint * obj_perms ){
	int baseix = blockIdx.x*blockDim.x + threadIdx.x;
	lint ix = baseix;
	// we copy A and B to shared memory because it might be faster when we are computing the norms
	extern __shared__ float AB_shared_mem[];
	// we need to split the shared memory into different parts
	float * shared_A = AB_shared_mem;
	float * shared_B = &AB_shared_mem[n*n];
	// the first thread of each block does the copy for the corresponding block
	if (threadIdx.x == 0){
		for (int i = 0; i < n*n ; i++){
			shared_A[i] = A[i];
			shared_B[i] = B[i];
		}
	}
	__syncthreads();
	float best_val = FLT_MAX;
	// BUG FIX: best_perm_ix was previously uninitialized, so a thread whose
	// every candidate index fell outside [0, nfact) wrote garbage into
	// obj_perms[baseix]. Initialize it; such slots also keep FLT_MAX in
	// obj_vals, so the host reduction ignores them.
	lint best_perm_ix = 0;
	for (int i = 0; i < num_perm_per_thread ; i++){
		ix = baseix + chunck_per_cycle*i;
		// filter the stuff that does not matter
		if (ix < nfact){
			// probably we do not need more than 20 here
			int perm[MAX_N_PERM];
			int scrap[MAX_N_PERM];
			index_to_perm( ix , n, perm, scrap);
			float val = (*metric)( n, shared_A , shared_B, perm);
			if (val < best_val){
				best_val = val;
				best_perm_ix = ix;
			}
		}
	}
	obj_vals[baseix] = best_val;
	obj_perms[baseix] = best_perm_ix;
}
// Smoke-test: decode every index 0 .. n!-1 into a permutation.
// (The printvec call for visual inspection is left commented out, as in
// the original.)
void test_index_perm(int n ){
	int *perm = (int *) malloc(n * sizeof(int));
	int *scrap = (int *) malloc(n * sizeof(int));
	int r = 0;
	while (r < fact(n)){
		index_to_perm(r, n, perm, scrap);
		//printvec(perm,n);
		++r;
	}
	free(perm);
	free(scrap);
}
// this function will allocate space for A
//
// Reads an edge-list file ("u v" per line, 1-based vertex ids) into a
// freshly-allocated dense *graphsize x *graphsize column-major adjacency
// matrix of 0/1 floats.  If *graphsize == -1 a first pass infers the size
// from the largest vertex id seen.  Returns NULL if the file cannot be
// opened or the matrix cannot be allocated.
float * read_graph_into_adj_mat(char * filename, int *graphsize, int directed){
	// if we are not given the graph size then we first read the file to try to estimate the size
	// of the graph by trying to find the largest index used
	// here we assume that the indices used are 1, 2, ..., n
	if (*graphsize == -1){
		FILE * graphfile = fopen(filename , "r");
		if (graphfile == NULL){
			fprintf(stderr, "Could not open graph file %s\n", filename);
			return NULL;
		}
		int dim = -1;
		int edge1, edge2;
		// BUG FIX: the loop used to test `!= EOF`; fscanf returns 0 (not EOF)
		// on a malformed token without consuming input, which looped forever.
		// Requiring both conversions also stops cleanly at EOF.
		while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) == 2 ){
			if (dim < edge1){
				dim = edge1;
			}
			if (dim < edge2){
				dim = edge2;
			}
		}
		fclose(graphfile);
		*graphsize = dim;
	}
	// we use calloc because we want most of the entries to be zero and just have to set a few to non-zero
	// whatever edges are not specified in the file we are reading we will assume are zero
	float *A = (float *) calloc( (size_t)(*graphsize) * (*graphsize), sizeof(float) ) ;
	FILE * graphfile = fopen(filename , "r");
	if (A == NULL || graphfile == NULL){
		fprintf(stderr, "Could not read graph file %s\n", filename);
		if (graphfile != NULL) fclose(graphfile);
		free(A);
		return NULL;
	}
	int edge1, edge2;
	while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) == 2 ){
		// Ignore out-of-range vertex ids rather than writing out of bounds.
		if (edge1 <= (*graphsize) && edge2 <= (*graphsize) && edge1 >=1 && edge2 >=1 ){
			A[(edge1-1) + (edge2-1)* (*graphsize) ] = 1;
			// if the graph is undirected, we force it to be undirected
			if (directed == 0){
				A[(edge2-1) + (edge1-1)* (*graphsize) ] = 1;
			}
		}
	}
	fclose(graphfile);
	return A;
}
// this writes a vector to an output file
//
// Writes vec[0..n-1] space-separated (no trailing space or newline) to
// output_file, truncating any existing file.  Writes nothing when n <= 1
// or the file cannot be opened.
void save_vec_to_file(int * vec, int n , char* output_file){
	// we only write if there is stuff to write. Otherwise we leave things as they are
	if (n > 1){
		FILE * vec_file = fopen(output_file , "w");
		// BUG FIX: fopen failure previously crashed in fprintf(NULL, ...).
		if (vec_file == NULL){
			fprintf(stderr, "Could not open output file %s\n", output_file);
			return;
		}
		for (int i = 0; i < n-1; i++){
			fprintf(vec_file,"%d ", vec[i]);
		}
		fprintf(vec_file,"%d", vec[n-1]);   // last element without separator
		fclose(vec_file);
	}
}
// Entry point.  Usage:
//   prog fileA fileB outputfile L1vsL2 directed cpu/gpu size
// where L1vsL2 selects the metric (1 = L1, 2 = squared Frobenius),
// directed is 0/1, cpu/gpu is 1 for the serial path (anything else runs
// the CUDA path), and size is the graph size or -1 to infer it.
int main(int argc,char *argv[]){
	if (argc != 8){
		printf("The arguments must be filenameA, filenameB, outputfile, L1vsL2, directed/undirected, gpu/cpu, size\n");
		return 0;
	}
	char * filenameA = (char *) argv[1];
	char * filenameB = (char *) argv[2];
	char * fileoutput = (char *) argv[3];
	int norm_to_use = atoi( argv[4] );
	int directed = atoi( argv[5] );
	int cpu_vs_gpu = atoi( argv[6] );
	int graphsize = atoi( argv[7] );
	int sizeA = graphsize;
	int sizeB = graphsize;
	float *A = read_graph_into_adj_mat( filenameA , &sizeA , directed );
	float *B = read_graph_into_adj_mat( filenameB , &sizeB , directed );
	if ( sizeA != sizeB ){
		printf("Error, graphs of different sizes\n");
		return 0;
	}
	clock_t cpu_start, cpu_end;
	float cputime;
	int n = sizeA;
	lint nfact = fact(n);                   // overflows past n = 12 (MAX_N_PERM)
	if (cpu_vs_gpu == 1){
		int * bestperm = (int *) malloc(n * sizeof( int ) ); //this is where we will keep the best perm
		cpu_start = clock();
		// we might want to try different norms
		// BUG FIX: val was uninitialized and printed as garbage when
		// norm_to_use was neither 1 nor 2.
		float val = 0;
		if (norm_to_use == 1){
			val = compute_optimal_match(n, A, B, &L1_norm , bestperm);
		}
		if (norm_to_use == 2){
			val = compute_optimal_match(n, A, B, &fro_norm_square , bestperm);
		}
		cpu_end = clock();
		printf("CPU Opt Val = %f\n", val);
		printvec(bestperm, n);
		cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
		printf("SERIAL: Time to compute opt = %f\n",cputime); fflush(stdout);
		// store the vector in the output
		save_vec_to_file(bestperm, n , fileoutput);
		// free the vector
		free(bestperm);
	}else{
		// now we have some GPU code
		cudaSetDevice( 0 );
		cudaDeviceReset();
		// here we compute the division of work
		// we try to make everything result in an integer division of work
		int numthreadsperblock = 1024;
		int numblocks = 1024;
		int chunck_per_cycle = numblocks*numthreadsperblock;
		int num_stuff_per_thread = 1 + (nfact / chunck_per_cycle );
		float * d_A;
		float * d_B;
		float * d_obj_vals;
		lint * d_obj_perms;
		float * h_obj_vals = (float *) malloc( chunck_per_cycle*sizeof(float) );
		lint * h_obj_perms = (lint *) malloc( chunck_per_cycle*sizeof(lint) );
		// create some timing variables
		cudaEvent_t gpu_start, gpu_end;
		float gputime;
		cudaEventCreate(&gpu_start);
		cudaEventCreate(&gpu_end);
		cudaEventRecord(gpu_start, 0);
		cudaMalloc((void **)&d_A, n*n*sizeof(float) );
		cudaMalloc((void **)&d_B, n*n*sizeof(float) );
		cudaMalloc((void **)&d_obj_vals, chunck_per_cycle*sizeof(float) );
		cudaMalloc((void **)&d_obj_perms, chunck_per_cycle*sizeof(lint) );
		cudaMemcpy( (void*) d_A , (void*) A , n*n*sizeof(float) , cudaMemcpyHostToDevice );
		cudaMemcpy( (void*) d_B , (void*) B , n*n*sizeof(float) , cudaMemcpyHostToDevice );
		cudaEventRecord(gpu_end, 0);
		cudaEventSynchronize(gpu_end); //needed so the elapsed-time measurement is accurate
		cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
		printf ("PARALLEL: Time it took to allocate space: %f\n", gputime/1000); fflush(stdout);
		// this is the function pointer that we will pass to the GPU
		float (*h_d_per_metric)(int , float *, float * , int * );
		// we might want to use different norms
		if (norm_to_use == 1){
			cudaMemcpyFromSymbol(&h_d_per_metric, d_ptr_L1_norm, sizeof( float (*)(int , float *, float * , int * ) ));
		}
		if (norm_to_use == 2){
			cudaMemcpyFromSymbol(&h_d_per_metric, d_ptr_fro_norm_square, sizeof( float (*)(int , float *, float * , int * ) ));
		}
		cudaEventRecord(gpu_start, 0);
		// dynamic shared memory: room for both n x n matrices
		kernel_to_compute_optimal_match<<<numblocks,numthreadsperblock,n*n*2*sizeof(float)>>>(chunck_per_cycle,num_stuff_per_thread , nfact, n, d_A, d_B, h_d_per_metric , d_obj_vals, d_obj_perms);
		cudaEventRecord(gpu_end, 0);
		cudaEventSynchronize(gpu_end);
		cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
		printf ("PARALLEL: Time it took to run the kernel: %f\n", gputime/1000); fflush(stdout);
		// now we copy the stuff back to the CPU and get the maximum by hand
		cudaEventRecord(gpu_start, 0);
		cudaMemcpy( (void*) h_obj_vals , (void*) d_obj_vals , chunck_per_cycle*sizeof(float) , cudaMemcpyDeviceToHost );
		// BUG FIX: this copy was sized with sizeof(float); use sizeof(lint)
		// so it stays correct if lint ever changes width.
		cudaMemcpy( (void*) h_obj_perms , (void*) d_obj_perms , chunck_per_cycle*sizeof(lint) , cudaMemcpyDeviceToHost );
		cudaEventRecord(gpu_end, 0);
		cudaEventSynchronize(gpu_end);
		cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
		printf ("PARALLEL: Time it took to copy stuff back to the CPU: %f\n", gputime/1000); fflush(stdout);
		cpu_start = clock();
		float best_gpu_val = FLT_MAX;
		// BUG FIX: best_ix was uninitialized; if every slot held FLT_MAX the
		// strict < below never fired and garbage was decoded.
		lint best_ix = 0;
		for (int i = 0 ; i < chunck_per_cycle ; i++){
			float val = h_obj_vals[i];
			if (val < best_gpu_val){
				best_gpu_val = val;
				// NOTE(review): this takes the winning slot index i, then maps
				// it through h_obj_perms below — preserved from the original.
				best_ix = i;
			}
		}
		int * perm = (int *) malloc(n * sizeof(int));
		int * scrap = (int *) malloc(n * sizeof(int));
		index_to_perm(best_ix, n, perm, scrap);
		printf("GPU Opt Val = %f\n", best_gpu_val);
		printvec(perm, n);
		save_vec_to_file(perm, n , fileoutput);
		cpu_end = clock();
		cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
		printf("SERIAL: Time to compute the last step = %f\n",cputime); fflush(stdout);
		cudaFree(d_A);
		cudaFree(d_B);
		cudaFree(d_obj_vals);
		cudaFree(d_obj_perms);
		free(h_obj_vals);
		free(h_obj_perms);
		free(perm);
		free(scrap);
		gerror( cudaPeekAtLastError() );
		cudaDeviceSynchronize();
	}
	free(A);
	free(B);
	return 0;
}
.text
#APP
#NO_APP
.globl _Z15fro_norm_squareiPfS_Pi
.type _Z15fro_norm_squareiPfS_Pi, @function
_Z15fro_norm_squareiPfS_Pi:
.LFB2058:
.cfi_startproc
endbr64
testl %edi, %edi
jle .L5
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %edi, %r8d
movq %rdx, %r9
movq %rcx, %rbp
movq %rsi, %rbx
movslq %edi, %r12
leaq 0(,%r12,4), %r10
leaq (%r10,%rcx), %r11
movl $0, %esi
pxor %xmm1, %xmm1
.L3:
movl 0(%rbp,%rsi,4), %eax
leal -1(%rax), %edi
movq %rbp, %rdx
movq %rbx, %rcx
.L4:
movl (%rdx), %eax
subl $1, %eax
imull %r8d, %eax
addl %edi, %eax
cltq
movss (%rcx), %xmm0
subss (%r9,%rax,4), %xmm0
mulss %xmm0, %xmm0
addss %xmm0, %xmm1
addq %r10, %rcx
addq $4, %rdx
cmpq %r11, %rdx
jne .L4
addq $1, %rsi
addq $4, %rbx
cmpq %r12, %rsi
jne .L3
movaps %xmm1, %xmm0
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
pxor %xmm1, %xmm1
movaps %xmm1, %xmm0
ret
.cfi_endproc
.LFE2058:
.size _Z15fro_norm_squareiPfS_Pi, .-_Z15fro_norm_squareiPfS_Pi
.globl _Z7L1_normiPfS_Pi
.type _Z7L1_normiPfS_Pi, @function
_Z7L1_normiPfS_Pi:
.LFB2059:
.cfi_startproc
endbr64
testl %edi, %edi
jle .L15
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdx, %r8
movq %rcx, %rbp
movq %rsi, %rbx
movslq %edi, %r12
leaq 0(,%r12,4), %r9
leaq (%r9,%rcx), %r10
movl $0, %r11d
pxor %xmm1, %xmm1
movss .LC1(%rip), %xmm2
.L13:
movl 0(%rbp,%r11,4), %eax
leal -1(%rax), %esi
movq %rbp, %rdx
movq %rbx, %rcx
.L14:
movl (%rdx), %eax
subl $1, %eax
imull %edi, %eax
addl %esi, %eax
cltq
movss (%rcx), %xmm0
subss (%r8,%rax,4), %xmm0
andps %xmm2, %xmm0
addss %xmm0, %xmm1
addq %r9, %rcx
addq $4, %rdx
cmpq %r10, %rdx
jne .L14
addq $1, %r11
addq $4, %rbx
cmpq %r12, %r11
jne .L13
movaps %xmm1, %xmm0
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
pxor %xmm1, %xmm1
movaps %xmm1, %xmm0
ret
.cfi_endproc
.LFE2059:
.size _Z7L1_normiPfS_Pi, .-_Z7L1_normiPfS_Pi
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2074:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2074:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "%d "
.LC3:
.string "\n"
.text
.globl _Z8printvecPii
.type _Z8printvecPii, @function
_Z8printvecPii:
.LFB2063:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
testl %esi, %esi
jle .L24
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %r12
leaq .LC2(%rip), %rbp
.L25:
movl (%rbx), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L25
.L24:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _Z8printvecPii, .-_Z8printvecPii
.section .rodata.str1.1
.LC4:
.string "%f "
.text
.globl _Z13printfloatvecPfi
.type _Z13printfloatvecPfi, @function
_Z13printfloatvecPfi:
.LFB2064:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
testl %esi, %esi
jle .L29
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %r12
leaq .LC4(%rip), %rbp
.L30:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L30
.L29:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _Z13printfloatvecPfi, .-_Z13printfloatvecPfi
.globl _Z21compute_optimal_matchiPfS_PFfiS_S_PiES0_
.type _Z21compute_optimal_matchiPfS_PFfiS_S_PiES0_, @function
_Z21compute_optimal_matchiPfS_PFfiS_S_PiES0_:
.LFB2065:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %r12d
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 40(%rsp)
movslq %edi, %r14
leaq 0(,%r14,4), %r13
movq %r13, %rdi
call malloc@PLT
movq %rax, %rbp
movq %r13, %rdi
call malloc@PLT
movq %rax, %rbx
testl %r12d, %r12d
jle .L34
movl $1, %eax
.L35:
movl %eax, -4(%rbp,%rax,4)
movq %rax, %rdx
addq $1, %rax
cmpq %rdx, %r14
jne .L35
.L34:
leaq -4(%rbp,%r13), %rcx
movss .LC5(%rip), %xmm2
movss %xmm2, 4(%rsp)
movl %r12d, %r15d
cmpl (%rcx), %r12d
jl .L37
movq %rcx, 32(%rsp)
jmp .L36
.L68:
testl %r12d, %r12d
jg .L49
addl $1, 0(%rbp)
movss %xmm0, 4(%rsp)
jmp .L43
.L49:
movl $0, %eax
movq 40(%rsp), %rcx
.L42:
movl (%rbx,%rax), %edx
movl %edx, (%rcx,%rax)
addq $4, %rax
cmpq %rax, %r13
jne .L42
movss %xmm0, 4(%rsp)
jmp .L50
.L44:
addq $1, %rax
cmpq %rax, %r15
je .L43
.L45:
cmpl %r12d, -4(%rbp,%rax,4)
jle .L44
movl %eax, -4(%rbp,%rax,4)
addl $1, 0(%rbp,%rax,4)
jmp .L44
.L69:
movq %rbx, %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movl %r12d, %edi
movq 24(%rsp), %rax
call *%rax
movss 4(%rsp), %xmm3
comiss %xmm0, %xmm3
ja .L68
addl $1, 0(%rbp)
.L43:
movq 32(%rsp), %rax
cmpl %r12d, (%rax)
jg .L37
.L36:
movl $1, %eax
testl %r12d, %r12d
jle .L69
.L38:
movl %eax, -4(%rbx,%rax,4)
movq %rax, %rdx
addq $1, %rax
cmpq %rdx, %r14
jne .L38
movl $0, %eax
.L39:
movl (%rbx,%rax), %ecx
movl 0(%rbp,%rax), %edi
leal -1(%rdi), %edx
movslq %edx, %rdx
leaq (%rbx,%rdx,4), %rdx
movl (%rdx), %esi
movl %esi, (%rbx,%rax)
movl %ecx, (%rdx)
addq $4, %rax
cmpq %rax, %r13
jne .L39
movq %rbx, %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movl %r12d, %edi
movq 24(%rsp), %rax
call *%rax
movss 4(%rsp), %xmm1
comiss %xmm0, %xmm1
ja .L49
.L50:
addl $1, 0(%rbp)
cmpl $1, %r12d
jle .L43
movl $1, %eax
jmp .L45
.L37:
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movss 4(%rsp), %xmm0
addq $56, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _Z21compute_optimal_matchiPfS_PFfiS_S_PiES0_, .-_Z21compute_optimal_matchiPfS_PFfiS_S_PiES0_
.globl _Z13index_to_permjiPiS_
.type _Z13index_to_permjiPiS_, @function
_Z13index_to_permjiPiS_:
.LFB2066:
.cfi_startproc
endbr64
movl %edi, %eax
movq %rdx, %rdi
movq %rcx, %r9
testl %esi, %esi
jle .L70
movq %rcx, %r8
movl %esi, %ecx
.L72:
movl $0, %edx
divl %ecx
addl $1, %edx
movl %edx, (%r8)
addq $4, %r8
subl $1, %ecx
jne .L72
movslq %esi, %rsi
movl $1, %eax
.L73:
movl %eax, -4(%rdi,%rax,4)
movq %rax, %rdx
addq $1, %rax
cmpq %rsi, %rdx
jne .L73
movl $0, %eax
.L74:
movl (%rdi,%rax,4), %ecx
movl %eax, %edx
addl (%r9,%rax,4), %edx
subl $1, %edx
movslq %edx, %rdx
leaq (%rdi,%rdx,4), %rdx
movl (%rdx), %r8d
movl %r8d, (%rdi,%rax,4)
movl %ecx, (%rdx)
addq $1, %rax
cmpq %rsi, %rax
jne .L74
.L70:
ret
.cfi_endproc
.LFE2066:
.size _Z13index_to_permjiPiS_, .-_Z13index_to_permjiPiS_
.section .text._Z4facti,"axG",@progbits,_Z4facti,comdat
.weak _Z4facti
.type _Z4facti, @function
_Z4facti:
.LFB2067:
.cfi_startproc
endbr64
movl $1, %eax
cmpl $1, %edi
jle .L90
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl %edi, %ebx
leal -1(%rdi), %ebp
cmpl $1, %ebp
jle .L80
leal -2(%rdi), %r12d
cmpl $1, %r12d
jle .L81
leal -3(%rdi), %r13d
cmpl $1, %r13d
jle .L82
leal -4(%rdi), %r14d
cmpl $1, %r14d
jle .L83
leal -5(%rdi), %edi
call _Z4facti
imull %r14d, %eax
.L83:
imull %r13d, %eax
.L82:
imull %r12d, %eax
.L81:
imull %ebp, %eax
.L80:
imull %ebx, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L90:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
ret
.cfi_endproc
.LFE2067:
.size _Z4facti, .-_Z4facti
.text
.globl _Z15test_index_permi
.type _Z15test_index_permi, @function
_Z15test_index_permi:
.LFB2068:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %edi, %ebx
movslq %edi, %rbp
salq $2, %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r15
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r14
movl $0, %ebp
movl $1, %r12d
jmp .L94
.L110:
leal -6(%rbx), %edi
call _Z4facti
movl 12(%rsp), %ecx
imull %eax, %ecx
movl %ecx, %eax
.L100:
movl 8(%rsp), %esi
imull %eax, %esi
movl %esi, %eax
.L99:
movl 4(%rsp), %edi
imull %eax, %edi
movl %edi, %eax
.L98:
movl (%rsp), %esi
imull %eax, %esi
movl %esi, %eax
.L97:
imull %r13d, %eax
.L96:
imull %ebx, %eax
.L95:
cmpl %eax, %ebp
jge .L109
movq %r14, %rcx
movq %r15, %rdx
movl %ebx, %esi
movl %ebp, %edi
call _Z13index_to_permjiPiS_
addl $1, %ebp
.L94:
movl %r12d, %eax
cmpl $1, %ebx
jle .L95
leal -1(%rbx), %r13d
cmpl $1, %r13d
jle .L96
leal -2(%rbx), %eax
movl %eax, %edx
movl %eax, (%rsp)
movl %r12d, %eax
cmpl $1, %edx
jle .L97
leal -3(%rbx), %eax
movl %eax, %ecx
movl %eax, 4(%rsp)
movl %r12d, %eax
cmpl $1, %ecx
jle .L98
leal -4(%rbx), %eax
movl %eax, %esi
movl %eax, 8(%rsp)
movl %r12d, %eax
cmpl $1, %esi
jle .L99
leal -5(%rbx), %eax
movl %eax, %ecx
movl $1, %eax
movl %ecx, 12(%rsp)
cmpl $1, %ecx
jg .L110
jmp .L100
.L109:
movq %r15, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2068:
.size _Z15test_index_permi, .-_Z15test_index_permi
.section .rodata.str1.1
.LC6:
.string "r"
.LC7:
.string "%d %d\n"
.text
.globl _Z23read_graph_into_adj_matPcPii
.type _Z23read_graph_into_adj_matPcPii, @function
_Z23read_graph_into_adj_matPcPii:
.LFB2069:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r12
movq %rsi, %rbx
movl %edx, %r14d
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movl (%rsi), %ebp
cmpl $-1, %ebp
je .L120
.L112:
movslq (%rbx), %rdi
leaq 0(,%rdi,4), %rsi
call calloc@PLT
movq %rax, %r13
leaq .LC6(%rip), %rsi
movq %r12, %rdi
call fopen@PLT
movq %rax, %rbp
leaq .LC7(%rip), %r12
.L115:
leaq 4(%rsp), %rcx
movq %rsp, %rdx
movq %r12, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
cmpl $-1, %eax
je .L121
movl (%rbx), %edx
movl (%rsp), %eax
cmpl %eax, %edx
jl .L115
movl 4(%rsp), %ecx
testl %eax, %eax
setg %dil
testl %ecx, %ecx
setg %sil
testb %sil, %dil
je .L115
cmpl %ecx, %edx
jl .L115
subl $1, %eax
subl $1, %ecx
movl %edx, %esi
imull %ecx, %esi
addl %eax, %esi
movslq %esi, %rsi
movl $0x3f800000, 0(%r13,%rsi,4)
testl %r14d, %r14d
jne .L115
imull %eax, %edx
leal (%rdx,%rcx), %eax
cltq
movl $0x3f800000, 0(%r13,%rax,4)
jmp .L115
.L120:
leaq .LC6(%rip), %rsi
call fopen@PLT
movq %rax, %r13
leaq .LC7(%rip), %r15
.L113:
leaq 4(%rsp), %rcx
movq %rsp, %rdx
movq %r15, %rsi
movq %r13, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
cmpl $-1, %eax
je .L122
movl (%rsp), %eax
movl 4(%rsp), %edx
cmpl %edx, %eax
cmovl %edx, %eax
cmpl %eax, %ebp
cmovl %eax, %ebp
jmp .L113
.L122:
movq %r13, %rdi
call fclose@PLT
movl %ebp, (%rbx)
jmp .L112
.L121:
movq %rbp, %rdi
call fclose@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L123
movq %r13, %rax
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L123:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2069:
.size _Z23read_graph_into_adj_matPcPii, .-_Z23read_graph_into_adj_matPcPii
.section .rodata.str1.1
.LC9:
.string "w"
.LC10:
.string "%d"
.text
.globl _Z16save_vec_to_filePiiPc
.type _Z16save_vec_to_filePiiPc, @function
_Z16save_vec_to_filePiiPc:
.LFB2070:
.cfi_startproc
endbr64
cmpl $1, %esi
jg .L132
ret
.L132:
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r13
movl %esi, %ebp
leaq .LC9(%rip), %rsi
movq %rdx, %rdi
call fopen@PLT
movq %rax, %r12
movq %r13, %rbx
leal -2(%rbp), %eax
leaq 4(%r13,%rax,4), %r15
leaq .LC2(%rip), %r14
.L126:
movl (%rbx), %ecx
movq %r14, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $4, %rbx
cmpq %r15, %rbx
jne .L126
movslq %ebp, %rbp
movl -4(%r13,%rbp,4), %ecx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq %r12, %rdi
call fclose@PLT
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2070:
.size _Z16save_vec_to_filePiiPc, .-_Z16save_vec_to_filePiiPc
.globl _Z72__device_stub__Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_PjiijiPfS_PFfiS_S_PiES_Pj
.type _Z72__device_stub__Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_PjiijiPfS_PFfiS_S_PiES_Pj, @function
_Z72__device_stub__Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_PjiijiPfS_PFfiS_S_PiES_Pj:
.LFB2096:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movl %edx, 52(%rsp)
movl %ecx, 48(%rsp)
movq %r8, 40(%rsp)
movq %r9, 32(%rsp)
movq 224(%rsp), %rax
movq %rax, 24(%rsp)
movq 232(%rsp), %rax
movq %rax, 16(%rsp)
movq 240(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 52(%rsp), %rax
movq %rax, 144(%rsp)
leaq 48(%rsp), %rax
movq %rax, 152(%rsp)
leaq 40(%rsp), %rax
movq %rax, 160(%rsp)
leaq 32(%rsp), %rax
movq %rax, 168(%rsp)
leaq 24(%rsp), %rax
movq %rax, 176(%rsp)
leaq 16(%rsp), %rax
movq %rax, 184(%rsp)
leaq 8(%rsp), %rax
movq %rax, 192(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L137
.L133:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L138
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L137:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L133
.L138:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2096:
.size _Z72__device_stub__Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_PjiijiPfS_PFfiS_S_PiES_Pj, .-_Z72__device_stub__Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_PjiijiPfS_PFfiS_S_PiES_Pj
.globl _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj
.type _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj, @function
_Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj:
.LFB2097:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z72__device_stub__Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_PjiijiPfS_PFfiS_S_PiES_Pj
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2097:
.size _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj, .-_Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC11:
.string "The arguments must be filenameA, filenameB, outputfile, L1vsL2, directed/undirected, gpu/cpu, size\n"
.align 8
.LC12:
.string "Error, graphs of different sizes\n"
.section .rodata.str1.1
.LC13:
.string "CPU Opt Val = %f\n"
.section .rodata.str1.8
.align 8
.LC15:
.string "SERIAL: Time to compute opt = %f\n"
.align 8
.LC17:
.string "PARALLEL: Time it took to allocate space: %f\n"
.align 8
.LC18:
.string "PARALLEL: Time it took to run the kernel: %f\n"
.align 8
.LC19:
.string "PARALLEL: Time it took to copy stuff back to the CPU: %f\n"
.section .rodata.str1.1
.LC20:
.string "GPU Opt Val = %f\n"
.section .rodata.str1.8
.align 8
.LC21:
.string "SERIAL: Time to compute the last step = %f\n"
.align 8
.LC22:
.string "/home/ubuntu/Datasets/stackv2/train-structured/bentoayr/exact_graph_match/master/bruteforce.cu"
.section .rodata.str1.1
.LC23:
.string "GPUassert: %s %s %d\n"
.text
.globl main
.type main, @function
main:
.LFB2071:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
cmpl $8, %edi
je .L142
leaq .LC11(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
.L143:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L160
movl $0, %eax
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L142:
.cfi_restore_state
movq %rsi, %rbp
movq 8(%rsi), %r14
movq 16(%rsi), %r13
movq 24(%rsi), %r15
movq 32(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, (%rsp)
movq 40(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r12d
movq 48(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, 24(%rsp)
movq 56(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 60(%rsp)
movl %eax, 64(%rsp)
leaq 60(%rsp), %rsi
movl %r12d, %edx
movq %r14, %rdi
call _Z23read_graph_into_adj_matPcPii
movq %rax, 8(%rsp)
leaq 64(%rsp), %rsi
movl %r12d, %edx
movq %r13, %rdi
call _Z23read_graph_into_adj_matPcPii
movq %rax, 16(%rsp)
movl 60(%rsp), %r12d
cmpl 64(%rsp), %r12d
je .L144
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L143
.L144:
movl %r12d, %edi
call _Z4facti
movl %eax, 32(%rsp)
cmpl $1, 24(%rsp)
je .L161
movl $0, %edi
call cudaSetDevice@PLT
call cudaDeviceReset@PLT
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbp
movl $4194304, %edi
call malloc@PLT
movq %rax, 24(%rsp)
leaq 104(%rsp), %rdi
call cudaEventCreate@PLT
leaq 112(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 104(%rsp), %rdi
call cudaEventRecord@PLT
movl %r12d, %r14d
imull %r12d, %r14d
movslq %r14d, %r13
salq $2, %r13
leaq 72(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 96(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq 8(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r13, %rdx
movq 16(%rsp), %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 112(%rsp), %rdi
call cudaEventRecord@PLT
movq 112(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 68(%rsp), %rdi
movq 112(%rsp), %rdx
movq 104(%rsp), %rsi
call cudaEventElapsedTime@PLT
movss 68(%rsp), %xmm0
divss .LC16(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq stdout(%rip), %rdi
call fflush@PLT
cmpl $1, (%rsp)
je .L162
cmpl $2, (%rsp)
je .L163
.L150:
movl $0, %esi
movq 104(%rsp), %rdi
call cudaEventRecord@PLT
movl $1024, 140(%rsp)
movl $1, 144(%rsp)
movl $1024, 128(%rsp)
movl $1, 132(%rsp)
leal (%r14,%r14), %eax
cltq
movl $0, %r9d
leaq 0(,%rax,4), %r8
movq 140(%rsp), %rdx
movl $1, %ecx
movq 128(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L164
.L151:
movl $0, %esi
movq 112(%rsp), %rdi
call cudaEventRecord@PLT
movq 112(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 68(%rsp), %r13
movq 112(%rsp), %rdx
movq 104(%rsp), %rsi
movq %r13, %rdi
call cudaEventElapsedTime@PLT
movss 68(%rsp), %xmm0
divss .LC16(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq stdout(%rip), %rdi
call fflush@PLT
movl $0, %esi
movq 104(%rsp), %rdi
call cudaEventRecord@PLT
movl $2, %ecx
movl $4194304, %edx
movq 88(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movl $4194304, %edx
movq 96(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 112(%rsp), %rdi
call cudaEventRecord@PLT
movq 112(%rsp), %rdi
call cudaEventSynchronize@PLT
movq 112(%rsp), %rdx
movq 104(%rsp), %rsi
movq %r13, %rdi
call cudaEventElapsedTime@PLT
movss 68(%rsp), %xmm0
divss .LC16(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq stdout(%rip), %rdi
call fflush@PLT
call clock@PLT
movq %rax, 32(%rsp)
movl $0, %eax
movss .LC5(%rip), %xmm2
movss %xmm2, (%rsp)
.L154:
movss 0(%rbp,%rax,4), %xmm0
movss (%rsp), %xmm1
comiss %xmm0, %xmm1
cmova %eax, %ebx
minss %xmm1, %xmm0
movss %xmm0, (%rsp)
addq $1, %rax
cmpq $1048576, %rax
jne .L154
movslq %r12d, %r14
salq $2, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %r13
movq %r14, %rdi
call malloc@PLT
movq %rax, %r14
movq %rax, %rcx
movq %r13, %rdx
movl %r12d, %esi
movl %ebx, %edi
call _Z13index_to_permjiPiS_
pxor %xmm0, %xmm0
cvtss2sd (%rsp), %xmm0
leaq .LC20(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl %r12d, %esi
movq %r13, %rdi
call _Z8printvecPii
movq %r15, %rdx
movl %r12d, %esi
movq %r13, %rdi
call _Z16save_vec_to_filePiiPc
call clock@PLT
movq 32(%rsp), %rcx
subq %rcx, %rax
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
divss .LC14(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC21(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq stdout(%rip), %rdi
call fflush@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 96(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
call cudaPeekAtLastError@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L165
call cudaDeviceSynchronize@PLT
.L148:
movq 8(%rsp), %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
jmp .L143
.L161:
movslq %r12d, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, %rbx
call clock@PLT
movq %rax, %rbp
cmpl $1, (%rsp)
je .L166
cmpl $2, (%rsp)
je .L167
.L147:
call clock@PLT
movq %rax, %r13
pxor %xmm0, %xmm0
cvtss2sd 44(%rsp), %xmm0
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl %r12d, %esi
movq %rbx, %rdi
call _Z8printvecPii
subq %rbp, %r13
pxor %xmm0, %xmm0
cvtsi2ssq %r13, %xmm0
divss .LC14(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq stdout(%rip), %rdi
call fflush@PLT
movq %r15, %rdx
movl %r12d, %esi
movq %rbx, %rdi
call _Z16save_vec_to_filePiiPc
movq %rbx, %rdi
call free@PLT
jmp .L148
.L166:
movq %rbx, %r8
leaq _Z7L1_normiPfS_Pi(%rip), %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movl %r12d, %edi
call _Z21compute_optimal_matchiPfS_PFfiS_S_PiES0_
movss %xmm0, 44(%rsp)
jmp .L147
.L167:
movq %rbx, %r8
leaq _Z15fro_norm_squareiPfS_Pi(%rip), %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movl %r12d, %edi
call _Z21compute_optimal_matchiPfS_PFfiS_S_PiES0_
movss %xmm0, 44(%rsp)
jmp .L147
.L162:
leaq 120(%rsp), %rdi
movl $2, %r8d
movl $0, %ecx
movl $8, %edx
leaq _ZL13d_ptr_L1_norm(%rip), %rsi
call cudaMemcpyFromSymbol@PLT
jmp .L150
.L163:
leaq 120(%rsp), %rdi
movl $2, %r8d
movl $0, %ecx
movl $8, %edx
leaq _ZL21d_ptr_fro_norm_square(%rip), %rsi
call cudaMemcpyFromSymbol@PLT
jmp .L150
.L164:
movl 32(%rsp), %edx
movl %edx, %esi
shrl $20, %esi
addl $1, %esi
subq $8, %rsp
.cfi_def_cfa_offset 232
pushq 104(%rsp)
.cfi_def_cfa_offset 240
pushq 104(%rsp)
.cfi_def_cfa_offset 248
pushq 144(%rsp)
.cfi_def_cfa_offset 256
movq 112(%rsp), %r9
movq 104(%rsp), %r8
movl %r12d, %ecx
movl $1048576, %edi
call _Z72__device_stub__Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_PjiijiPfS_PFfiS_S_PiES_Pj
addq $32, %rsp
.cfi_def_cfa_offset 224
jmp .L151
.L165:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl $480, %r9d
leaq .LC22(%rip), %r8
leaq .LC23(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl %ebx, %edi
call exit@PLT
.L160:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2071:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC24:
.string "_Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj"
.section .rodata.str1.1
.LC25:
.string "d_ptr_fro_norm_square"
.LC26:
.string "d_ptr_L1_norm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2099:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC24(%rip), %rdx
movq %rdx, %rcx
leaq _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC25(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21d_ptr_fro_norm_square(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC26(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13d_ptr_L1_norm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2099:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL13d_ptr_L1_norm
.comm _ZL13d_ptr_L1_norm,8,8
.local _ZL21d_ptr_fro_norm_square
.comm _ZL21d_ptr_fro_norm_square,8,8
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC1:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC5:
.long 2139095039
.align 4
.LC14:
.long 1232348160
.align 4
.LC16:
.long 1148846080
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // in this code we do not use sparsity because the matrices are small
// all matrices therefore are full
// we assume that all the matrices are stored in a linear array, in a column major form
// the maximum graph size is set to 12 in the variable MAX_N_PERM but it can be increased if the GPU is more powerfull
// TO-DO: change the code so that we can scan just a fraction of all the n! possible permutations.
// This will allow using multiple GPUs in parallel
#include<stdio.h>
#include <time.h>
#include<float.h>
#define MAX_N_PERM 12
typedef unsigned int lint;
#define gerror(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// the code will allow us to use different norms
// the matrices are assume square
// the norm takes as input a permutation as well as two matrices
// do not forget that P is from 1...n while the indices in C are from 0 to n-1
__device__ __host__ float fro_norm_square(int n, float *A , float *B, int *perm){
float total = 0;
for (int i = 0; i < n; i++){ //go along column
for (int j = 0; j < n; j++){ // go along row
float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
//printf("-- (% f , %f, %f, i = %d, perm_i = %d,j =%d , perm_j = %d)-- ", A[i + n*j],B[ (perm[i]-1) + n*(perm[j]-1) ],value, i, perm[i]-1, j, perm[j]-1);
total = total + value*value;
}
}
return total;
}
__device__ float (*d_ptr_fro_norm_square)(int , float * , float *, int *) = fro_norm_square;
// notice that if the matrices are just adjecency matrices where all the weights are 1 then there is no difference between
// having this L1_norm or the fro_norm_square above
__device__ __host__ float L1_norm(int n, float *A , float *B, int *perm){
float total = 0;
for (int i = 0; i < n; i++){ //go along column
for (int j = 0; j < n; j++){ // go along row
float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
total = total + abs(value);
}
}
return total;
}
__device__ float (*d_ptr_L1_norm)(int , float * , float *, int *) = L1_norm;
// this function sets a list to consecutive numbers
__host__ __device__ inline void settoconsec(int *v, int n){
for (int i = 0; i < n; i++){
v[i] = i+1;
}
}
inline void copyvec(int *v_dest, int * v_source, int n){
for (int i = 0; i < n; i++){
v_dest[i] = v_source[i];
}
}
// this function swaps two elelents
__host__ __device__ inline void swap(int *v, int ix1, int ix2){
int tmp = v[ix1];
v[ix1] = v[ix2];
v[ix2] = tmp;
}
// this function prints a vector
__host__ __device__ void printvec(int *v, int n){
for (int i = 0; i < n ; i ++){
printf("%d ", v[i]);
}
printf("\n");
}
// this function prints a vector
__host__ __device__ void printfloatvec(float *v, int n){
for (int i = 0; i < n ; i ++){
printf("%f ", v[i]);
}
printf("\n");
}
// first we have a piece of code that computes the thing in serial form
// we assume that the matrices A and B are square and of the same dimension
// we do not use sparse matrices because the matrices are small anyway
// the norm we are computing is min_P || A - P^T B P ||
// we will also return the best permutation
// Serial brute force: scan all n! permutations, score each with
// metric(n, A, B, perm), return the minimum value and write the winning
// 1-based permutation into the caller-allocated bestperm (n ints).
// The local array v is a mixed-radix odometer (digit i runs over i+1..n);
// each digit vector is turned into a permutation by a swap sequence
// before being scored.
// NOTE(review): malloc results are unchecked; presumably n is small
// enough (n! iterations) that this path is only used for tiny graphs.
float compute_optimal_match(int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), int * bestperm ){
float opt_val = FLT_MAX;
int * v = (int *) malloc(n * sizeof(int) );
int * output = (int *) malloc(n * sizeof(int) );
settoconsec(v, n);
while( v[n-1] <= n ){
// note that the way we are going the swap here is a bit different because
// the elements from v are already in increasing form. Like
// 1 2 3 , 2 2 3 , 3 2 3, 1 3 3 , 2 3 3 , 3 3 3
// while in the parallel code the v is in the form
// 1 1 1 , 2 1 1, 3 1 1, 1 2 1, 2 2 1 , 3 2 1
settoconsec(output, n);
for( int i = 0; i < n ; i++){
swap(output, i, v[i]-1);
}
// at this point the vector output contains a permutation and we can compute a distance
float val = (*metric)( n , A , B , output );
if ( val < opt_val ){
opt_val = val;
copyvec( bestperm , output , n );
}
// advance the odometer: bump the fastest digit, then propagate carries
v[ 0 ] = v[ 0 ] + 1;
for (int i = 0; i < n-1 ; i++){
if( v[i] > n ){
v[i] = i+1;
v[ i + 1 ] = v[ i + 1 ] + 1;
}
}
}
free(output);
free(v);
return opt_val;
}
// this function transforms and index into a permutation
// the function requires a bit of scrap space
// Decode permutation index r (0 <= r < n!) into an explicit 1-based
// permutation of length n, written into perm. scrap is caller-provided
// working space of n ints; it receives the mixed-radix digits of r
// (bases n, n-1, ..., 1), each stored 1-based.
__device__ __host__ void index_to_perm(lint r, int n, int *perm, int * scrap){
for (int i = n ; i >=1; i--){
scrap[n - i] = (r % i) + 1;
r = r/i;
}
// note that the way we are going the swap here is a bit different because
// the elements from v are not in increasing form like in the cpu code.
// In the parallel code the scrap is in the form
// 1 1 1 , 2 1 1, 3 1 1, 1 2 1, 2 2 1 , 3 2 1
// but in the serial code it is
// 1 2 3 , 2 2 3 , 3 2 3, 1 3 3 , 2 3 3 , 3 3 3
settoconsec(perm, n);
for( int i = 0; i < n ; i++){
swap(perm, i, i + scrap[i]-1);
}
}
// n! computed iteratively; returns 1 for n <= 1. Plain int arithmetic,
// so the result overflows for large n (>= 13 with 32-bit int) — callers
// keep n at or below MAX_N_PERM.
inline int fact(int n){
    int result = 1;
    for (int k = 2; k <= n; k++) {
        result = result * k;
    }
    return result;
}
// this computes the optimal matching my testing different permutations in parallel
// we pass the nfact from outside to save time
// we cannot store the result of all evaluations in memory and then do a parallel max.
// there is just too much stuff to try. So each thread needs to keep a local max of several trials
// One GPU pass of the brute-force search. The grid holds chunck_per_cycle
// (= gridDim.x * blockDim.x) threads; thread `baseix` evaluates permutation
// indices baseix, baseix + chunck_per_cycle, ... (num_perm_per_thread of
// them, skipping any >= nfact), keeps a running best, and finally writes
// its best value and best permutation index to obj_vals[baseix] and
// obj_perms[baseix]. The host reduces those two arrays afterwards.
// Dynamic shared memory must be sized to 2*n*n floats (A followed by B).
__global__ void kernel_to_compute_optimal_match(int chunck_per_cycle, int num_perm_per_thread, lint nfact, int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), float * obj_vals, lint * obj_perms ){
int baseix = blockIdx.x*blockDim.x + threadIdx.x;;
lint ix = baseix;
// we copy A and B to shared memory because it might be faster when we are computing the norms
extern __shared__ float AB_shared_mem[];
// we need to split the shared memory into different parts
float * shared_A = AB_shared_mem;
float * shared_B = &AB_shared_mem[n*n];
// the first thread of each block does the copy for the corresponding block
// (serial copy by thread 0; the __syncthreads below publishes it to the block)
if (threadIdx.x == 0){
for (int i = 0; i < n*n ; i++){
shared_A[i] = A[i];
shared_B[i] = B[i];
}
}
__syncthreads();
float best_val = FLT_MAX;
// NOTE(review): best_perm_ix is only assigned when some ix < nfact; for a
// thread where that never happens the value written to obj_perms below is
// indeterminate. Its obj_vals entry stays FLT_MAX, so the host reduction
// should never select that slot — confirm the host side honors this.
lint best_perm_ix;
for (int i = 0; i < num_perm_per_thread ; i++){
ix = baseix + chunck_per_cycle*i;
// filter the stuff that does not matter
if (ix < nfact){
// probably we do not need more than 20 here
int perm[MAX_N_PERM];
int scrap[MAX_N_PERM];
index_to_perm( ix , n, perm, scrap);
float val = (*metric)( n, shared_A , shared_B, perm);
if (val < best_val){
best_val = val;
best_perm_ix = ix;
}
}
}
obj_vals[baseix] = best_val;
obj_perms[baseix] = best_perm_ix;
}
// Host-side smoke test: decode every index 0..n!-1 with index_to_perm.
// Results are discarded (the printvec call is commented out); this only
// exercises the decoder for crashes / out-of-bounds writes.
void test_index_perm(int n ){
// test the function that indexes permutations sequentially
int *perm = (int *) malloc(n * sizeof(int));
int *scrap = (int *) malloc(n * sizeof(int));
for (int r = 0; r < fact(n) ; r++){
index_to_perm(r, n, perm, scrap);
//printvec(perm,n);
}
free(perm);
free(scrap);
}
// this function will allocate space for A
// Read an edge-list file ("i j" per line, 1-based vertex ids) into a newly
// calloc'd dense n x n float adjacency matrix (column major, entries 0/1).
// If *graphsize == -1 the file is scanned once first and *graphsize is set
// to the largest vertex id seen. When directed == 0 every edge is mirrored
// so the matrix comes out symmetric. Edges with out-of-range endpoints are
// silently skipped. Caller owns (and must free) the returned buffer.
// NOTE(review): fopen results are unchecked — a missing file crashes on the
// first fscanf; consider validating the handle before use.
float * read_graph_into_adj_mat(char * filename, int *graphsize, int directed){
// if we are not given the graph size then we first read the file to try to estimate the size
// of the graph by trying to find the largest index used
// here we assume that the indices used are 1, 2, ..., n
if (*graphsize == -1){
FILE * graphfile = fopen(filename , "r");
int dim = -1;
int edge1, edge2;
while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) != EOF){
if (dim < edge1){
dim = edge1;
}
if (dim < edge2){
dim = edge2;
}
}
fclose(graphfile);
*graphsize = dim;
}
// we use calloc because we want most of the entries to be zero and just have to set a few to non-zero
// whatever edges are not specified in the file we are reading we will assume are zero
// calloc(nmemb, size): n blocks of n floats each, zero-initialized
float *A = (float *) calloc( (*graphsize) , (*graphsize)*sizeof(float) ) ;
FILE * graphfile = fopen(filename , "r");
int edge1, edge2;
while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) != EOF){
if (edge1 <= (*graphsize) && edge2 <= (*graphsize) && edge1 >=1 && edge2 >=1 ){
A[(edge1-1) + (edge2-1)* (*graphsize) ] = 1;
// if the graph is undirected, we force it to be undirected
if (directed == 0){
A[(edge2-1) + (edge1-1)* (*graphsize) ] = 1;
}
}
}
fclose(graphfile);
return A;
}
// this writes a vector to an output file
// Write the n ints of vec to output_file, space separated, with no trailing
// newline. Nothing is written (and no file is created) when n <= 0 or the
// file cannot be opened.
void save_vec_to_file(int * vec, int n , char* output_file){
// BUG FIX: the original guard was n > 1, which silently dropped
// single-element vectors; n >= 1 still skips genuinely empty input.
if (n >= 1){
FILE * vec_file = fopen(output_file , "w");
if (vec_file == NULL){
return; // cannot open the destination; leave things as they are
}
for (int i = 0; i < n-1; i++){
fprintf(vec_file,"%d ", vec[i]);
}
fprintf(vec_file,"%d", vec[n-1]);
fclose(vec_file);
}
}
int main(int argc,char *argv[]){
if (argc != 8){
printf("The arguments must be filenameA, filenameB, outputfile, L1vsL2, directed/undirected, gpu/cpu, size\n");
return 0;
}
char * filenameA = (char *) argv[1];
char * filenameB = (char *) argv[2];
char * fileoutput = (char *) argv[3];
int norm_to_use = atoi( argv[4] );
int directed = atoi( argv[5] );
int cpu_vs_gpu = atoi( argv[6] );
int graphsize = atoi( argv[7] );
int sizeA = graphsize;
int sizeB = graphsize;
float *A = read_graph_into_adj_mat( filenameA , &sizeA , directed );
float *B = read_graph_into_adj_mat( filenameB , &sizeB , directed );
if ( sizeA != sizeB ){
printf("Error, graphs of different sizes\n");
return 0;
}
clock_t cpu_start, cpu_end;
float cputime;
int n = sizeA;
lint nfact = fact(n);
if (cpu_vs_gpu == 1){
int * bestperm = (int *) malloc(n * sizeof( int ) ); //this is where we will keep the best perm
cpu_start = clock();
// we might want to try different norms
float val;
if (norm_to_use == 1){
val = compute_optimal_match(n, A, B, &L1_norm , bestperm);
}
if (norm_to_use == 2){
val = compute_optimal_match(n, A, B, &fro_norm_square , bestperm);
}
cpu_end = clock();
printf("CPU Opt Val = %f\n", val);
printvec(bestperm, n);
cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
printf("SERIAL: Time to compute opt = %f\n",cputime); fflush(stdout);
// store the vector in the output
save_vec_to_file(bestperm, n , fileoutput);
// free the vector
free(bestperm);
}else{
// now we have some GPU code
cudaSetDevice( 0 );
cudaDeviceReset();
// here we compute the division of work
// we try to make everything result in an iteger division of work
int numthreadsperblock = 1024;
int numblocks = 1024;
int chunck_per_cycle = numblocks*numthreadsperblock;
int num_stuff_per_thread = 1 + (nfact / chunck_per_cycle );
//printf("Threads per block = %d, Num blocks = %d , chunck_per_cycle = %d, num_stuff_per_thread = %d\n",numthreadsperblock,numblocks,chunck_per_cycle,num_stuff_per_thread);
float * d_A;
float * d_B;
float * d_obj_vals;
lint * d_obj_perms;
float * h_obj_vals = (float *) malloc( chunck_per_cycle*sizeof(float) );
lint * h_obj_perms = (lint *) malloc( chunck_per_cycle*sizeof(lint) );
// create some timing variables
cudaEvent_t gpu_start, gpu_end;
float gputime;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_end);
cudaEventRecord(gpu_start, 0);
cudaMalloc((void **)&d_A, n*n*sizeof(float) );
cudaMalloc((void **)&d_B, n*n*sizeof(float) );
cudaMalloc((void **)&d_obj_vals, chunck_per_cycle*sizeof(float) );
cudaMalloc((void **)&d_obj_perms, chunck_per_cycle*sizeof(lint) );
cudaMemcpy( (void*) d_A , (void*) A , n*n*sizeof(float) , cudaMemcpyHostToDevice );
cudaMemcpy( (void*) d_B , (void*) B , n*n*sizeof(float) , cudaMemcpyHostToDevice );
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end); //this is necessary to make sure that we measure time accurately. We can also use cudaDeviceSynchronize() but that is a bit of an overkill
cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
printf ("PARALLEL: Time it took to allocate space: %f\n", gputime/1000); fflush(stdout);
// this is the function pointer that we will pass to the GPU
float (*h_d_per_metric)(int , float *, float * , int * );
// we might want to use different norms
if (norm_to_use == 1){
cudaMemcpyFromSymbol(&h_d_per_metric, d_ptr_L1_norm, sizeof( float (*)(int , float *, float * , int * ) ));
}
if (norm_to_use == 2){
cudaMemcpyFromSymbol(&h_d_per_metric, d_ptr_fro_norm_square, sizeof( float (*)(int , float *, float * , int * ) ));
}
cudaEventRecord(gpu_start, 0);
kernel_to_compute_optimal_match<<<numblocks,numthreadsperblock,n*n*2*sizeof(float)>>>(chunck_per_cycle,num_stuff_per_thread , nfact, n, d_A, d_B, h_d_per_metric , d_obj_vals, d_obj_perms);
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end); //this is necessary to make sure that we measure time accurately. We can also use cudaDeviceSynchronize() but that is a bit of an overkill
cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
printf ("PARALLEL: Time it took to run the kernel: %f\n", gputime/1000); fflush(stdout);
// now we copy the stuff back to the CPU and get the maximum by hand
cudaEventRecord(gpu_start, 0);
cudaMemcpy( (void*) h_obj_vals , (void*) d_obj_vals , chunck_per_cycle*sizeof(float) , cudaMemcpyDeviceToHost );
cudaMemcpy( (void*) h_obj_perms , (void*) d_obj_perms , chunck_per_cycle*sizeof(float) , cudaMemcpyDeviceToHost );
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end); //this is necessary to make sure that we measure time accurately. We can also use cudaDeviceSynchronize() but that is a bit of an overkill
cudaEventElapsedTime(&gputime, gpu_start, gpu_end);
printf ("PARALLEL: Time it took to copy stuff back to the CPU: %f\n", gputime/1000); fflush(stdout);
cpu_start = clock();
float best_gpu_val = FLT_MAX;
lint best_ix;
for (int i = 0 ; i < chunck_per_cycle ; i++){
float val = h_obj_vals[i];
if (val < best_gpu_val){
best_gpu_val = val;
best_ix = i;
}
}
int * perm = (int *) malloc(n * sizeof(int));
int * scrap = (int *) malloc(n * sizeof(int));
index_to_perm(best_ix, n, perm, scrap);
printf("GPU Opt Val = %f\n", best_gpu_val);
printvec(perm, n);
save_vec_to_file(perm, n , fileoutput);
cpu_end = clock();
cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
printf("SERIAL: Time to compute the last step = %f\n",cputime); fflush(stdout);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_obj_vals);
cudaFree(d_obj_perms);
free(h_obj_vals);
free(h_obj_perms);
free(perm);
free(scrap);
gerror( cudaPeekAtLastError() );
cudaDeviceSynchronize();
}
free(A);
free(B);
return 0;
} | // in this code we do not use sparsity because the matrices are small
// all matrices therefore are full
// we assume that all the matrices are stored in a linear array, in a column major form
// the maximum graph size is set to 12 in the variable MAX_N_PERM but it can be increased if the GPU is more powerfull
// TO-DO: change the code so that we can scan just a fraction of all the n! possible permutations.
// This will allow using multiple GPUs in parallel
#include <hip/hip_runtime.h>
#include<stdio.h>
#include <time.h>
#include<float.h>
#define MAX_N_PERM 12
typedef unsigned int lint;
#define gerror(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable message to stderr when a HIP call fails and (by default)
// terminate the process with the error code. Used through the gerror()
// macro, which stamps in the call site's file and line.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// the code will allow us to use different norms
// the matrices are assume square
// the norm takes as input a permutation as well as two matrices
// do not forget that P is from 1...n while the indices in C are from 0 to n-1
__device__ __host__ float fro_norm_square(int n, float *A , float *B, int *perm){
float total = 0;
for (int i = 0; i < n; i++){ //go along column
for (int j = 0; j < n; j++){ // go along row
float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
//printf("-- (% f , %f, %f, i = %d, perm_i = %d,j =%d , perm_j = %d)-- ", A[i + n*j],B[ (perm[i]-1) + n*(perm[j]-1) ],value, i, perm[i]-1, j, perm[j]-1);
total = total + value*value;
}
}
return total;
}
__device__ float (*d_ptr_fro_norm_square)(int , float * , float *, int *) = fro_norm_square;
// notice that if the matrices are just adjecency matrices where all the weights are 1 then there is no difference between
// having this L1_norm or the fro_norm_square above
// L1 distance (sum of absolute differences) between A and P^T B P, with the
// permutation P given 1-based in perm[]. Matrices are n x n, column major.
// For 0/1 adjacency matrices this coincides with fro_norm_square above.
__device__ __host__ float L1_norm(int n, float *A , float *B, int *perm){
float total = 0;
for (int i = 0; i < n; i++){ //go along column
for (int j = 0; j < n; j++){ // go along row
float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
// BUG FIX: the original called abs(), which in host code resolves to the
// integer overload and truncates the float difference before taking the
// magnitude; fabsf keeps full float precision on host and device alike.
total = total + fabsf(value);
}
}
return total;
}
__device__ float (*d_ptr_L1_norm)(int , float * , float *, int *) = L1_norm;
// this function sets a list to consecutive numbers
__host__ __device__ inline void settoconsec(int *v, int n){
for (int i = 0; i < n; i++){
v[i] = i+1;
}
}
inline void copyvec(int *v_dest, int * v_source, int n){
for (int i = 0; i < n; i++){
v_dest[i] = v_source[i];
}
}
// this function swaps two elelents
__host__ __device__ inline void swap(int *v, int ix1, int ix2){
int tmp = v[ix1];
v[ix1] = v[ix2];
v[ix2] = tmp;
}
// this function prints a vector
__host__ __device__ void printvec(int *v, int n){
for (int i = 0; i < n ; i ++){
printf("%d ", v[i]);
}
printf("\n");
}
// this function prints a vector
__host__ __device__ void printfloatvec(float *v, int n){
for (int i = 0; i < n ; i ++){
printf("%f ", v[i]);
}
printf("\n");
}
// first we have a piece of code that computes the thing in serial form
// we assume that the matrices A and B are square and of the same dimension
// we do not use sparse matrices because the matrices are small anyway
// the norm we are computing is min_P || A - P^T B P ||
// we will also return the best permutation
float compute_optimal_match(int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), int * bestperm ){
float opt_val = FLT_MAX;
int * v = (int *) malloc(n * sizeof(int) );
int * output = (int *) malloc(n * sizeof(int) );
settoconsec(v, n);
while( v[n-1] <= n ){
// note that the way we are going the swap here is a bit different because
// the elements from v are already in increasing form. Like
// 1 2 3 , 2 2 3 , 3 2 3, 1 3 3 , 2 3 3 , 3 3 3
// while in the parallel code the v is in the form
// 1 1 1 , 2 1 1, 3 1 1, 1 2 1, 2 2 1 , 3 2 1
settoconsec(output, n);
for( int i = 0; i < n ; i++){
swap(output, i, v[i]-1);
}
// at this point the vector output contains a permutation and we can compute a distance
float val = (*metric)( n , A , B , output );
if ( val < opt_val ){
opt_val = val;
copyvec( bestperm , output , n );
}
v[ 0 ] = v[ 0 ] + 1;
for (int i = 0; i < n-1 ; i++){
if( v[i] > n ){
v[i] = i+1;
v[ i + 1 ] = v[ i + 1 ] + 1;
}
}
}
free(output);
free(v);
return opt_val;
}
// this function transforms and index into a permutation
// the function requires a bit of scrap space
__device__ __host__ void index_to_perm(lint r, int n, int *perm, int * scrap){
for (int i = n ; i >=1; i--){
scrap[n - i] = (r % i) + 1;
r = r/i;
}
// note that the way we are going the swap here is a bit different because
// the elements from v are not in increasing form like in the cpu code.
// In the parallel code the scrap is in the form
// 1 1 1 , 2 1 1, 3 1 1, 1 2 1, 2 2 1 , 3 2 1
// but in the serial code it is
// 1 2 3 , 2 2 3 , 3 2 3, 1 3 3 , 2 3 3 , 3 3 3
settoconsec(perm, n);
for( int i = 0; i < n ; i++){
swap(perm, i, i + scrap[i]-1);
}
}
inline int fact(int n){
if (n <=1)
return 1;
else
return n*fact(n-1);
}
// this computes the optimal matching my testing different permutations in parallel
// we pass the nfact from outside to save time
// we cannot store the result of all evaluations in memory and then do a parallel max.
// there is just too much stuff to try. So each thread needs to keep a local max of several trials
__global__ void kernel_to_compute_optimal_match(int chunck_per_cycle, int num_perm_per_thread, lint nfact, int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), float * obj_vals, lint * obj_perms ){
int baseix = blockIdx.x*blockDim.x + threadIdx.x;;
lint ix = baseix;
// we copy A and B to shared memory because it might be faster when we are computing the norms
extern __shared__ float AB_shared_mem[];
// we need to split the shared memory into different parts
float * shared_A = AB_shared_mem;
float * shared_B = &AB_shared_mem[n*n];
// the first thread of each block does the copy for the corresponding block
if (threadIdx.x == 0){
for (int i = 0; i < n*n ; i++){
shared_A[i] = A[i];
shared_B[i] = B[i];
}
}
__syncthreads();
float best_val = FLT_MAX;
lint best_perm_ix;
for (int i = 0; i < num_perm_per_thread ; i++){
ix = baseix + chunck_per_cycle*i;
// filter the stuff that does not matter
if (ix < nfact){
// probably we do not need more than 20 here
int perm[MAX_N_PERM];
int scrap[MAX_N_PERM];
index_to_perm( ix , n, perm, scrap);
float val = (*metric)( n, shared_A , shared_B, perm);
if (val < best_val){
best_val = val;
best_perm_ix = ix;
}
}
}
obj_vals[baseix] = best_val;
obj_perms[baseix] = best_perm_ix;
}
void test_index_perm(int n ){
// test the function that indexes permutations sequentially
int *perm = (int *) malloc(n * sizeof(int));
int *scrap = (int *) malloc(n * sizeof(int));
for (int r = 0; r < fact(n) ; r++){
index_to_perm(r, n, perm, scrap);
//printvec(perm,n);
}
free(perm);
free(scrap);
}
// this function will allocate space for A
float * read_graph_into_adj_mat(char * filename, int *graphsize, int directed){
// if we are not given the graph size then we first read the file to try to estimate the size
// of the graph by trying to find the largest index used
// here we assume that the indices used are 1, 2, ..., n
if (*graphsize == -1){
FILE * graphfile = fopen(filename , "r");
int dim = -1;
int edge1, edge2;
while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) != EOF){
if (dim < edge1){
dim = edge1;
}
if (dim < edge2){
dim = edge2;
}
}
fclose(graphfile);
*graphsize = dim;
}
// we use calloc because we want most of the entries to be zero and just have to set a few to non-zero
// whatever edges are not specified in the file we are reading we will assume are zero
float *A = (float *) calloc( (*graphsize) , (*graphsize)*sizeof(float) ) ;
FILE * graphfile = fopen(filename , "r");
int edge1, edge2;
while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) != EOF){
if (edge1 <= (*graphsize) && edge2 <= (*graphsize) && edge1 >=1 && edge2 >=1 ){
A[(edge1-1) + (edge2-1)* (*graphsize) ] = 1;
// if the graph is undirected, we force it to be undirected
if (directed == 0){
A[(edge2-1) + (edge1-1)* (*graphsize) ] = 1;
}
}
}
fclose(graphfile);
return A;
}
// this writes a vector to an output file
// Write the n ints of vec to output_file, space separated, with no trailing
// newline. Nothing is written (and no file is created) when n <= 0 or the
// file cannot be opened.
void save_vec_to_file(int * vec, int n , char* output_file){
// BUG FIX: the original guard was n > 1, which silently dropped
// single-element vectors; n >= 1 still skips genuinely empty input.
if (n >= 1){
FILE * vec_file = fopen(output_file , "w");
if (vec_file == NULL){
return; // cannot open the destination; leave things as they are
}
for (int i = 0; i < n-1; i++){
fprintf(vec_file,"%d ", vec[i]);
}
fprintf(vec_file,"%d", vec[n-1]);
fclose(vec_file);
}
}
// Entry point (HIP build). argv: filenameA filenameB outputfile L1vsL2(1|2)
// directed(0|1) cpu_vs_gpu(1 = serial CPU, else GPU) size(-1 = infer).
// Reads the two graphs, finds the permutation minimising the chosen norm
// (exhaustively, on CPU or GPU), prints timings and saves the permutation.
int main(int argc,char *argv[]){
    if (argc != 8){
        printf("The arguments must be filenameA, filenameB, outputfile, L1vsL2, directed/undirected, gpu/cpu, size\n");
        return 0;
    }
    char * filenameA = (char *) argv[1];
    char * filenameB = (char *) argv[2];
    char * fileoutput = (char *) argv[3];
    int norm_to_use = atoi( argv[4] );   // 1 = L1, 2 = squared Frobenius
    int directed = atoi( argv[5] );      // 0 forces a symmetric adjacency matrix
    int cpu_vs_gpu = atoi( argv[6] );    // 1 = serial CPU path, anything else = GPU
    int graphsize = atoi( argv[7] );     // -1 = infer the size from the files
    int sizeA = graphsize;
    int sizeB = graphsize;
    float *A = read_graph_into_adj_mat( filenameA , &sizeA , directed );
    float *B = read_graph_into_adj_mat( filenameB , &sizeB , directed );
    if ( sizeA != sizeB ){
        printf("Error, graphs of different sizes\n");
        return 0;
    }
    clock_t cpu_start, cpu_end;
    float cputime;
    int n = sizeA;
    lint nfact = fact(n);                // n! candidate permutations
    if (cpu_vs_gpu == 1){
        int * bestperm = (int *) malloc(n * sizeof( int ) ); //this is where we will keep the best perm
        cpu_start = clock();
        // we might want to try different norms
        float val = 0;                   // initialized so an invalid norm_to_use does not print garbage
        if (norm_to_use == 1){
            val = compute_optimal_match(n, A, B, &L1_norm , bestperm);
        }
        if (norm_to_use == 2){
            val = compute_optimal_match(n, A, B, &fro_norm_square , bestperm);
        }
        cpu_end = clock();
        printf("CPU Opt Val = %f\n", val);
        printvec(bestperm, n);
        cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
        printf("SERIAL: Time to compute opt = %f\n",cputime); fflush(stdout);
        // store the vector in the output
        save_vec_to_file(bestperm, n , fileoutput);
        free(bestperm);
    }else{
        // now we have some GPU code
        hipSetDevice( 0 );
        hipDeviceReset();
        // division of work: a fixed 1024x1024 grid; each thread scans
        // num_stuff_per_thread permutation indices strided by chunck_per_cycle
        int numthreadsperblock = 1024;
        int numblocks = 1024;
        int chunck_per_cycle = numblocks*numthreadsperblock;
        int num_stuff_per_thread = 1 + (nfact / chunck_per_cycle );
        float * d_A;
        float * d_B;
        float * d_obj_vals;
        lint * d_obj_perms;
        float * h_obj_vals = (float *) malloc( chunck_per_cycle*sizeof(float) );
        lint * h_obj_perms = (lint *) malloc( chunck_per_cycle*sizeof(lint) );
        // timing events
        hipEvent_t gpu_start, gpu_end;
        float gputime;
        hipEventCreate(&gpu_start);
        hipEventCreate(&gpu_end);
        hipEventRecord(gpu_start, 0);
        hipMalloc((void **)&d_A, n*n*sizeof(float) );
        hipMalloc((void **)&d_B, n*n*sizeof(float) );
        hipMalloc((void **)&d_obj_vals, chunck_per_cycle*sizeof(float) );
        hipMalloc((void **)&d_obj_perms, chunck_per_cycle*sizeof(lint) );
        hipMemcpy( (void*) d_A , (void*) A , n*n*sizeof(float) , hipMemcpyHostToDevice );
        hipMemcpy( (void*) d_B , (void*) B , n*n*sizeof(float) , hipMemcpyHostToDevice );
        hipEventRecord(gpu_end, 0);
        hipEventSynchronize(gpu_end); // ensure the allocation/copy timing is accurate
        hipEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to allocate space: %f\n", gputime/1000); fflush(stdout);
        // fetch the device-side metric function pointer to hand to the kernel
        float (*h_d_per_metric)(int , float *, float * , int * );
        if (norm_to_use == 1){
            hipMemcpyFromSymbol(&h_d_per_metric, HIP_SYMBOL(d_ptr_L1_norm), sizeof( float (*)(int , float *, float * , int * ) ));
        }
        if (norm_to_use == 2){
            hipMemcpyFromSymbol(&h_d_per_metric, HIP_SYMBOL(d_ptr_fro_norm_square), sizeof( float (*)(int , float *, float * , int * ) ));
        }
        hipEventRecord(gpu_start, 0);
        kernel_to_compute_optimal_match<<<numblocks,numthreadsperblock,n*n*2*sizeof(float)>>>(chunck_per_cycle,num_stuff_per_thread , nfact, n, d_A, d_B, h_d_per_metric , d_obj_vals, d_obj_perms);
        hipEventRecord(gpu_end, 0);
        hipEventSynchronize(gpu_end);
        hipEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to run the kernel: %f\n", gputime/1000); fflush(stdout);
        // now we copy the stuff back to the CPU and get the maximum by hand
        hipEventRecord(gpu_start, 0);
        hipMemcpy( (void*) h_obj_vals , (void*) d_obj_vals , chunck_per_cycle*sizeof(float) , hipMemcpyDeviceToHost );
        // BUG FIX: the original sized this copy with sizeof(float);
        // the element type of this buffer is lint
        hipMemcpy( (void*) h_obj_perms , (void*) d_obj_perms , chunck_per_cycle*sizeof(lint) , hipMemcpyDeviceToHost );
        hipEventRecord(gpu_end, 0);
        hipEventSynchronize(gpu_end);
        hipEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to copy stuff back to the CPU: %f\n", gputime/1000); fflush(stdout);
        cpu_start = clock();
        float best_gpu_val = FLT_MAX;
        lint best_ix = 0;                // initialized in case every slot holds FLT_MAX
        for (int i = 0 ; i < chunck_per_cycle ; i++){
            float val = h_obj_vals[i];
            if (val < best_gpu_val){
                best_gpu_val = val;
                best_ix = i;
            }
        }
        int * perm = (int *) malloc(n * sizeof(int));
        int * scrap = (int *) malloc(n * sizeof(int));
        // BUG FIX: best_ix is the winning thread SLOT; the permutation index
        // that slot found is recorded in h_obj_perms. The original decoded the
        // slot index itself, which is only correct when each thread scans a
        // single permutation (nfact < chunck_per_cycle).
        index_to_perm(h_obj_perms[best_ix], n, perm, scrap);
        printf("GPU Opt Val = %f\n", best_gpu_val);
        printvec(perm, n);
        save_vec_to_file(perm, n , fileoutput);
        cpu_end = clock();
        cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
        printf("SERIAL: Time to compute the last step = %f\n",cputime); fflush(stdout);
        hipFree(d_A);
        hipFree(d_B);
        hipFree(d_obj_vals);
        hipFree(d_obj_perms);
        free(h_obj_vals);
        free(h_obj_perms);
        free(perm);
        free(scrap);
        gerror( hipPeekAtLastError() );
        hipDeviceSynchronize();
    }
    free(A);
    free(B);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // in this code we do not use sparsity because the matrices are small
// all matrices therefore are full
// we assume that all the matrices are stored in a linear array, in a column major form
// the maximum graph size is set to 12 in the variable MAX_N_PERM but it can be increased if the GPU is more powerfull
// TO-DO: change the code so that we can scan just a fraction of all the n! possible permutations.
// This will allow using multiple GPUs in parallel
#include <hip/hip_runtime.h>
#include<stdio.h>
#include <time.h>
#include<float.h>
#define MAX_N_PERM 12
typedef unsigned int lint;
#define gerror(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// the code will allow us to use different norms
// the matrices are assume square
// the norm takes as input a permutation as well as two matrices
// do not forget that P is from 1...n while the indices in C are from 0 to n-1
__device__ __host__ float fro_norm_square(int n, float *A , float *B, int *perm){
float total = 0;
for (int i = 0; i < n; i++){ //go along column
for (int j = 0; j < n; j++){ // go along row
float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
//printf("-- (% f , %f, %f, i = %d, perm_i = %d,j =%d , perm_j = %d)-- ", A[i + n*j],B[ (perm[i]-1) + n*(perm[j]-1) ],value, i, perm[i]-1, j, perm[j]-1);
total = total + value*value;
}
}
return total;
}
__device__ float (*d_ptr_fro_norm_square)(int , float * , float *, int *) = fro_norm_square;
// notice that if the matrices are just adjecency matrices where all the weights are 1 then there is no difference between
// having this L1_norm or the fro_norm_square above
// L1 distance (sum of absolute differences) between A and P^T B P, with the
// permutation P given 1-based in perm[]. Matrices are n x n, column major.
// For 0/1 adjacency matrices this coincides with fro_norm_square above.
__device__ __host__ float L1_norm(int n, float *A , float *B, int *perm){
float total = 0;
for (int i = 0; i < n; i++){ //go along column
for (int j = 0; j < n; j++){ // go along row
float value = A[i + n*j] - B[ (perm[i]-1) + n*(perm[j]-1)];
// BUG FIX: the original called abs(), which in host code resolves to the
// integer overload and truncates the float difference before taking the
// magnitude; fabsf keeps full float precision on host and device alike.
total = total + fabsf(value);
}
}
return total;
}
__device__ float (*d_ptr_L1_norm)(int , float * , float *, int *) = L1_norm;
// this function sets a list to consecutive numbers
__host__ __device__ inline void settoconsec(int *v, int n){
for (int i = 0; i < n; i++){
v[i] = i+1;
}
}
inline void copyvec(int *v_dest, int * v_source, int n){
for (int i = 0; i < n; i++){
v_dest[i] = v_source[i];
}
}
// this function swaps two elelents
__host__ __device__ inline void swap(int *v, int ix1, int ix2){
int tmp = v[ix1];
v[ix1] = v[ix2];
v[ix2] = tmp;
}
// this function prints a vector
__host__ __device__ void printvec(int *v, int n){
for (int i = 0; i < n ; i ++){
printf("%d ", v[i]);
}
printf("\n");
}
// this function prints a vector
__host__ __device__ void printfloatvec(float *v, int n){
for (int i = 0; i < n ; i ++){
printf("%f ", v[i]);
}
printf("\n");
}
// first we have a piece of code that computes the thing in serial form
// we assume that the matrices A and B are square and of the same dimension
// we do not use sparse matrices because the matrices are small anyway
// the norm we are computing is min_P || A - P^T B P ||
// we will also return the best permutation
float compute_optimal_match(int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), int * bestperm ){
float opt_val = FLT_MAX;
int * v = (int *) malloc(n * sizeof(int) );
int * output = (int *) malloc(n * sizeof(int) );
settoconsec(v, n);
while( v[n-1] <= n ){
// note that the way we are going the swap here is a bit different because
// the elements from v are already in increasing form. Like
// 1 2 3 , 2 2 3 , 3 2 3, 1 3 3 , 2 3 3 , 3 3 3
// while in the parallel code the v is in the form
// 1 1 1 , 2 1 1, 3 1 1, 1 2 1, 2 2 1 , 3 2 1
settoconsec(output, n);
for( int i = 0; i < n ; i++){
swap(output, i, v[i]-1);
}
// at this point the vector output contains a permutation and we can compute a distance
float val = (*metric)( n , A , B , output );
if ( val < opt_val ){
opt_val = val;
copyvec( bestperm , output , n );
}
v[ 0 ] = v[ 0 ] + 1;
for (int i = 0; i < n-1 ; i++){
if( v[i] > n ){
v[i] = i+1;
v[ i + 1 ] = v[ i + 1 ] + 1;
}
}
}
free(output);
free(v);
return opt_val;
}
// this function transforms and index into a permutation
// the function requires a bit of scrap space
__device__ __host__ void index_to_perm(lint r, int n, int *perm, int * scrap){
for (int i = n ; i >=1; i--){
scrap[n - i] = (r % i) + 1;
r = r/i;
}
// note that the way we are going the swap here is a bit different because
// the elements from v are not in increasing form like in the cpu code.
// In the parallel code the scrap is in the form
// 1 1 1 , 2 1 1, 3 1 1, 1 2 1, 2 2 1 , 3 2 1
// but in the serial code it is
// 1 2 3 , 2 2 3 , 3 2 3, 1 3 3 , 2 3 3 , 3 3 3
settoconsec(perm, n);
for( int i = 0; i < n ; i++){
swap(perm, i, i + scrap[i]-1);
}
}
inline int fact(int n){
if (n <=1)
return 1;
else
return n*fact(n-1);
}
// this computes the optimal matching my testing different permutations in parallel
// we pass the nfact from outside to save time
// we cannot store the result of all evaluations in memory and then do a parallel max.
// there is just too much stuff to try. So each thread needs to keep a local max of several trials
__global__ void kernel_to_compute_optimal_match(int chunck_per_cycle, int num_perm_per_thread, lint nfact, int n, float *A, float *B, float (*metric)(int , float* , float *, int* ), float * obj_vals, lint * obj_perms ){
int baseix = blockIdx.x*blockDim.x + threadIdx.x;;
lint ix = baseix;
// we copy A and B to shared memory because it might be faster when we are computing the norms
extern __shared__ float AB_shared_mem[];
// we need to split the shared memory into different parts
float * shared_A = AB_shared_mem;
float * shared_B = &AB_shared_mem[n*n];
// the first thread of each block does the copy for the corresponding block
if (threadIdx.x == 0){
for (int i = 0; i < n*n ; i++){
shared_A[i] = A[i];
shared_B[i] = B[i];
}
}
__syncthreads();
float best_val = FLT_MAX;
lint best_perm_ix;
for (int i = 0; i < num_perm_per_thread ; i++){
ix = baseix + chunck_per_cycle*i;
// filter the stuff that does not matter
if (ix < nfact){
// probably we do not need more than 20 here
int perm[MAX_N_PERM];
int scrap[MAX_N_PERM];
index_to_perm( ix , n, perm, scrap);
float val = (*metric)( n, shared_A , shared_B, perm);
if (val < best_val){
best_val = val;
best_perm_ix = ix;
}
}
}
obj_vals[baseix] = best_val;
obj_perms[baseix] = best_perm_ix;
}
// Smoke test for the sequential permutation indexer: decodes every index in
// [0, n!) into a permutation. Any out-of-bounds write would show up here.
void test_index_perm(int n ){
    int *perm = (int *) malloc(n * sizeof(int));
    int *scrap = (int *) malloc(n * sizeof(int));
    if (perm == NULL || scrap == NULL){
        free(perm);
        free(scrap);
        return;
    }
    // Hoist fact(n): the original evaluated it in the loop condition, i.e.
    // one full O(n) recursive factorial per iteration, n! times.
    const int total = fact(n);
    for (int r = 0; r < total ; r++){
        index_to_perm(r, n, perm, scrap);
        //printvec(perm,n);
    }
    free(perm);
    free(scrap);
}
// Reads an edge-list file (one "u v" pair per line, 1-based vertex ids) into
// a freshly allocated dense (*graphsize) x (*graphsize) adjacency matrix laid
// out as A[row + col*size]. If *graphsize is -1 on entry, the file is scanned
// once first and the largest id seen becomes the size (written back through
// graphsize). Returns NULL if the file cannot be opened, no usable size is
// found, or allocation fails; the caller owns the returned buffer.
float * read_graph_into_adj_mat(char * filename, int *graphsize, int directed){
    // Pass 1 (only when the size is unknown): find the largest index used.
    // Here we assume that the indices used are 1, 2, ..., n.
    if (*graphsize == -1){
        FILE * graphfile = fopen(filename , "r");
        if (graphfile == NULL){
            return NULL; // cannot open the input file
        }
        int dim = -1;
        int edge1, edge2;
        // BUG FIX: fscanf returns the number of conversions (or EOF). The
        // original `!= EOF` test spins forever on a malformed line, which
        // yields 0, not EOF. Require both fields to match.
        while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) == 2){
            if (dim < edge1){
                dim = edge1;
            }
            if (dim < edge2){
                dim = edge2;
            }
        }
        fclose(graphfile);
        *graphsize = dim;
    }
    if (*graphsize <= 0){
        return NULL; // empty/unreadable file: no usable dimension
    }
    // calloc zero-fills: any edge not listed in the file stays 0.
    float *A = (float *) calloc( (*graphsize) , (*graphsize)*sizeof(float) ) ;
    if (A == NULL){
        return NULL;
    }
    FILE * graphfile = fopen(filename , "r");
    if (graphfile == NULL){
        free(A);
        return NULL;
    }
    int edge1, edge2;
    while ( fscanf(graphfile, "%d %d\n", &edge1, &edge2) == 2){
        // ignore edges that fall outside the declared size
        if (edge1 <= (*graphsize) && edge2 <= (*graphsize) && edge1 >=1 && edge2 >=1 ){
            A[(edge1-1) + (edge2-1)* (*graphsize) ] = 1;
            // if the graph is undirected, we force the matrix to be symmetric
            if (directed == 0){
                A[(edge2-1) + (edge1-1)* (*graphsize) ] = 1;
            }
        }
    }
    fclose(graphfile);
    return A;
}
// Writes vec[0..n-1] to output_file as space-separated integers with no
// trailing separator. Nothing is written (and no file is created/truncated)
// when n < 1 or the file cannot be opened.
void save_vec_to_file(int * vec, int n , char* output_file){
    // BUG FIX: the original tested `n > 1`, silently dropping single-element
    // vectors even though its own comment says "write if there is stuff".
    if (n < 1){
        return;
    }
    FILE * vec_file = fopen(output_file , "w");
    if (vec_file == NULL){
        return; // avoid fprintf on a NULL stream
    }
    for (int i = 0; i < n-1; i++){
        fprintf(vec_file,"%d ", vec[i]);
    }
    fprintf(vec_file,"%d", vec[n-1]); // last element without a trailing space
    fclose(vec_file);
}
// Command-line driver: finds the vertex permutation of graph B that best
// matches graph A under the chosen norm, either serially on the CPU or by
// brute-force search over all n! permutations on the GPU.
int main(int argc,char *argv[]){
    if (argc != 8){
        printf("The arguments must be filenameA, filenameB, outputfile, L1vsL2, directed/undirected, gpu/cpu, size\n");
        return 0;
    }
    char * filenameA = (char *) argv[1];
    char * filenameB = (char *) argv[2];
    char * fileoutput = (char *) argv[3];
    int norm_to_use = atoi( argv[4] );   // 1 = L1 norm, 2 = squared Frobenius norm
    int directed = atoi( argv[5] );      // 0 = undirected (matrices symmetrized)
    int cpu_vs_gpu = atoi( argv[6] );    // 1 = serial CPU path, otherwise GPU path
    int graphsize = atoi( argv[7] );     // -1 = infer the size from the files
    // Validate the norm selector once: the original code left `val` (CPU path)
    // and `h_d_per_metric` (GPU path) uninitialized for any other value.
    if (norm_to_use != 1 && norm_to_use != 2){
        printf("Error, L1vsL2 must be 1 or 2\n");
        return 0;
    }
    int sizeA = graphsize;
    int sizeB = graphsize;
    float *A = read_graph_into_adj_mat( filenameA , &sizeA , directed );
    float *B = read_graph_into_adj_mat( filenameB , &sizeB , directed );
    if ( sizeA != sizeB ){
        printf("Error, graphs of different sizes\n");
        free(A);
        free(B);
        return 0;
    }
    clock_t cpu_start, cpu_end;
    float cputime;
    int n = sizeA;
    lint nfact = fact(n);
    if (cpu_vs_gpu == 1){
        // ------------------------- serial CPU path -------------------------
        int * bestperm = (int *) malloc(n * sizeof( int ) ); // best permutation found
        cpu_start = clock();
        float val = 0.0f; // defensively initialized (selector already validated)
        if (norm_to_use == 1){
            val = compute_optimal_match(n, A, B, &L1_norm , bestperm);
        }
        if (norm_to_use == 2){
            val = compute_optimal_match(n, A, B, &fro_norm_square , bestperm);
        }
        cpu_end = clock();
        printf("CPU Opt Val = %f\n", val);
        printvec(bestperm, n);
        cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
        printf("SERIAL: Time to compute opt = %f\n",cputime); fflush(stdout);
        // store the best permutation in the output file, then clean up
        save_vec_to_file(bestperm, n , fileoutput);
        free(bestperm);
    }else{
        // ------------------------- parallel GPU path -----------------------
        hipSetDevice( 0 );
        hipDeviceReset();
        // Work division: chunck_per_cycle threads in total; each thread keeps
        // a running best over num_stuff_per_thread strided permutation indices
        // so the whole [0, n!) range is covered.
        int numthreadsperblock = 1024;
        int numblocks = 1024;
        int chunck_per_cycle = numblocks*numthreadsperblock;
        int num_stuff_per_thread = 1 + (nfact / chunck_per_cycle );
        float * d_A;
        float * d_B;
        float * d_obj_vals;
        lint * d_obj_perms;
        float * h_obj_vals = (float *) malloc( chunck_per_cycle*sizeof(float) );
        lint * h_obj_perms = (lint *) malloc( chunck_per_cycle*sizeof(lint) );
        // event pair reused to time each phase
        hipEvent_t gpu_start, gpu_end;
        float gputime;
        hipEventCreate(&gpu_start);
        hipEventCreate(&gpu_end);
        hipEventRecord(gpu_start, 0);
        hipMalloc((void **)&d_A, n*n*sizeof(float) );
        hipMalloc((void **)&d_B, n*n*sizeof(float) );
        hipMalloc((void **)&d_obj_vals, chunck_per_cycle*sizeof(float) );
        hipMalloc((void **)&d_obj_perms, chunck_per_cycle*sizeof(lint) );
        hipMemcpy( (void*) d_A , (void*) A , n*n*sizeof(float) , hipMemcpyHostToDevice );
        hipMemcpy( (void*) d_B , (void*) B , n*n*sizeof(float) , hipMemcpyHostToDevice );
        hipEventRecord(gpu_end, 0);
        hipEventSynchronize(gpu_end); // ensure the timed work has finished before reading the timer
        hipEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to allocate space: %f\n", gputime/1000); fflush(stdout);
        // Device function pointer for the chosen metric, fetched from a device
        // symbol so the kernel can call it indirectly.
        float (*h_d_per_metric)(int , float *, float * , int * ) = NULL;
        if (norm_to_use == 1){
            hipMemcpyFromSymbol(&h_d_per_metric, HIP_SYMBOL(d_ptr_L1_norm), sizeof( float (*)(int , float *, float * , int * ) ));
        }
        if (norm_to_use == 2){
            hipMemcpyFromSymbol(&h_d_per_metric, HIP_SYMBOL(d_ptr_fro_norm_square), sizeof( float (*)(int , float *, float * , int * ) ));
        }
        hipEventRecord(gpu_start, 0);
        // dynamic shared memory holds both matrices (2 * n*n floats)
        kernel_to_compute_optimal_match<<<numblocks,numthreadsperblock,n*n*2*sizeof(float)>>>(chunck_per_cycle,num_stuff_per_thread , nfact, n, d_A, d_B, h_d_per_metric , d_obj_vals, d_obj_perms);
        hipEventRecord(gpu_end, 0);
        hipEventSynchronize(gpu_end);
        hipEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to run the kernel: %f\n", gputime/1000); fflush(stdout);
        // now we copy the per-thread results back and reduce on the host
        hipEventRecord(gpu_start, 0);
        hipMemcpy( (void*) h_obj_vals , (void*) d_obj_vals , chunck_per_cycle*sizeof(float) , hipMemcpyDeviceToHost );
        // BUG FIX: this array holds lint elements; the original copied only
        // chunck_per_cycle*sizeof(float) bytes, truncating the transfer.
        hipMemcpy( (void*) h_obj_perms , (void*) d_obj_perms , chunck_per_cycle*sizeof(lint) , hipMemcpyDeviceToHost );
        hipEventRecord(gpu_end, 0);
        hipEventSynchronize(gpu_end);
        hipEventElapsedTime(&gputime, gpu_start, gpu_end);
        printf ("PARALLEL: Time it took to copy stuff back to the CPU: %f\n", gputime/1000); fflush(stdout);
        cpu_start = clock();
        float best_gpu_val = FLT_MAX;
        lint best_ix = 0; // stays 0 if nothing beats FLT_MAX (was uninitialized)
        for (int i = 0 ; i < chunck_per_cycle ; i++){
            float val = h_obj_vals[i];
            if (val < best_gpu_val){
                best_gpu_val = val;
                // BUG FIX: take the permutation index the winning thread
                // recorded, not the thread's slot number i. The original set
                // best_ix = i and never read h_obj_perms at all, so it decoded
                // the wrong permutation whenever the winner's ix != i.
                best_ix = h_obj_perms[i];
            }
        }
        int * perm = (int *) malloc(n * sizeof(int));
        int * scrap = (int *) malloc(n * sizeof(int));
        index_to_perm(best_ix, n, perm, scrap);
        printf("GPU Opt Val = %f\n", best_gpu_val);
        printvec(perm, n);
        save_vec_to_file(perm, n , fileoutput);
        cpu_end = clock();
        cputime = (float)(cpu_end - cpu_start) / CLOCKS_PER_SEC;
        printf("SERIAL: Time to compute the last step = %f\n",cputime); fflush(stdout);
        // release device and host resources (events included, which the
        // original leaked)
        hipFree(d_A);
        hipFree(d_B);
        hipFree(d_obj_vals);
        hipFree(d_obj_perms);
        hipEventDestroy(gpu_start);
        hipEventDestroy(gpu_end);
        free(h_obj_vals);
        free(h_obj_perms);
        free(perm);
        free(scrap);
        gerror( hipPeekAtLastError() );
        hipDeviceSynchronize();
    }
    free(A);
    free(B);
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.p2align 2
; -----------------------------------------------------------------------
; _Z15fro_norm_squareiPfS_Pi
; Compiler-generated (clang, gfx1100 wave32) body of the device helper
; fro_norm_square(int n, float *A, float *B, int *perm), invoked
; indirectly through the d_ptr_fro_norm_square function pointer.
; NOTE(review): register roles inferred from the C signature — presumably
; v0 = n, v[1:2]/v[3:4]/v[5:6] = the three flat pointers; confirm against
; the compiler's calling convention before editing.
; v7 accumulates the sum of squared differences; returned in v0.
; Do not hand-edit: regenerate from source instead.
; -----------------------------------------------------------------------
.type _Z15fro_norm_squareiPfS_Pi,@function
_Z15fro_norm_squareiPfS_Pi:
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; result = 0; skip both loops entirely when n <= 0
v_mov_b32_e32 v7, 0
s_mov_b32 s3, 0
s_mov_b32 s1, exec_lo
v_cmpx_lt_i32_e32 0, v0
s_cbranch_execz .LBB0_6
v_mov_b32_e32 v9, 0
v_mov_b32_e32 v7, 0
s_mov_b32 s4, 0
s_mov_b32 s2, s3
; outer loop: s2 = outer index, one perm[] entry loaded per iteration
.LBB0_2:
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[6:7], s[2:3], 2
v_mov_b32_e32 v8, s2
v_add_co_u32 v10, vcc_lo, v5, s6
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v6, vcc_lo
s_mov_b32 s5, 0
s_mov_b32 s6, 0
flat_load_b32 v10, v[10:11]
v_dual_mov_b32 v12, v6 :: v_dual_mov_b32 v11, v5
s_waitcnt vmcnt(0) lgkmcnt(0)
; perm entries are 1-based in the source, hence the -1 adjustment
v_add_nc_u32_e32 v10, -1, v10
.p2align 6
; inner loop: walk perm[], form the permuted flat index (v15*n + v10),
; load one element from each matrix, square the difference into v7
.LBB0_3:
flat_load_b32 v13, v[11:12]
s_add_i32 s6, s6, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s6, v0
s_or_b32 s5, s0, s5
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_nc_u32_e32 v15, -1, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[13:14], null, v15, v0, v[10:11]
v_lshlrev_b64 v[15:16], 2, v[8:9]
v_add_co_u32 v15, vcc_lo, v1, v15
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v14, 31, v13
v_add_co_ci_u32_e32 v16, vcc_lo, v2, v16, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[13:14], 2, v[13:14]
v_add_co_u32 v13, vcc_lo, v3, v13
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v14, vcc_lo, v4, v14, vcc_lo
flat_load_b32 v15, v[15:16]
flat_load_b32 v13, v[13:14]
v_add_nc_u32_e32 v8, v8, v0
v_add_co_u32 v11, vcc_lo, v11, 4
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v12, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
; acc += (a - b)^2
v_sub_f32_e32 v13, v15, v13
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v7, v13, v13
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s5
s_add_i32 s2, s2, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_eq_u32_e32 vcc_lo, s2, v0
s_or_b32 s4, vcc_lo, s4
s_and_not1_b32 exec_lo, exec_lo, s4
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s4
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1)
; restore the saved exec mask, move the accumulator to the return reg
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v0, v7
s_setpc_b64 s[30:31]
.Lfunc_end0:
.size _Z15fro_norm_squareiPfS_Pi, .Lfunc_end0-_Z15fro_norm_squareiPfS_Pi
.section .AMDGPU.csdata,"",@progbits
.text
.p2align 2
; -----------------------------------------------------------------------
; _Z7L1_normiPfS_Pi
; Compiler-generated (clang, gfx1100 wave32) body of the device helper
; L1_norm(int n, float *A, float *B, int *perm), invoked indirectly
; through the d_ptr_L1_norm function pointer. Identical loop structure to
; _Z15fro_norm_squareiPfS_Pi above; the only arithmetic difference is the
; reduction step, which accumulates |a - b| instead of (a - b)^2.
; v7 accumulates the sum; returned in v0. Do not hand-edit.
; -----------------------------------------------------------------------
.type _Z7L1_normiPfS_Pi,@function
_Z7L1_normiPfS_Pi:
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; result = 0; skip both loops entirely when n <= 0
v_mov_b32_e32 v7, 0
s_mov_b32 s3, 0
s_mov_b32 s1, exec_lo
v_cmpx_lt_i32_e32 0, v0
s_cbranch_execz .LBB1_6
v_mov_b32_e32 v9, 0
v_mov_b32_e32 v7, 0
s_mov_b32 s4, 0
s_mov_b32 s2, s3
; outer loop over s2; one perm[] entry loaded per iteration
.LBB1_2:
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[6:7], s[2:3], 2
v_mov_b32_e32 v8, s2
v_add_co_u32 v10, vcc_lo, v5, s6
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v6, vcc_lo
s_mov_b32 s5, 0
s_mov_b32 s6, 0
flat_load_b32 v10, v[10:11]
v_dual_mov_b32 v12, v6 :: v_dual_mov_b32 v11, v5
s_waitcnt vmcnt(0) lgkmcnt(0)
; perm entries are 1-based in the source, hence the -1 adjustment
v_add_nc_u32_e32 v10, -1, v10
s_set_inst_prefetch_distance 0x1
.p2align 6
; inner loop: permuted index = v15*n + v10; acc += |a - b|
.LBB1_3:
flat_load_b32 v13, v[11:12]
s_add_i32 s6, s6, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s6, v0
s_or_b32 s5, s0, s5
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_nc_u32_e32 v15, -1, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[13:14], null, v15, v0, v[10:11]
v_lshlrev_b64 v[15:16], 2, v[8:9]
v_add_co_u32 v15, vcc_lo, v1, v15
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v14, 31, v13
v_add_co_ci_u32_e32 v16, vcc_lo, v2, v16, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[13:14], 2, v[13:14]
v_add_co_u32 v13, vcc_lo, v3, v13
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v14, vcc_lo, v4, v14, vcc_lo
flat_load_b32 v15, v[15:16]
flat_load_b32 v13, v[13:14]
v_add_nc_u32_e32 v8, v8, v0
v_add_co_u32 v11, vcc_lo, v11, 4
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v12, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_sub_f32_e32 v13, v15, v13
s_delay_alu instid0(VALU_DEP_1)
; L1 reduction: add the absolute difference
v_add_f32_e64 v7, v7, |v13|
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB1_3
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s5
s_add_i32 s2, s2, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_eq_u32_e32 vcc_lo, s2, v0
s_or_b32 s4, vcc_lo, s4
s_and_not1_b32 exec_lo, exec_lo, s4
s_cbranch_execnz .LBB1_2
s_or_b32 exec_lo, exec_lo, s4
.LBB1_6:
s_delay_alu instid0(SALU_CYCLE_1)
; restore the saved exec mask, move the accumulator to the return reg
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v0, v7
s_setpc_b64 s[30:31]
.Lfunc_end1:
.size _Z7L1_normiPfS_Pi, .Lfunc_end1-_Z7L1_normiPfS_Pi
.section .AMDGPU.csdata,"",@progbits
.text
; -----------------------------------------------------------------------
; _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj
; Compiler-generated (clang, gfx1100 wave32) entry point for the search
; kernel kernel_to_compute_optimal_match(...). High-level structure, as
; matched against the CUDA/HIP source:
;   .LBB2_2  : lane 0 of the workgroup copies A and B into LDS
;   s_barrier: __syncthreads() (plus an L0 invalidate)
;   .LBB2_10 : inlined index_to_perm divide/mod chain (software integer
;              division via float reciprocal), writing into scratch
;   .LBB2_13 : initialize perm[i] = i+1 in scratch
;   .LBB2_16 : apply the decoded swaps to the scratch permutation
;   .LBB2_5  : indirect call through s[42:43] = the `metric` function
;              pointer; running (best value, best index) kept in v44/v45
;   .LBB2_18 : store this thread's best value and best index to global
; Do not hand-edit: regenerate from source instead.
; -----------------------------------------------------------------------
.protected _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj
.globl _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj
.p2align 8
.type _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj,@function
_Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj:
; kernel arguments are read from the kernarg segment via s[2:3]
s_load_b32 s48, s[2:3], 0xc
v_mov_b32_e32 v40, v0
s_mov_b64 s[38:39], s[0:1]
s_mov_b32 s33, s15
s_mov_b64 s[34:35], s[2:3]
s_mov_b32 s40, s14
v_and_b32_e32 v0, 0x3ff, v40
s_mov_b32 s41, s13
s_mov_b64 s[36:37], s[4:5]
s_mov_b32 s5, 0
s_movk_i32 s32, 0x70
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_waitcnt lgkmcnt(0)
s_cmp_lg_u32 s48, 0
s_mul_i32 s4, s48, s48
s_cselect_b32 s0, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s0, vcc_lo, s0
s_and_saveexec_b32 s6, s0
s_cbranch_execz .LBB2_3
s_load_b128 s[0:3], s[34:35], 0x10
s_max_u32 s7, s4, 1
s_lshl_b32 s8, s4, 2
.p2align 6
; lane 0 only: copy n*n elements of A and of B into the two halves of LDS
.LBB2_2:
s_waitcnt lgkmcnt(0)
s_load_b32 s9, s[0:1], 0x0
s_load_b32 s10, s[2:3], 0x0
s_add_i32 s11, s5, s8
s_add_i32 s7, s7, -1
s_add_u32 s0, s0, 4
v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s11
s_addc_u32 s1, s1, 0
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_add_i32 s5, s5, 4
s_cmp_lg_u32 s7, 0
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10
ds_store_b32 v1, v3
ds_store_b32 v2, v4
s_cbranch_scc1 .LBB2_2
.LBB2_3:
s_or_b32 exec_lo, exec_lo, s6
s_clause 0x1
s_load_b32 s0, s[34:35], 0x44
s_load_b32 s49, s[34:35], 0x4
s_waitcnt lgkmcnt(0)
; __syncthreads()
s_barrier
buffer_gl0_inv
s_and_b32 s0, 0xffff, s0
s_cmp_lt_i32 s49, 1
; v41 = baseix = workgroup_id * workgroup_size_x + tid
v_mad_u64_u32 v[41:42], null, s41, s0, v[0:1]
s_cbranch_scc1 .LBB2_17
s_clause 0x2
s_load_b32 s50, s[34:35], 0x0
s_load_b32 s51, s[34:35], 0x8
s_load_b64 s[42:43], s[34:35], 0x20
s_lshl_b32 s0, s4, 2
s_mov_b64 s[46:47], src_shared_base
s_add_i32 s0, s0, 0
s_mov_b64 s[44:45], src_private_base
s_cmp_lg_u32 s0, -1
v_add_nc_u32_e64 v46, 16, -4
; v44 = best value so far, seeded with FLT_MAX
v_mov_b32_e32 v44, 0x7f7fffff
s_cselect_b32 s44, s0, 0
s_cselect_b32 s46, s47, 0
s_cmp_gt_i32 s48, 0
s_mov_b32 s53, 16
s_cselect_b32 s52, -1, 0
s_mov_b32 s54, 0
s_sub_i32 s55, 0, s48
s_branch .LBB2_7
; spill perm[] to scratch and make the indirect call to *metric
.LBB2_5:
s_waitcnt vmcnt(0)
s_clause 0x2
scratch_store_b128 off, v[5:8], off offset:64
scratch_store_b128 off, v[9:12], off offset:80
scratch_store_b128 off, v[13:16], off offset:96
v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, s48
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s47
v_dual_mov_b32 v3, s44 :: v_dual_mov_b32 v4, s46
v_dual_mov_b32 v5, s53 :: v_dual_mov_b32 v6, s45
s_add_u32 s8, s34, 56
s_addc_u32 s9, s35, 0
s_mov_b64 s[4:5], s[38:39]
s_mov_b64 s[10:11], s[36:37]
s_mov_b32 s12, s41
s_mov_b32 s13, s40
s_mov_b32 s14, s33
; call the selected norm through the function pointer in s[42:43]
s_swappc_b64 s[30:31], s[42:43]
; keep the (value, index) pair if this trial improved on the best
v_cmp_lt_f32_e32 vcc_lo, v0, v44
v_dual_cndmask_b32 v44, v44, v0 :: v_dual_cndmask_b32 v45, v45, v42
.LBB2_6:
s_or_b32 exec_lo, exec_lo, s56
s_add_i32 s54, s54, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s54, s49
s_cbranch_scc1 .LBB2_18
; top of the per-thread trial loop: ix = baseix + chunck_per_cycle*i
.LBB2_7:
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[42:43], null, s54, s50, v[41:42]
s_mov_b32 s56, exec_lo
s_delay_alu instid0(VALU_DEP_1)
; skip lanes whose ix >= nfact
v_cmpx_gt_u32_e64 s51, v42
s_cbranch_execz .LBB2_6
s_clause 0x2
scratch_load_b128 v[5:8], off, off offset:64
scratch_load_b128 v[9:12], off, off offset:80
scratch_load_b128 v[13:16], off, off offset:96
s_and_not1_b32 vcc_lo, exec_lo, s52
s_cbranch_vccnz .LBB2_11
v_mov_b32_e32 v0, v42
s_mov_b64 s[0:1], 0
s_mov_b32 s2, s48
s_set_inst_prefetch_distance 0x1
.p2align 6
; inlined index_to_perm: repeated unsigned div/mod of ix by a shrinking
; base, implemented with the float-reciprocal division idiom
.LBB2_10:
v_cvt_f32_u32_e32 v1, s2
s_add_i32 s3, s55, s0
s_mov_b32 m0, s0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, s3, v1
s_add_i32 s3, s2, -1
s_add_u32 s0, s0, 1
s_addc_u32 s1, s1, 0
s_cmp_lt_i32 s2, 2
v_mul_hi_u32 v2, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v2
v_mul_hi_u32 v1, v0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_lo_u32 v2, v1, s2
v_add_nc_u32_e32 v3, 1, v1
v_sub_nc_u32_e32 v2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v4, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_dual_cndmask_b32 v1, v1, v3 :: v_dual_cndmask_b32 v2, v2, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, 1, v1
v_cmp_le_u32_e32 vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v1, v1, v3, vcc_lo
v_mul_lo_u32 v2, v1, s2
s_mov_b32 s2, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v0, v0, v2
v_add_nc_u32_e32 v2, 1, v0
v_mov_b32_e32 v0, v1
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2)
; store the digit (remainder + 1) into the lane-indexed scrap slot
v_movreld_b32_e32 v5, v2
s_cbranch_scc0 .LBB2_10
.LBB2_11:
s_set_inst_prefetch_distance 0x2
s_and_not1_b32 vcc_lo, exec_lo, s52
s_cbranch_vccnz .LBB2_5
v_mov_b32_e32 v0, 16
s_mov_b32 s0, 0
; settoconsec: perm[i] = i+1 in scratch
.LBB2_13:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s0, s0, 1
v_mov_b32_e32 v1, s0
s_cmp_lg_u32 s48, s0
scratch_store_b32 v0, v1, off
v_add_nc_u32_e32 v0, 4, v0
s_cbranch_scc1 .LBB2_13
s_and_not1_b32 vcc_lo, exec_lo, s52
s_cbranch_vccnz .LBB2_5
v_mov_b32_e32 v0, v46
s_mov_b64 s[0:1], 0
.p2align 6
; apply the decoded swaps: swap(perm, i, i + scrap[i] - 1)
.LBB2_16:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_mov_b32 m0, s0
v_add_nc_u32_e32 v2, 4, v0
s_waitcnt vmcnt(0)
v_movrels_b32_e32 v1, v5
s_add_u32 s0, s0, 1
s_addc_u32 s1, s1, 0
s_cmp_lg_u32 s48, s0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v1, v1, 2, v0
v_mov_b32_e32 v0, v2
s_clause 0x1
scratch_load_b32 v3, v2, off
scratch_load_b32 v4, v1, off
s_waitcnt vmcnt(0)
s_clause 0x1
scratch_store_b32 v2, v4, off
scratch_store_b32 v1, v3, off
s_cbranch_scc1 .LBB2_16
s_branch .LBB2_5
; loop never ran: best value stays FLT_MAX
.LBB2_17:
v_mov_b32_e32 v44, 0x7f7fffff
; epilogue: obj_vals[baseix] = v44; obj_perms[baseix] = v45
.LBB2_18:
s_load_b128 s[0:3], s[34:35], 0x28
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v42, 31, v41
v_lshlrev_b64 v[0:1], 2, v[41:42]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[2:3], v44, off
global_store_b32 v[0:1], v45, off
s_endpgm
; HSA kernel descriptor -- generated; keep in sync with the code above
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 112
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 1
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 1
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 1
.amdhsa_enable_private_segment 1
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 47
.amdhsa_next_free_sgpr 57
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj, .Lfunc_end2-_Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj
.section .AMDGPU.csdata,"",@progbits
.text
; pad/terminate the code section (s_code_end encoding)
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
; Device-side function-pointer objects. The host reads them with
; hipMemcpyFromSymbol and passes the value back as the kernel's `metric`
; argument, selecting the objective at run time.
.protected d_ptr_fro_norm_square
.type d_ptr_fro_norm_square,@object
.data
.globl d_ptr_fro_norm_square
.p2align 3, 0x0
d_ptr_fro_norm_square:
.quad _Z15fro_norm_squareiPfS_Pi
.size d_ptr_fro_norm_square, 8
.protected d_ptr_L1_norm
.type d_ptr_L1_norm,@object
.globl d_ptr_L1_norm
.p2align 3, 0x0
d_ptr_L1_norm:
.quad _Z7L1_normiPfS_Pi
.size d_ptr_L1_norm, 8
; HIP compilation-unit id object emitted by the toolchain
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z15fro_norm_squareiPfS_Pi
.addrsig_sym _Z7L1_normiPfS_Pi
.addrsig_sym d_ptr_fro_norm_square
.addrsig_sym d_ptr_L1_norm
.addrsig_sym AB_shared_mem
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
- .offset: 136
.size: 8
.value_kind: hidden_hostcall_buffer
- .offset: 144
.size: 8
.value_kind: hidden_multigrid_sync_arg
- .offset: 152
.size: 8
.value_kind: hidden_heap_v1
- .offset: 160
.size: 8
.value_kind: hidden_default_queue
- .offset: 176
.size: 4
.value_kind: hidden_dynamic_lds_size
- .offset: 256
.size: 8
.value_kind: hidden_queue_ptr
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj
.private_segment_fixed_size: 112
.sgpr_count: 59
.sgpr_spill_count: 0
.symbol: _Z31kernel_to_compute_optimal_matchiijiPfS_PFfiS_S_PiES_Pj.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: true
.vgpr_count: 47
.vgpr_spill_count: 12
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**
* @file strongestNeighborScan.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
/**
* Scans input in parallel picks two elements with a stride s, checks if these two elements are in the same segment;
* if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
*
* @param src input array that denotes each segment in the graph
* @param oldDst input array that denotes the destination of each edge in src
* @param newDst output array to be modified with new greatest destinatuon
* @param oldWeight input array that denotes the weight of each edge in src
* @param newWeight output array to be modified with new greatest edge weight
* @param madeChanges integer flag for any changed made by function
* @param distance stride distance
* @param numEdges the number of edges/elements in the above arrays
*/
// One pass of the parallel strongest-neighbor segment scan: each thread
// strides over the edge arrays, compares element curTID with the element
// `distance` positions earlier, and keeps the stronger edge when both fall in
// the same segment of src. Statement order matters here (newDst[curTID] is
// re-read after being written), so the body is documented but left untouched.
__global__ void strongestNeighborScan_gpu(
    int * src,
    int * oldDst, int * newDst,
    int * oldWeight, int * newWeight,
    int * madeChanges,
    int distance,
    int numEdges
) {
    // total threads in the x dimension; also the stride of the work loop
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    // flatten the 2D launch into a single starting index
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // partner index `distance` to the left, clamped at 0.
        // NOTE(review): when curTID - distance <= 0 this compares against
        // element 0 rather than skipping — presumably intended, since element
        // 0 then self-compares harmlessly; confirm against the host driver.
        const int COMPARE_T_ID = curTID - distance > 0 ? curTID - distance : 0;
        // case : shared segment -> keep the stronger of the two edges
        if( src[COMPARE_T_ID] == src[curTID]) {
            int strongerIndex;
            const int COMPARE_T_WEIGHT = oldWeight[COMPARE_T_ID];
            const int CUR_T_WEIGHT = oldWeight[curTID];
            if(COMPARE_T_WEIGHT > CUR_T_WEIGHT) {
                strongerIndex = COMPARE_T_ID;
            }
            else if(COMPARE_T_WEIGHT < CUR_T_WEIGHT) {
                strongerIndex = curTID;
            }
            // case: equal weights, take node with smaller vID (tie-break on
            // destination so the result is deterministic)
            else {
                const int COMPARE_T_D = oldDst[COMPARE_T_ID];
                const int CUR_T_D = oldDst[curTID];
                if(COMPARE_T_D < CUR_T_D) {
                    strongerIndex = COMPARE_T_ID;
                } else {
                    strongerIndex = curTID;
                };
            }
            //Set new destination
            newDst[curTID] = oldDst[strongerIndex];
            //Set new weight
            newWeight[curTID] = oldWeight[strongerIndex];
            // flag that this pass changed something (re-reads the value just
            // written to newDst[curTID])
            if(newDst[curTID] != oldDst[curTID]) { *madeChanges = 1; };
        }
        // case : different segment -> copy through unchanged
        else {
            // defaults to no change
            newDst[curTID] = oldDst[curTID];
            newWeight[curTID] = oldWeight[curTID];
        }
    }
}
/**
* During each iteration of parallel segment-scan, each (independent) task picks two elements with a stride s,
* checks if these two elements are in the same segment;
* if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
* A parallel segment-scan may involve multiple iterations,
* the first iteration uses stride s = 1 and the stride s doubles at every iteration.
*/ | code for sm_80
Function : _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0a7624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0040*/ IMAD R0, R10, c[0x0][0xc], RZ ; /* 0x000003000a007a24 */
/* 0x000fc600078e02ff */
/*0050*/ S2R R8, SR_CTAID.Y ; /* 0x0000000000087919 */
/* 0x000e680000002600 */
/*0060*/ S2R R11, SR_TID.Y ; /* 0x00000000000b7919 */
/* 0x000e620000002200 */
/*0070*/ IMAD R3, R6, c[0x0][0x0], R9 ; /* 0x0000000006037a24 */
/* 0x001fe400078e0209 */
/*0080*/ IMAD R2, R8, c[0x0][0x4], R11 ; /* 0x0000010008027a24 */
/* 0x002fc800078e020b */
/*0090*/ IMAD R2, R0, R2, R3 ; /* 0x0000000200027224 */
/* 0x000fca00078e0203 */
/*00a0*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x194], PT ; /* 0x0000650002007a0c */
/* 0x000fda0003f06270 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ I2F.U32.RP R7, R0 ; /* 0x0000000000077306 */
/* 0x000e220000209000 */
/*00d0*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0a08 */
/*00e0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f45070 */
/*00f0*/ IMAD.MOV R12, RZ, RZ, -R0 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e0a00 */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0110*/ IMAD R3, R8, c[0x0][0x4], -R11 ; /* 0x0000010008037a24 */
/* 0x000fe200078e0a0b */
/*0120*/ BSSY B0, 0x670 ; /* 0x0000054000007945 */
/* 0x000fe20003800000 */
/*0130*/ IMAD.MOV.U32 R11, RZ, RZ, R12 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e000c */
/*0140*/ IMAD R3, R3, c[0x0][0xc], -R6 ; /* 0x0000030003037a24 */
/* 0x000fe200078e0a06 */
/*0150*/ LOP3.LUT R6, RZ, R9, RZ, 0x33, !PT ; /* 0x00000009ff067212 */
/* 0x000fc600078e33ff */
/*0160*/ IMAD R3, R3, R10, c[0x0][0x194] ; /* 0x0000650003037624 */
/* 0x000fe200078e020a */
/*0170*/ MUFU.RCP R7, R7 ; /* 0x0000000700077308 */
/* 0x001e260000001000 */
/*0180*/ IMAD.IADD R3, R3, 0x1, R6 ; /* 0x0000000103037824 */
/* 0x000fe200078e0206 */
/*0190*/ IADD3 R4, R7, 0xffffffe, RZ ; /* 0x0ffffffe07047810 */
/* 0x001fc80007ffe0ff */
/*01a0*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*01b0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x001fe400078e00ff */
/*01c0*/ IMAD R11, R11, R5, RZ ; /* 0x000000050b0b7224 */
/* 0x002fc800078e02ff */
/*01d0*/ IMAD.HI.U32 R8, R5, R11, R4 ; /* 0x0000000b05087227 */
/* 0x000fcc00078e0004 */
/*01e0*/ IMAD.HI.U32 R8, R8, R3, RZ ; /* 0x0000000308087227 */
/* 0x000fc800078e00ff */
/*01f0*/ IMAD.MOV R5, RZ, RZ, -R8 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a08 */
/*0200*/ IMAD R5, R0, R5, R3 ; /* 0x0000000500057224 */
/* 0x000fca00078e0203 */
/*0210*/ ISETP.GE.U32.AND P0, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x000fda0003f06070 */
/*0220*/ @P0 IMAD.IADD R5, R5, 0x1, -R0 ; /* 0x0000000105050824 */
/* 0x000fe200078e0a00 */
/*0230*/ @P0 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108080810 */
/* 0x000fc80007ffe0ff */
/*0240*/ ISETP.GE.U32.AND P1, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x000fda0003f26070 */
/*0250*/ @P1 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108081810 */
/* 0x000fe40007ffe0ff */
/*0260*/ @!P2 LOP3.LUT R8, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff08a212 */
/* 0x000fc800078e33ff */
/*0270*/ LOP3.LUT R8, R8, 0x1, RZ, 0xc0, !PT ; /* 0x0000000108087812 */
/* 0x000fc800078ec0ff */
/*0280*/ ISETP.NE.U32.AND P0, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x000fda0003f05070 */
/*0290*/ @!P0 BRA 0x660 ; /* 0x000003c000008947 */
/* 0x000fea0003800000 */
/*02a0*/ IADD3 R6, R2, -c[0x0][0x190], RZ ; /* 0x8000640002067a10 */
/* 0x000fe20007ffe0ff */
/*02b0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fc600078e00ff */
/*02c0*/ IMNMX R6, RZ, R6, !PT ; /* 0x00000006ff067217 */
/* 0x000fe20007800200 */
/*02d0*/ IMAD.WIDE R14, R2, R7, c[0x0][0x160] ; /* 0x00005800020e7625 */
/* 0x000fc800078e0207 */
/*02e0*/ IMAD.WIDE R10, R6, R7.reuse, c[0x0][0x160] ; /* 0x00005800060a7625 */
/* 0x080fe400078e0207 */
/*02f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea8000c1e1900 */
/*0300*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ea2000c1e1900 */
/*0310*/ IMAD.WIDE R4, R2, R7, c[0x0][0x168] ; /* 0x00005a0002047625 */
/* 0x000fe200078e0207 */
/*0320*/ ISETP.NE.AND P0, PT, R11, R14, PT ; /* 0x0000000e0b00720c */
/* 0x004fda0003f05270 */
/*0330*/ @P0 LDG.E R19, [R4.64] ; /* 0x0000000404130981 */
/* 0x000ea2000c1e1900 */
/*0340*/ IMAD.WIDE R16, R2, R7, c[0x0][0x170] ; /* 0x00005c0002107625 */
/* 0x000fc800078e0207 */
/*0350*/ IMAD.WIDE R12, R2.reuse, R7.reuse, c[0x0][0x178] ; /* 0x00005e00020c7625 */
/* 0x0c0fe200078e0207 */
/*0360*/ @P0 STG.E [R16.64], R19 ; /* 0x0000001310000986 */
/* 0x0041e8000c101904 */
/*0370*/ @P0 LDG.E R21, [R12.64] ; /* 0x000000040c150981 */
/* 0x000ea2000c1e1900 */
/*0380*/ IMAD.WIDE R8, R2, R7, c[0x0][0x180] ; /* 0x0000600002087625 */
/* 0x000fe200078e0207 */
/*0390*/ BSSY B1, 0x650 ; /* 0x000002b000017945 */
/* 0x000fe80003800000 */
/*03a0*/ @P0 STG.E [R8.64], R21 ; /* 0x0000001508000986 */
/* 0x0041e2000c101904 */
/*03b0*/ @P0 BRA 0x640 ; /* 0x0000028000000947 */
/* 0x000fea0003800000 */
/*03c0*/ IMAD.SHL.U32 R16, R6.reuse, 0x4, RZ ; /* 0x0000000406107824 */
/* 0x041fe200078e00ff */
/*03d0*/ SHF.R.S32.HI R15, RZ, 0x1f, R6 ; /* 0x0000001fff0f7819 */
/* 0x000fe20000011406 */
/*03e0*/ LDG.E R17, [R12.64] ; /* 0x000000040c117981 */
/* 0x000ea6000c1e1900 */
/*03f0*/ SHF.L.U64.HI R19, R6, 0x2, R15 ; /* 0x0000000206137819 */
/* 0x000fc4000001020f */
/*0400*/ IADD3 R10, P0, R16, c[0x0][0x178], RZ ; /* 0x00005e00100a7a10 */
/* 0x000fc80007f1e0ff */
/*0410*/ IADD3.X R11, R19, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f00130b7a10 */
/* 0x000fca00007fe4ff */
/*0420*/ LDG.E R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea2000c1e1900 */
/*0430*/ BSSY B2, 0x560 ; /* 0x0000012000027945 */
/* 0x000fe20003800000 */
/*0440*/ IMAD.MOV.U32 R14, RZ, RZ, R6 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0006 */
/*0450*/ ISETP.GT.AND P0, PT, R18, R17, PT ; /* 0x000000111200720c */
/* 0x004fda0003f04270 */
/*0460*/ @P0 BRA 0x550 ; /* 0x000000e000000947 */
/* 0x000fea0003800000 */
/*0470*/ ISETP.GE.AND P0, PT, R18, R17, PT ; /* 0x000000111200720c */
/* 0x000fe20003f06270 */
/*0480*/ IMAD.MOV.U32 R11, RZ, RZ, R13 ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e000d */
/*0490*/ SHF.R.S32.HI R15, RZ, 0x1f, R2.reuse ; /* 0x0000001fff0f7819 */
/* 0x100fe20000011402 */
/*04a0*/ IMAD.MOV.U32 R14, RZ, RZ, R2 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0002 */
/*04b0*/ MOV R10, R12 ; /* 0x0000000c000a7202 */
/* 0x000fd20000000f00 */
/*04c0*/ @!P0 BRA 0x550 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*04d0*/ IADD3 R12, P0, R16, c[0x0][0x168], RZ ; /* 0x00005a00100c7a10 */
/* 0x000fe20007f1e0ff */
/*04e0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x000ea6000c1e1900 */
/*04f0*/ IADD3.X R13, R19, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b00130d7a10 */
/* 0x000fcc00007fe4ff */
/*0500*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea4000c1e1900 */
/*0510*/ ISETP.GE.AND P0, PT, R13, R10, PT ; /* 0x0000000a0d00720c */
/* 0x004fc80003f06270 */
/*0520*/ SEL R14, R6, R2, !P0 ; /* 0x00000002060e7207 */
/* 0x000fc80004000000 */
/*0530*/ SHF.R.S32.HI R15, RZ, 0x1f, R14 ; /* 0x0000001fff0f7819 */
/* 0x000fe2000001140e */
/*0540*/ IMAD.WIDE R10, R14, R7, c[0x0][0x178] ; /* 0x00005e000e0a7625 */
/* 0x000fc800078e0207 */
/*0550*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0560*/ LEA R16, P0, R14, c[0x0][0x168], 0x2 ; /* 0x00005a000e107a11 */
/* 0x000fc800078010ff */
/*0570*/ LEA.HI.X R17, R14, c[0x0][0x16c], R15, 0x2, P0 ; /* 0x00005b000e117a11 */
/* 0x000fcc00000f140f */
/*0580*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea2000c1e1900 */
/*0590*/ IMAD.WIDE R6, R2, R7, c[0x0][0x170] ; /* 0x00005c0002067625 */
/* 0x000fca00078e0207 */
/*05a0*/ STG.E [R6.64], R17 ; /* 0x0000001106007986 */
/* 0x0041e8000c101904 */
/*05b0*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ea8000c1e1900 */
/*05c0*/ STG.E [R8.64], R11 ; /* 0x0000000b08007986 */
/* 0x0041e8000c101904 */
/*05d0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*05e0*/ LDG.E R13, [R6.64] ; /* 0x00000004060d7981 */
/* 0x000ea4000c1e1900 */
/*05f0*/ ISETP.NE.AND P0, PT, R13, R4, PT ; /* 0x000000040d00720c */
/* 0x004fda0003f05270 */
/*0600*/ @P0 IMAD.MOV.U32 R15, RZ, RZ, 0x1 ; /* 0x00000001ff0f0424 */
/* 0x000fe400078e00ff */
/*0610*/ @P0 IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0c0624 */
/* 0x000fe400078e00ff */
/*0620*/ @P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0d0624 */
/* 0x000fca00078e00ff */
/*0630*/ @P0 STG.E [R12.64], R15 ; /* 0x0000000f0c000986 */
/* 0x0001e4000c101904 */
/*0640*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0650*/ IMAD.IADD R2, R0, 0x1, R2 ; /* 0x0000000100027824 */
/* 0x000fe400078e0202 */
/*0660*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0670*/ ISETP.GT.U32.AND P0, PT, R0, R3, PT ; /* 0x000000030000720c */
/* 0x000fda0003f04070 */
/*0680*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0690*/ IADD3 R16, R2, -c[0x0][0x190], RZ ; /* 0x8000640002107a10 */
/* 0x001fe20007ffe0ff */
/*06a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc600078e00ff */
/*06b0*/ IMNMX R16, RZ, R16, !PT ; /* 0x00000010ff107217 */
/* 0x000fe20007800200 */
/*06c0*/ IMAD.WIDE R4, R2, R3, c[0x0][0x160] ; /* 0x0000580002047625 */
/* 0x000fc800078e0203 */
/*06d0*/ IMAD.WIDE R12, R16, R3.reuse, c[0x0][0x160] ; /* 0x00005800100c7625 */
/* 0x080fe200078e0203 */
/*06e0*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000eaa000c1e1900 */
/*06f0*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea4000c1e1900 */
/*0700*/ ISETP.NE.AND P0, PT, R13, R6, PT ; /* 0x000000060d00720c */
/* 0x004fe20003f05270 */
/*0710*/ IMAD.WIDE R6, R2, R3, c[0x0][0x168] ; /* 0x00005a0002067625 */
/* 0x000fd800078e0203 */
/*0720*/ @P0 LDG.E R17, [R6.64] ; /* 0x0000000406110981 */
/* 0x000ea2000c1e1900 */
/*0730*/ IMAD.WIDE R8, R2, R3, c[0x0][0x170] ; /* 0x00005c0002087625 */
/* 0x000fc800078e0203 */
/*0740*/ IMAD.WIDE R14, R2.reuse, R3.reuse, c[0x0][0x178] ; /* 0x00005e00020e7625 */
/* 0x0c0fe200078e0203 */
/*0750*/ @P0 STG.E [R8.64], R17 ; /* 0x0000001108000986 */
/* 0x0041e8000c101904 */
/*0760*/ @P0 LDG.E R19, [R14.64] ; /* 0x000000040e130981 */
/* 0x000ea2000c1e1900 */
/*0770*/ IMAD.WIDE R10, R2, R3, c[0x0][0x180] ; /* 0x00006000020a7625 */
/* 0x000fe200078e0203 */
/*0780*/ BSSY B0, 0xa30 ; /* 0x000002a000007945 */
/* 0x000fe80003800000 */
/*0790*/ @P0 STG.E [R10.64], R19 ; /* 0x000000130a000986 */
/* 0x0041e2000c101904 */
/*07a0*/ @P0 BRA 0xa20 ; /* 0x0000027000000947 */
/* 0x000fea0003800000 */
/*07b0*/ IMAD.SHL.U32 R17, R16.reuse, 0x4, RZ ; /* 0x0000000410117824 */
/* 0x041fe200078e00ff */
/*07c0*/ SHF.R.S32.HI R19, RZ, 0x1f, R16 ; /* 0x0000001fff137819 */
/* 0x000fe20000011410 */
/*07d0*/ LDG.E R20, [R14.64] ; /* 0x000000040e147981 */
/* 0x000ea6000c1e1900 */
/*07e0*/ SHF.L.U64.HI R22, R16, 0x2, R19 ; /* 0x0000000210167819 */
/* 0x000fc40000010213 */
/*07f0*/ IADD3 R12, P0, R17, c[0x0][0x178], RZ ; /* 0x00005e00110c7a10 */
/* 0x000fc80007f1e0ff */
/*0800*/ IADD3.X R13, R22, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f00160d7a10 */
/* 0x000fca00007fe4ff */
/*0810*/ LDG.E R21, [R12.64] ; /* 0x000000040c157981 */
/* 0x000ea2000c1e1900 */
/*0820*/ BSSY B1, 0x950 ; /* 0x0000012000017945 */
/* 0x000fe20003800000 */
/*0830*/ MOV R18, R16 ; /* 0x0000001000127202 */
/* 0x000fe40000000f00 */
/*0840*/ ISETP.GT.AND P0, PT, R21, R20, PT ; /* 0x000000141500720c */
/* 0x004fda0003f04270 */
/*0850*/ @P0 BRA 0x940 ; /* 0x000000e000000947 */
/* 0x000fea0003800000 */
/*0860*/ ISETP.GE.AND P0, PT, R21, R20, PT ; /* 0x000000141500720c */
/* 0x000fe20003f06270 */
/*0870*/ IMAD.MOV.U32 R12, RZ, RZ, R14 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e000e */
/*0880*/ SHF.R.S32.HI R19, RZ, 0x1f, R2.reuse ; /* 0x0000001fff137819 */
/* 0x100fe20000011402 */
/*0890*/ IMAD.MOV.U32 R13, RZ, RZ, R15 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e000f */
/*08a0*/ IMAD.MOV.U32 R18, RZ, RZ, R2 ; /* 0x000000ffff127224 */
/* 0x000fd000078e0002 */
/*08b0*/ @!P0 BRA 0x940 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*08c0*/ IADD3 R14, P0, R17, c[0x0][0x168], RZ ; /* 0x00005a00110e7a10 */
/* 0x000fe20007f1e0ff */
/*08d0*/ LDG.E R12, [R6.64] ; /* 0x00000004060c7981 */
/* 0x000ea6000c1e1900 */
/*08e0*/ IADD3.X R15, R22, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b00160f7a10 */
/* 0x000fcc00007fe4ff */
/*08f0*/ LDG.E R15, [R14.64] ; /* 0x000000040e0f7981 */
/* 0x000ea4000c1e1900 */
/*0900*/ ISETP.GE.AND P0, PT, R15, R12, PT ; /* 0x0000000c0f00720c */
/* 0x004fc80003f06270 */
/*0910*/ SEL R18, R16, R2, !P0 ; /* 0x0000000210127207 */
/* 0x000fc80004000000 */
/*0920*/ SHF.R.S32.HI R19, RZ, 0x1f, R18 ; /* 0x0000001fff137819 */
/* 0x000fe20000011412 */
/*0930*/ IMAD.WIDE R12, R18, R3, c[0x0][0x178] ; /* 0x00005e00120c7625 */
/* 0x000fc800078e0203 */
/*0940*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0950*/ LEA R14, P0, R18, c[0x0][0x168], 0x2 ; /* 0x00005a00120e7a11 */
/* 0x000fc800078010ff */
/*0960*/ LEA.HI.X R15, R18, c[0x0][0x16c], R19, 0x2, P0 ; /* 0x00005b00120f7a11 */
/* 0x000fcc00000f1413 */
/*0970*/ LDG.E R15, [R14.64] ; /* 0x000000040e0f7981 */
/* 0x000ea8000c1e1900 */
/*0980*/ STG.E [R8.64], R15 ; /* 0x0000000f08007986 */
/* 0x0041e8000c101904 */
/*0990*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ STG.E [R10.64], R13 ; /* 0x0000000d0a007986 */
/* 0x0041e8000c101904 */
/*09b0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea8000c1e1900 */
/*09c0*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */
/* 0x000ea4000c1e1900 */
/*09d0*/ ISETP.NE.AND P0, PT, R17, R6, PT ; /* 0x000000061100720c */
/* 0x004fda0003f05270 */
/*09e0*/ @P0 IMAD.MOV.U32 R19, RZ, RZ, 0x1 ; /* 0x00000001ff130424 */
/* 0x000fe400078e00ff */
/*09f0*/ @P0 IMAD.MOV.U32 R16, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff100624 */
/* 0x000fe400078e00ff */
/*0a00*/ @P0 IMAD.MOV.U32 R17, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff110624 */
/* 0x000fca00078e00ff */
/*0a10*/ @P0 STG.E [R16.64], R19 ; /* 0x0000001310000986 */
/* 0x0001e4000c101904 */
/*0a20*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0a30*/ IMAD.IADD R2, R0.reuse, 0x1, R2 ; /* 0x0000000100027824 */
/* 0x040fe400078e0202 */
/*0a40*/ IMAD.WIDE R16, R0, 0x4, R4 ; /* 0x0000000400107825 */
/* 0x001fc600078e0204 */
/*0a50*/ IADD3 R10, R2, -c[0x0][0x190], RZ ; /* 0x80006400020a7a10 */
/* 0x000fc60007ffe0ff */
/*0a60*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ea2000c1e1900 */
/*0a70*/ IMNMX R10, RZ, R10, !PT ; /* 0x0000000aff0a7217 */
/* 0x000fca0007800200 */
/*0a80*/ IMAD.WIDE R12, R10, R3, c[0x0][0x160] ; /* 0x000058000a0c7625 */
/* 0x000fcc00078e0203 */
/*0a90*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea2000c1e1900 */
/*0aa0*/ IMAD.WIDE R4, R2, R3, c[0x0][0x168] ; /* 0x00005a0002047625 */
/* 0x000fe200078e0203 */
/*0ab0*/ ISETP.NE.AND P0, PT, R13, R16, PT ; /* 0x000000100d00720c */
/* 0x004fda0003f05270 */
/*0ac0*/ @P0 LDG.E R11, [R4.64] ; /* 0x00000004040b0981 */
/* 0x000ea2000c1e1900 */
/*0ad0*/ IMAD.WIDE R6, R2, R3, c[0x0][0x170] ; /* 0x00005c0002067625 */
/* 0x000fc800078e0203 */
/*0ae0*/ IMAD.WIDE R14, R2.reuse, R3.reuse, c[0x0][0x178] ; /* 0x00005e00020e7625 */
/* 0x0c0fe200078e0203 */
/*0af0*/ @P0 STG.E [R6.64], R11 ; /* 0x0000000b06000986 */
/* 0x0041e8000c101904 */
/*0b00*/ @P0 LDG.E R19, [R14.64] ; /* 0x000000040e130981 */
/* 0x000ea2000c1e1900 */
/*0b10*/ IMAD.WIDE R8, R2, R3, c[0x0][0x180] ; /* 0x0000600002087625 */
/* 0x000fe200078e0203 */
/*0b20*/ BSSY B0, 0xdd0 ; /* 0x000002a000007945 */
/* 0x000fe80003800000 */
/*0b30*/ @P0 STG.E [R8.64], R19 ; /* 0x0000001308000986 */
/* 0x0041e2000c101904 */
/*0b40*/ @P0 BRA 0xdc0 ; /* 0x0000027000000947 */
/* 0x000fea0003800000 */
/*0b50*/ IMAD.SHL.U32 R11, R10.reuse, 0x4, RZ ; /* 0x000000040a0b7824 */
/* 0x041fe200078e00ff */
/*0b60*/ SHF.R.S32.HI R17, RZ, 0x1f, R10 ; /* 0x0000001fff117819 */
/* 0x000fe2000001140a */
/*0b70*/ LDG.E R18, [R14.64] ; /* 0x000000040e127981 */
/* 0x000ea6000c1e1900 */
/*0b80*/ SHF.L.U64.HI R20, R10, 0x2, R17 ; /* 0x000000020a147819 */
/* 0x000fc40000010211 */
/*0b90*/ IADD3 R12, P0, R11, c[0x0][0x178], RZ ; /* 0x00005e000b0c7a10 */
/* 0x000fc80007f1e0ff */
/*0ba0*/ IADD3.X R13, R20, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f00140d7a10 */
/* 0x000fca00007fe4ff */
/*0bb0*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */
/* 0x000ea2000c1e1900 */
/*0bc0*/ BSSY B1, 0xcf0 ; /* 0x0000012000017945 */
/* 0x000fe20003800000 */
/*0bd0*/ MOV R16, R10 ; /* 0x0000000a00107202 */
/* 0x000fe40000000f00 */
/*0be0*/ ISETP.GT.AND P0, PT, R19, R18, PT ; /* 0x000000121300720c */
/* 0x004fda0003f04270 */
/*0bf0*/ @P0 BRA 0xce0 ; /* 0x000000e000000947 */
/* 0x000fea0003800000 */
/*0c00*/ ISETP.GE.AND P0, PT, R19, R18, PT ; /* 0x000000121300720c */
/* 0x000fe20003f06270 */
/*0c10*/ IMAD.MOV.U32 R12, RZ, RZ, R14 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e000e */
/*0c20*/ SHF.R.S32.HI R17, RZ, 0x1f, R2.reuse ; /* 0x0000001fff117819 */
/* 0x100fe20000011402 */
/*0c30*/ IMAD.MOV.U32 R13, RZ, RZ, R15 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e000f */
/*0c40*/ IMAD.MOV.U32 R16, RZ, RZ, R2 ; /* 0x000000ffff107224 */
/* 0x000fd000078e0002 */
/*0c50*/ @!P0 BRA 0xce0 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*0c60*/ IADD3 R14, P0, R11, c[0x0][0x168], RZ ; /* 0x00005a000b0e7a10 */
/* 0x000fe40007f1e0ff */
/*0c70*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x000ea4000c1e1900 */
/*0c80*/ IADD3.X R15, R20, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b00140f7a10 */
/* 0x000fca00007fe4ff */
/*0c90*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea4000c1e1900 */
/*0ca0*/ ISETP.GE.AND P0, PT, R14, R11, PT ; /* 0x0000000b0e00720c */
/* 0x004fc80003f06270 */
/*0cb0*/ SEL R16, R10, R2, !P0 ; /* 0x000000020a107207 */
/* 0x000fc80004000000 */
/*0cc0*/ SHF.R.S32.HI R17, RZ, 0x1f, R16 ; /* 0x0000001fff117819 */
/* 0x000fe20000011410 */
/*0cd0*/ IMAD.WIDE R12, R16, R3, c[0x0][0x178] ; /* 0x00005e00100c7625 */
/* 0x000fc800078e0203 */
/*0ce0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0cf0*/ LEA R10, P0, R16, c[0x0][0x168], 0x2 ; /* 0x00005a00100a7a11 */
/* 0x000fc800078010ff */
/*0d00*/ LEA.HI.X R11, R16, c[0x0][0x16c], R17, 0x2, P0 ; /* 0x00005b00100b7a11 */
/* 0x000fcc00000f1411 */
/*0d10*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ea8000c1e1900 */
/*0d20*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0041e8000c101904 */
/*0d30*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea8000c1e1900 */
/*0d40*/ STG.E [R8.64], R13 ; /* 0x0000000d08007986 */
/* 0x0041e8000c101904 */
/*0d50*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0d60*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */
/* 0x000ea4000c1e1900 */
/*0d70*/ ISETP.NE.AND P0, PT, R3, R4, PT ; /* 0x000000040300720c */
/* 0x004fda0003f05270 */
/*0d80*/ @P0 IMAD.MOV.U32 R3, RZ, RZ, 0x1 ; /* 0x00000001ff030424 */
/* 0x000fe400078e00ff */
/*0d90*/ @P0 IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0e0624 */
/* 0x000fe400078e00ff */
/*0da0*/ @P0 IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0f0624 */
/* 0x000fca00078e00ff */
/*0db0*/ @P0 STG.E [R14.64], R3 ; /* 0x000000030e000986 */
/* 0x0001e4000c101904 */
/*0dc0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0dd0*/ IMAD.IADD R2, R0, 0x1, R2 ; /* 0x0000000100027824 */
/* 0x000fca00078e0202 */
/*0de0*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x194], PT ; /* 0x0000650002007a0c */
/* 0x000fda0003f06270 */
/*0df0*/ @!P0 BRA 0x690 ; /* 0xfffff89000008947 */
/* 0x000fea000383ffff */
/*0e00*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0e10*/ BRA 0xe10; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0e20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ea0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0eb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ec0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ed0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ee0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ef0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
* @file strongestNeighborScan.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
/**
 * Scans the input in parallel: picks two elements with a stride s, checks if these two elements are in the same segment;
 * if so, it compares the two elements and stores the maximum one in the appropriate location in the output array.
 *
 * @param src input array that denotes each segment in the graph
 * @param oldDst input array that denotes the destination of each edge in src
 * @param newDst output array to be modified with new greatest destination
 * @param oldWeight input array that denotes the weight of each edge in src
 * @param newWeight output array to be modified with new greatest edge weight
 * @param madeChanges integer flag for any changes made by the function
 * @param distance stride distance
 * @param numEdges the number of edges/elements in the above arrays
 */
__global__ void strongestNeighborScan_gpu(
    int * src,
    int * oldDst, int * newDst,
    int * oldWeight, int * newWeight,
    int * madeChanges,
    int distance,
    int numEdges
) {
    // Threads along x in the whole launch; also the per-thread loop stride.
    const int NUM_THREADS = blockDim.x * gridDim.x;
    // Flattened thread coordinates (2-D grid/block supported).
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // get compare thread index, enforce 0 bound (clamp curTID - distance)
        const int COMPARE_T_ID = curTID - distance > 0 ? curTID - distance : 0;
        // case : shared segment -- compare the pair, keep the stronger edge
        if( src[COMPARE_T_ID] == src[curTID]) {
            int strongerIndex;
            const int COMPARE_T_WEIGHT = oldWeight[COMPARE_T_ID];
            const int CUR_T_WEIGHT = oldWeight[curTID];
            if(COMPARE_T_WEIGHT > CUR_T_WEIGHT) {
                strongerIndex = COMPARE_T_ID;
            }
            else if(COMPARE_T_WEIGHT < CUR_T_WEIGHT) {
                strongerIndex = curTID;
            }
            // case: equal weights, take node with smaller vID so the result
            // is deterministic regardless of scan order
            else {
                const int COMPARE_T_D = oldDst[COMPARE_T_ID];
                const int CUR_T_D = oldDst[curTID];
                if(COMPARE_T_D < CUR_T_D) {
                    strongerIndex = COMPARE_T_ID;
                } else {
                    strongerIndex = curTID;
                };
            }
            //Set new destination
            newDst[curTID] = oldDst[strongerIndex];
            //Set new weight
            newWeight[curTID] = oldWeight[strongerIndex];
            // NOTE(review): plain (non-atomic) store of 1; benign as long as
            // callers only test zero/non-zero -- confirm.
            if(newDst[curTID] != oldDst[curTID]) { *madeChanges = 1; };
        }
        // case : different segment
        else {
            // defaults to no change
            newDst[curTID] = oldDst[curTID];
            newWeight[curTID] = oldWeight[curTID];
        }
    }
}
/**
* During each iteration of parallel segment-scan, each (independent) task picks two elements with a stride s,
* checks if these two elements are in the same segment;
* if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
* A parallel segment-scan may involve multiple iterations,
* the first iteration uses stride s = 1 and the stride s doubles at every iteration.
*/ | .file "tmpxft_00019e3c_00000000-6_strongestNeighborScan.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit handler: hands the fatbinary handle saved at registration back to
# the CUDA runtime.  SysV AMD64; leaf aside from the PLT call.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
	.cfi_startproc
	endbr64					# CET/IBT landing pad
	subq	$8, %rsp			# realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# arg0 = saved handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2059:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# nvcc-generated host launch stub for
#   strongestNeighborScan_gpu(int*,int*,int*,int*,int*,int*,int,int)
# Spills the eight kernel arguments (six register args; distance/numEdges
# arrive on the caller's stack at 208/216(%rsp)), builds the void** argument
# array cudaLaunchKernel expects, pops the <<<...>>> launch configuration,
# and launches the kernel.  Guarded by the glibc stack canary (%fs:40).
	.globl	_Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii
	.type	_Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii, @function
_Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii:
.LFB2081:
	.cfi_startproc
	endbr64
	subq	$200, %rsp
	.cfi_def_cfa_offset 208
	movq	%rdi, 40(%rsp)			# spill arg0 src
	movq	%rsi, 32(%rsp)			# spill arg1 oldDst
	movq	%rdx, 24(%rsp)			# spill arg2 newDst
	movq	%rcx, 16(%rsp)			# spill arg3 oldWeight
	movq	%r8, 8(%rsp)			# spill arg4 newWeight
	movq	%r9, (%rsp)			# spill arg5 madeChanges
	movq	%fs:40, %rax			# install stack-smashing canary
	movq	%rax, 184(%rsp)
	xorl	%eax, %eax
	# args[] at 112(%rsp): pointers to each spilled argument, in
	# declaration order; the two int args live in the caller's frame.
	leaq	40(%rsp), %rax
	movq	%rax, 112(%rsp)			# &src
	leaq	32(%rsp), %rax
	movq	%rax, 120(%rsp)			# &oldDst
	leaq	24(%rsp), %rax
	movq	%rax, 128(%rsp)			# &newDst
	leaq	16(%rsp), %rax
	movq	%rax, 136(%rsp)			# &oldWeight
	leaq	8(%rsp), %rax
	movq	%rax, 144(%rsp)			# &newWeight
	movq	%rsp, %rax
	movq	%rax, 152(%rsp)			# &madeChanges
	leaq	208(%rsp), %rax
	movq	%rax, 160(%rsp)			# &distance (caller stack)
	leaq	216(%rsp), %rax
	movq	%rax, 168(%rsp)			# &numEdges (caller stack)
	# Default gridDim/blockDim to dim3(1,1,1); overwritten by the popped
	# launch configuration below.
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	movl	$1, 72(%rsp)
	movl	$1, 76(%rsp)
	movl	$1, 80(%rsp)
	movl	$1, 84(%rsp)
	# __cudaPopCallConfiguration(&gridDim@64, &blockDim@76,
	#                            &sharedMem@48, &stream@56)
	leaq	56(%rsp), %rcx
	leaq	48(%rsp), %rdx
	leaq	76(%rsp), %rsi
	leaq	64(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7				# 0 => configuration popped, launch
.L3:
	movq	184(%rsp), %rax			# verify canary before returning
	subq	%fs:40, %rax
	jne	.L8
	addq	$200, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	# Stack args for cudaLaunchKernel: push stream, then sharedMem
	# (offsets shift by 8 after the first push, so both read 56(%rsp)).
	pushq	56(%rsp)
	.cfi_def_cfa_offset 216
	pushq	56(%rsp)
	.cfi_def_cfa_offset 224
	leaq	128(%rsp), %r9			# args array (112 + 16 push bias)
	movq	92(%rsp), %rcx			# blockDim.x,y
	movl	100(%rsp), %r8d			# blockDim.z
	movq	80(%rsp), %rsi			# gridDim.x,y
	movl	88(%rsp), %edx			# gridDim.z
	leaq	_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii(%rip), %rdi	# kernel handle
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp			# drop the two pushed qwords
	.cfi_def_cfa_offset 208
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT		# canary mismatch: abort
	.cfi_endproc
.LFE2081:
	.size	_Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii, .-_Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii
# Host symbol bearing the kernel's mangled name; taking the kernel's address
# on the host resolves here.  Forwards all arguments to the device stub:
# after `subq $8`, the caller's stack ints sit at 16 (distance) and
# 24 (numEdges); each movl/push pair copies one of them, numEdges first so
# distance ends up as the 7th argument slot.
	.globl	_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
	.type	_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, @function
_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii:
.LFB2082:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movl	24(%rsp), %eax			# copy numEdges
	pushq	%rax
	.cfi_def_cfa_offset 24
	movl	24(%rsp), %eax			# copy distance (offset moved by push)
	pushq	%rax
	.cfi_def_cfa_offset 32
	call	_Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii
	addq	$24, %rsp			# drop copies + alignment pad
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2082:
	.size	_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, .-_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
	.section	.rodata.str1.8,"aMS",@progbits,1
	.align 8
.LC0:
	.string	"_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii"
	.text
# Static constructor (invoked via .init_array before main): registers this
# TU's fatbinary and its one kernel with the CUDA runtime, then queues the
# matching unregister handler with atexit.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi	# fatbin wrapper descriptor
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)	# keep handle for unregister
	# Four NULL stack arguments for __cudaRegisterFunction
	# (tid/bid/bDim/gDim pointers, unused here).
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d			# wSize = NULL
	movl	$-1, %r8d			# thread_limit = -1 (none)
	leaq	.LC0(%rip), %rdx		# device function name
	movq	%rdx, %rcx			# deviceName = same string
	leaq	_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii(%rip), %rsi	# host stub addr
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp			# drop the four NULL pushes
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT	# finalize registration
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT			# unregister at process exit
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2084:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
	.section	.init_array,"aw"
	.align 8
	.quad	_ZL24__sti____cudaRegisterAllv	# run registration before main()
	.section	.nvFatBinSegment,"aw"
	.align 8
	.type	_ZL15__fatDeviceText, @object
	.size	_ZL15__fatDeviceText, 24
# Fatbinary wrapper record handed to __cudaRegisterFatBinary:
# { magic, version, pointer to embedded device code, filename/0 }.
_ZL15__fatDeviceText:
	.long	1180844977			# 0x466243b1 fatbin wrapper magic
	.long	1				# wrapper version
	.quad	fatbinData			# embedded fatbin (defined elsewhere)
	.quad	0
	.local	_ZL20__cudaFatCubinHandle
	.comm	_ZL20__cudaFatCubinHandle,8,8	# runtime handle, set at registration
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
* @file strongestNeighborScan.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
/**
 * Scans input in parallel picks two elements with a stride s, checks if these two elements are in the same segment;
 * if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
 *
 * @param src input array that denotes each segment in the graph
 * @param oldDst input array that denotes the destination of each edge in src
 * @param newDst output array to be modified with new greatest destination
 * @param oldWeight input array that denotes the weight of each edge in src
 * @param newWeight output array to be modified with new greatest edge weight
 * @param madeChanges integer flag for any changes made by the function
 * @param distance stride distance
 * @param numEdges the number of edges/elements in the above arrays
 */
__global__ void strongestNeighborScan_gpu(
    int * src,
    int * oldDst, int * newDst,
    int * oldWeight, int * newWeight,
    int * madeChanges,
    int distance,
    int numEdges
) {
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // get compare thread index, enforce 0 bound
        const int COMPARE_T_ID = curTID - distance > 0 ? curTID - distance : 0;
        // case : shared segment
        if( src[COMPARE_T_ID] == src[curTID]) {
            int strongerIndex;
            const int COMPARE_T_WEIGHT = oldWeight[COMPARE_T_ID];
            const int CUR_T_WEIGHT = oldWeight[curTID];
            if(COMPARE_T_WEIGHT > CUR_T_WEIGHT) {
                strongerIndex = COMPARE_T_ID;
            }
            else if(COMPARE_T_WEIGHT < CUR_T_WEIGHT) {
                strongerIndex = curTID;
            }
            // case: equal weights, take node with smaller vID
            else {
                const int COMPARE_T_D = oldDst[COMPARE_T_ID];
                const int CUR_T_D = oldDst[curTID];
                if(COMPARE_T_D < CUR_T_D) {
                    strongerIndex = COMPARE_T_ID;
                } else {
                    strongerIndex = curTID;
                };
            }
            //Set new destination
            newDst[curTID] = oldDst[strongerIndex];
            //Set new weight
            newWeight[curTID] = oldWeight[strongerIndex];
            if(newDst[curTID] != oldDst[curTID]) { *madeChanges = 1; };
        }
        // case : different segment
        else {
            // defaults to no change
            newDst[curTID] = oldDst[curTID];
            newWeight[curTID] = oldWeight[curTID];
        }
    }
}
/**
* During each iteration of parallel segment-scan, each (independent) task picks two elements with a stride s,
* checks if these two elements are in the same segment;
* if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
* A parallel segment-scan may involve multiple iterations,
* the first iteration uses stride s = 1 and the stride s doubles at every iteration.
*/ | /**
* @file strongestNeighborScan.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
/**
 * Scans the input in parallel: picks two elements with a stride s and checks if these two elements are in the same segment;
 * if so, it compares the two elements and stores the maximum one in the appropriate location in the output array.
 *
 * @param src input array that denotes each segment in the graph
 * @param oldDst input array that denotes the destination of each edge in src
 * @param newDst output array to be modified with new greatest destination
 * @param oldWeight input array that denotes the weight of each edge in src
 * @param newWeight output array to be modified with new greatest edge weight
 * @param madeChanges integer flag for any change made by the function
* @param distance stride distance
* @param numEdges the number of edges/elements in the above arrays
*/
__global__ void strongestNeighborScan_gpu(
    int * src,
    int * oldDst, int * newDst,
    int * oldWeight, int * newWeight,
    int * madeChanges,
    int distance,
    int numEdges
) {
    // Stride for the loop below: total threads along x across the grid.
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    // Flattened global thread id for a 2D launch.
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // get compare thread index, enforce 0 bound
        const int COMPARE_T_ID = curTID - distance > 0 ? curTID - distance : 0;
        // case : shared segment
        if( src[COMPARE_T_ID] == src[curTID]) {
            int strongerIndex;
            const int COMPARE_T_WEIGHT = oldWeight[COMPARE_T_ID];
            const int CUR_T_WEIGHT = oldWeight[curTID];
            if(COMPARE_T_WEIGHT > CUR_T_WEIGHT) {
                strongerIndex = COMPARE_T_ID;
            }
            else if(COMPARE_T_WEIGHT < CUR_T_WEIGHT) {
                strongerIndex = curTID;
            }
            // case: equal weights, take node with smaller vID
            else {
                const int COMPARE_T_D = oldDst[COMPARE_T_ID];
                const int CUR_T_D = oldDst[curTID];
                if(COMPARE_T_D < CUR_T_D) {
                    strongerIndex = COMPARE_T_ID;
                } else {
                    strongerIndex = curTID;
                };
            }
            //Set new destination
            newDst[curTID] = oldDst[strongerIndex];
            //Set new weight
            newWeight[curTID] = oldWeight[strongerIndex];
            // Re-reads the value stored just above; flags any change.
            if(newDst[curTID] != oldDst[curTID]) { *madeChanges = 1; };
        }
        // case : different segment
        else {
            // defaults to no change
            newDst[curTID] = oldDst[curTID];
            newWeight[curTID] = oldWeight[curTID];
        }
    }
}
/**
* During each iteration of parallel segment-scan, each (independent) task picks two elements with a stride s,
* checks if these two elements are in the same segment;
* if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
* A parallel segment-scan may involve multiple iterations,
* the first iteration uses stride s = 1 and the stride s doubles at every iteration.
*/ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /**
* @file strongestNeighborScan.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
/**
 * Scans the input in parallel: picks two elements with a stride s and checks if these two elements are in the same segment;
 * if so, it compares the two elements and stores the maximum one in the appropriate location in the output array.
 *
 * @param src input array that denotes each segment in the graph
 * @param oldDst input array that denotes the destination of each edge in src
 * @param newDst output array to be modified with new greatest destination
 * @param oldWeight input array that denotes the weight of each edge in src
 * @param newWeight output array to be modified with new greatest edge weight
 * @param madeChanges integer flag for any change made by the function
* @param distance stride distance
* @param numEdges the number of edges/elements in the above arrays
*/
__global__ void strongestNeighborScan_gpu(
    int * src,
    int * oldDst, int * newDst,
    int * oldWeight, int * newWeight,
    int * madeChanges,
    int distance,
    int numEdges
) {
    // Stride for the loop below: total threads along x across the grid.
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    // Flattened global thread id for a 2D launch.
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // get compare thread index, enforce 0 bound
        const int COMPARE_T_ID = curTID - distance > 0 ? curTID - distance : 0;
        // case : shared segment
        if( src[COMPARE_T_ID] == src[curTID]) {
            int strongerIndex;
            const int COMPARE_T_WEIGHT = oldWeight[COMPARE_T_ID];
            const int CUR_T_WEIGHT = oldWeight[curTID];
            if(COMPARE_T_WEIGHT > CUR_T_WEIGHT) {
                strongerIndex = COMPARE_T_ID;
            }
            else if(COMPARE_T_WEIGHT < CUR_T_WEIGHT) {
                strongerIndex = curTID;
            }
            // case: equal weights, take node with smaller vID
            else {
                const int COMPARE_T_D = oldDst[COMPARE_T_ID];
                const int CUR_T_D = oldDst[curTID];
                if(COMPARE_T_D < CUR_T_D) {
                    strongerIndex = COMPARE_T_ID;
                } else {
                    strongerIndex = curTID;
                };
            }
            //Set new destination
            newDst[curTID] = oldDst[strongerIndex];
            //Set new weight
            newWeight[curTID] = oldWeight[strongerIndex];
            // Re-reads the value stored just above; flags any change.
            if(newDst[curTID] != oldDst[curTID]) { *madeChanges = 1; };
        }
        // case : different segment
        else {
            // defaults to no change
            newDst[curTID] = oldDst[curTID];
            newWeight[curTID] = oldWeight[curTID];
        }
    }
}
/**
* During each iteration of parallel segment-scan, each (independent) task picks two elements with a stride s,
* checks if these two elements are in the same segment;
* if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
* A parallel segment-scan may involve multiple iterations,
* the first iteration uses stride s = 1 and the stride s doubles at every iteration.
*/ | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.globl _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.p2align 8
.type _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii,@function
_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x44
s_load_b64 s[2:3], s[0:1], 0x34
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s4, 16
s_and_b32 s4, s4, 0xffff
v_mad_u64_u32 v[2:3], null, s15, s5, v[1:2]
s_mul_i32 s16, s3, s4
s_mul_i32 s14, s14, s4
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v2, s16
v_add3_u32 v0, s14, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB0_13
s_clause 0x2
s_load_b32 s3, s[0:1], 0x30
s_load_b128 s[12:15], s[0:1], 0x20
s_load_b256 s[4:11], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v7, 1
s_ashr_i32 s17, s16, 31
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_2)
v_lshlrev_b64 v[1:2], 2, v[0:1]
s_lshl_b64 s[18:19], s[16:17], 2
s_waitcnt lgkmcnt(0)
s_sub_i32 s3, 0, s3
s_branch .LBB0_4
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s17
.LBB0_3:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_or_b32 exec_lo, exec_lo, s0
v_add_nc_u32_e32 v0, s16, v0
v_add_co_u32 v1, s0, v1, s18
v_add_co_ci_u32_e64 v2, s0, s19, v2, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s2, v0
s_or_b32 s1, vcc_lo, s1
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_13
.LBB0_4:
v_add_nc_u32_e32 v3, s3, v0
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_max_i32_e32 v3, 0, v3
v_lshlrev_b64 v[5:6], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s4, v5
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v10, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v11, vcc_lo, s5, v2, vcc_lo
s_clause 0x1
global_load_b32 v8, v[8:9], off
global_load_b32 v9, v[10:11], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e64 v8, v9
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_6
v_add_co_u32 v5, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v2, vcc_lo
global_load_b32 v3, v[5:6], off
v_add_co_u32 v5, vcc_lo, s8, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s9, v2, vcc_lo
v_add_co_u32 v8, vcc_lo, s10, v1
v_add_co_ci_u32_e32 v9, vcc_lo, s11, v2, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v3, off
global_load_b32 v3, v[8:9], off
v_add_co_u32 v5, vcc_lo, s12, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s13, v2, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v3, off
.LBB0_6:
s_and_not1_saveexec_b32 s0, s0
s_cbranch_execz .LBB0_3
v_add_co_u32 v8, vcc_lo, s10, v5
v_add_co_ci_u32_e32 v9, vcc_lo, s11, v6, vcc_lo
v_add_co_u32 v10, vcc_lo, s10, v1
v_add_co_ci_u32_e32 v11, vcc_lo, s11, v2, vcc_lo
s_mov_b32 s17, exec_lo
s_clause 0x1
global_load_b32 v8, v[8:9], off
global_load_b32 v9, v[10:11], off
s_waitcnt vmcnt(0)
v_cmpx_le_i32_e64 v8, v9
s_cbranch_execz .LBB0_11
v_cmp_ge_i32_e32 vcc_lo, v8, v9
v_mov_b32_e32 v8, v0
s_and_saveexec_b32 s20, vcc_lo
s_cbranch_execz .LBB0_10
v_add_co_u32 v5, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo
v_add_co_u32 v8, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v5, v[5:6], off
global_load_b32 v6, v[8:9], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v5, v6
v_cndmask_b32_e32 v8, v0, v3, vcc_lo
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s20
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v3, v8
.LBB0_11:
s_or_b32 exec_lo, exec_lo, s17
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v3
v_mov_b32_e32 v5, v3
s_mov_b32 s17, exec_lo
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v6, vcc_lo
global_load_b32 v3, v[8:9], off
v_add_co_u32 v8, vcc_lo, s8, v1
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v2, vcc_lo
v_add_co_u32 v5, vcc_lo, s10, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s11, v6, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[8:9], v3, off
global_load_b32 v3, v[5:6], off
v_add_co_u32 v5, vcc_lo, s12, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s13, v2, vcc_lo
v_add_co_u32 v10, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v2, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v3, off
global_load_b32 v3, v[8:9], off
global_load_b32 v5, v[10:11], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e64 v3, v5
s_cbranch_execz .LBB0_2
global_store_b32 v4, v7, s[14:15]
s_branch .LBB0_2
.LBB0_13:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 21
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, .Lfunc_end0-_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 52
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 23
.sgpr_spill_count: 0
.symbol: _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**
* @file strongestNeighborScan.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
/**
 * Scans the input in parallel: picks two elements with a stride s and checks if these two elements are in the same segment;
 * if so, it compares the two elements and stores the maximum one in the appropriate location in the output array.
 *
 * @param src input array that denotes each segment in the graph
 * @param oldDst input array that denotes the destination of each edge in src
 * @param newDst output array to be modified with new greatest destination
 * @param oldWeight input array that denotes the weight of each edge in src
 * @param newWeight output array to be modified with new greatest edge weight
 * @param madeChanges integer flag for any change made by the function
* @param distance stride distance
* @param numEdges the number of edges/elements in the above arrays
*/
__global__ void strongestNeighborScan_gpu(
    int * src,
    int * oldDst, int * newDst,
    int * oldWeight, int * newWeight,
    int * madeChanges,
    int distance,
    int numEdges
) {
    // Stride for the loop below: total threads along x across the grid.
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    // Flattened global thread id for a 2D launch.
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // get compare thread index, enforce 0 bound
        const int COMPARE_T_ID = curTID - distance > 0 ? curTID - distance : 0;
        // case : shared segment
        if( src[COMPARE_T_ID] == src[curTID]) {
            int strongerIndex;
            const int COMPARE_T_WEIGHT = oldWeight[COMPARE_T_ID];
            const int CUR_T_WEIGHT = oldWeight[curTID];
            if(COMPARE_T_WEIGHT > CUR_T_WEIGHT) {
                strongerIndex = COMPARE_T_ID;
            }
            else if(COMPARE_T_WEIGHT < CUR_T_WEIGHT) {
                strongerIndex = curTID;
            }
            // case: equal weights, take node with smaller vID
            else {
                const int COMPARE_T_D = oldDst[COMPARE_T_ID];
                const int CUR_T_D = oldDst[curTID];
                if(COMPARE_T_D < CUR_T_D) {
                    strongerIndex = COMPARE_T_ID;
                } else {
                    strongerIndex = curTID;
                };
            }
            //Set new destination
            newDst[curTID] = oldDst[strongerIndex];
            //Set new weight
            newWeight[curTID] = oldWeight[strongerIndex];
            // Re-reads the value stored just above; flags any change.
            if(newDst[curTID] != oldDst[curTID]) { *madeChanges = 1; };
        }
        // case : different segment
        else {
            // defaults to no change
            newDst[curTID] = oldDst[curTID];
            newWeight[curTID] = oldWeight[curTID];
        }
    }
}
/**
* During each iteration of parallel segment-scan, each (independent) task picks two elements with a stride s,
* checks if these two elements are in the same segment;
* if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
* A parallel segment-scan may involve multiple iterations,
* the first iteration uses stride s = 1 and the stride s doubles at every iteration.
# Host-side (x86-64 SysV) device stub for strongestNeighborScan_gpu.
# Spills the six pointer arguments, builds the kernel-argument pointer
# array, pops the launch configuration pushed by the call site, and
# forwards everything to hipLaunchKernel. The two int arguments
# (distance, numEdges) arrive on the caller's stack and are referenced
# in place at 176(%rsp)/184(%rsp).
.text
.file "strongestNeighborScan.hip"
.globl _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii # -- Begin function _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.p2align 4, 0x90
.type _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii,@function
_Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii: # @_Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
# Spill the six register arguments (src..madeChanges) to stack slots.
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
# Fill the kernel-argument array at 96(%rsp): one pointer per argument,
# the last two pointing at the stack-passed ints.
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
# Retrieve grid dim, block dim, shared-mem size, and stream.
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, %edi
# Push shared-mem size and stream as the 7th/8th arguments.
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii, .Lfunc_end0-_Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.cfi_endproc
# -- End function
# Module constructor (run from .init_array): registers the fat binary
# once and then registers the kernel symbol with the HIP runtime.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Register the fat binary only once (guarded by __hip_gpubin_handle).
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
# Register the kernel: handle, stub address, name string (twice), -1, NULL.
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Tail-call atexit to schedule the module destructor.
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (registered via atexit): unregisters the fat binary
# if it was registered and clears the handle so this runs at most once.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax # realign stack for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
# Host-side data: the kernel "handle" object (points at the stub), the
# registration name string, the fatbin wrapper record, the gpubin handle,
# the .init_array entry for the ctor, and the HIP compilation-unit id.
.type _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii,@object # @_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.section .rodata,"a",@progbits
.globl _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.p2align 3, 0x0
_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii:
.quad _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.size _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, 8
# NUL-terminated kernel name passed to __hipRegisterFunction.
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii"
.size .L__unnamed_1, 44
# Fat binary wrapper: magic "FPIH" (0x48495046), version, payload pointer.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Zero-initialized handle guarding one-time registration (see ctor/dtor).
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0a7624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0040*/ IMAD R0, R10, c[0x0][0xc], RZ ; /* 0x000003000a007a24 */
/* 0x000fc600078e02ff */
/*0050*/ S2R R8, SR_CTAID.Y ; /* 0x0000000000087919 */
/* 0x000e680000002600 */
/*0060*/ S2R R11, SR_TID.Y ; /* 0x00000000000b7919 */
/* 0x000e620000002200 */
/*0070*/ IMAD R3, R6, c[0x0][0x0], R9 ; /* 0x0000000006037a24 */
/* 0x001fe400078e0209 */
/*0080*/ IMAD R2, R8, c[0x0][0x4], R11 ; /* 0x0000010008027a24 */
/* 0x002fc800078e020b */
/*0090*/ IMAD R2, R0, R2, R3 ; /* 0x0000000200027224 */
/* 0x000fca00078e0203 */
/*00a0*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x194], PT ; /* 0x0000650002007a0c */
/* 0x000fda0003f06270 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ I2F.U32.RP R7, R0 ; /* 0x0000000000077306 */
/* 0x000e220000209000 */
/*00d0*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0a08 */
/*00e0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f45070 */
/*00f0*/ IMAD.MOV R12, RZ, RZ, -R0 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e0a00 */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0110*/ IMAD R3, R8, c[0x0][0x4], -R11 ; /* 0x0000010008037a24 */
/* 0x000fe200078e0a0b */
/*0120*/ BSSY B0, 0x670 ; /* 0x0000054000007945 */
/* 0x000fe20003800000 */
/*0130*/ IMAD.MOV.U32 R11, RZ, RZ, R12 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e000c */
/*0140*/ IMAD R3, R3, c[0x0][0xc], -R6 ; /* 0x0000030003037a24 */
/* 0x000fe200078e0a06 */
/*0150*/ LOP3.LUT R6, RZ, R9, RZ, 0x33, !PT ; /* 0x00000009ff067212 */
/* 0x000fc600078e33ff */
/*0160*/ IMAD R3, R3, R10, c[0x0][0x194] ; /* 0x0000650003037624 */
/* 0x000fe200078e020a */
/*0170*/ MUFU.RCP R7, R7 ; /* 0x0000000700077308 */
/* 0x001e260000001000 */
/*0180*/ IMAD.IADD R3, R3, 0x1, R6 ; /* 0x0000000103037824 */
/* 0x000fe200078e0206 */
/*0190*/ IADD3 R4, R7, 0xffffffe, RZ ; /* 0x0ffffffe07047810 */
/* 0x001fc80007ffe0ff */
/*01a0*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*01b0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x001fe400078e00ff */
/*01c0*/ IMAD R11, R11, R5, RZ ; /* 0x000000050b0b7224 */
/* 0x002fc800078e02ff */
/*01d0*/ IMAD.HI.U32 R8, R5, R11, R4 ; /* 0x0000000b05087227 */
/* 0x000fcc00078e0004 */
/*01e0*/ IMAD.HI.U32 R8, R8, R3, RZ ; /* 0x0000000308087227 */
/* 0x000fc800078e00ff */
/*01f0*/ IMAD.MOV R5, RZ, RZ, -R8 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a08 */
/*0200*/ IMAD R5, R0, R5, R3 ; /* 0x0000000500057224 */
/* 0x000fca00078e0203 */
/*0210*/ ISETP.GE.U32.AND P0, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x000fda0003f06070 */
/*0220*/ @P0 IMAD.IADD R5, R5, 0x1, -R0 ; /* 0x0000000105050824 */
/* 0x000fe200078e0a00 */
/*0230*/ @P0 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108080810 */
/* 0x000fc80007ffe0ff */
/*0240*/ ISETP.GE.U32.AND P1, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x000fda0003f26070 */
/*0250*/ @P1 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108081810 */
/* 0x000fe40007ffe0ff */
/*0260*/ @!P2 LOP3.LUT R8, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff08a212 */
/* 0x000fc800078e33ff */
/*0270*/ LOP3.LUT R8, R8, 0x1, RZ, 0xc0, !PT ; /* 0x0000000108087812 */
/* 0x000fc800078ec0ff */
/*0280*/ ISETP.NE.U32.AND P0, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x000fda0003f05070 */
/*0290*/ @!P0 BRA 0x660 ; /* 0x000003c000008947 */
/* 0x000fea0003800000 */
/*02a0*/ IADD3 R6, R2, -c[0x0][0x190], RZ ; /* 0x8000640002067a10 */
/* 0x000fe20007ffe0ff */
/*02b0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fc600078e00ff */
/*02c0*/ IMNMX R6, RZ, R6, !PT ; /* 0x00000006ff067217 */
/* 0x000fe20007800200 */
/*02d0*/ IMAD.WIDE R14, R2, R7, c[0x0][0x160] ; /* 0x00005800020e7625 */
/* 0x000fc800078e0207 */
/*02e0*/ IMAD.WIDE R10, R6, R7.reuse, c[0x0][0x160] ; /* 0x00005800060a7625 */
/* 0x080fe400078e0207 */
/*02f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea8000c1e1900 */
/*0300*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ea2000c1e1900 */
/*0310*/ IMAD.WIDE R4, R2, R7, c[0x0][0x168] ; /* 0x00005a0002047625 */
/* 0x000fe200078e0207 */
/*0320*/ ISETP.NE.AND P0, PT, R11, R14, PT ; /* 0x0000000e0b00720c */
/* 0x004fda0003f05270 */
/*0330*/ @P0 LDG.E R19, [R4.64] ; /* 0x0000000404130981 */
/* 0x000ea2000c1e1900 */
/*0340*/ IMAD.WIDE R16, R2, R7, c[0x0][0x170] ; /* 0x00005c0002107625 */
/* 0x000fc800078e0207 */
/*0350*/ IMAD.WIDE R12, R2.reuse, R7.reuse, c[0x0][0x178] ; /* 0x00005e00020c7625 */
/* 0x0c0fe200078e0207 */
/*0360*/ @P0 STG.E [R16.64], R19 ; /* 0x0000001310000986 */
/* 0x0041e8000c101904 */
/*0370*/ @P0 LDG.E R21, [R12.64] ; /* 0x000000040c150981 */
/* 0x000ea2000c1e1900 */
/*0380*/ IMAD.WIDE R8, R2, R7, c[0x0][0x180] ; /* 0x0000600002087625 */
/* 0x000fe200078e0207 */
/*0390*/ BSSY B1, 0x650 ; /* 0x000002b000017945 */
/* 0x000fe80003800000 */
/*03a0*/ @P0 STG.E [R8.64], R21 ; /* 0x0000001508000986 */
/* 0x0041e2000c101904 */
/*03b0*/ @P0 BRA 0x640 ; /* 0x0000028000000947 */
/* 0x000fea0003800000 */
/*03c0*/ IMAD.SHL.U32 R16, R6.reuse, 0x4, RZ ; /* 0x0000000406107824 */
/* 0x041fe200078e00ff */
/*03d0*/ SHF.R.S32.HI R15, RZ, 0x1f, R6 ; /* 0x0000001fff0f7819 */
/* 0x000fe20000011406 */
/*03e0*/ LDG.E R17, [R12.64] ; /* 0x000000040c117981 */
/* 0x000ea6000c1e1900 */
/*03f0*/ SHF.L.U64.HI R19, R6, 0x2, R15 ; /* 0x0000000206137819 */
/* 0x000fc4000001020f */
/*0400*/ IADD3 R10, P0, R16, c[0x0][0x178], RZ ; /* 0x00005e00100a7a10 */
/* 0x000fc80007f1e0ff */
/*0410*/ IADD3.X R11, R19, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f00130b7a10 */
/* 0x000fca00007fe4ff */
/*0420*/ LDG.E R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea2000c1e1900 */
/*0430*/ BSSY B2, 0x560 ; /* 0x0000012000027945 */
/* 0x000fe20003800000 */
/*0440*/ IMAD.MOV.U32 R14, RZ, RZ, R6 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0006 */
/*0450*/ ISETP.GT.AND P0, PT, R18, R17, PT ; /* 0x000000111200720c */
/* 0x004fda0003f04270 */
/*0460*/ @P0 BRA 0x550 ; /* 0x000000e000000947 */
/* 0x000fea0003800000 */
/*0470*/ ISETP.GE.AND P0, PT, R18, R17, PT ; /* 0x000000111200720c */
/* 0x000fe20003f06270 */
/*0480*/ IMAD.MOV.U32 R11, RZ, RZ, R13 ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e000d */
/*0490*/ SHF.R.S32.HI R15, RZ, 0x1f, R2.reuse ; /* 0x0000001fff0f7819 */
/* 0x100fe20000011402 */
/*04a0*/ IMAD.MOV.U32 R14, RZ, RZ, R2 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0002 */
/*04b0*/ MOV R10, R12 ; /* 0x0000000c000a7202 */
/* 0x000fd20000000f00 */
/*04c0*/ @!P0 BRA 0x550 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*04d0*/ IADD3 R12, P0, R16, c[0x0][0x168], RZ ; /* 0x00005a00100c7a10 */
/* 0x000fe20007f1e0ff */
/*04e0*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x000ea6000c1e1900 */
/*04f0*/ IADD3.X R13, R19, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b00130d7a10 */
/* 0x000fcc00007fe4ff */
/*0500*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea4000c1e1900 */
/*0510*/ ISETP.GE.AND P0, PT, R13, R10, PT ; /* 0x0000000a0d00720c */
/* 0x004fc80003f06270 */
/*0520*/ SEL R14, R6, R2, !P0 ; /* 0x00000002060e7207 */
/* 0x000fc80004000000 */
/*0530*/ SHF.R.S32.HI R15, RZ, 0x1f, R14 ; /* 0x0000001fff0f7819 */
/* 0x000fe2000001140e */
/*0540*/ IMAD.WIDE R10, R14, R7, c[0x0][0x178] ; /* 0x00005e000e0a7625 */
/* 0x000fc800078e0207 */
/*0550*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0560*/ LEA R16, P0, R14, c[0x0][0x168], 0x2 ; /* 0x00005a000e107a11 */
/* 0x000fc800078010ff */
/*0570*/ LEA.HI.X R17, R14, c[0x0][0x16c], R15, 0x2, P0 ; /* 0x00005b000e117a11 */
/* 0x000fcc00000f140f */
/*0580*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea2000c1e1900 */
/*0590*/ IMAD.WIDE R6, R2, R7, c[0x0][0x170] ; /* 0x00005c0002067625 */
/* 0x000fca00078e0207 */
/*05a0*/ STG.E [R6.64], R17 ; /* 0x0000001106007986 */
/* 0x0041e8000c101904 */
/*05b0*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ea8000c1e1900 */
/*05c0*/ STG.E [R8.64], R11 ; /* 0x0000000b08007986 */
/* 0x0041e8000c101904 */
/*05d0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*05e0*/ LDG.E R13, [R6.64] ; /* 0x00000004060d7981 */
/* 0x000ea4000c1e1900 */
/*05f0*/ ISETP.NE.AND P0, PT, R13, R4, PT ; /* 0x000000040d00720c */
/* 0x004fda0003f05270 */
/*0600*/ @P0 IMAD.MOV.U32 R15, RZ, RZ, 0x1 ; /* 0x00000001ff0f0424 */
/* 0x000fe400078e00ff */
/*0610*/ @P0 IMAD.MOV.U32 R12, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0c0624 */
/* 0x000fe400078e00ff */
/*0620*/ @P0 IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0d0624 */
/* 0x000fca00078e00ff */
/*0630*/ @P0 STG.E [R12.64], R15 ; /* 0x0000000f0c000986 */
/* 0x0001e4000c101904 */
/*0640*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0650*/ IMAD.IADD R2, R0, 0x1, R2 ; /* 0x0000000100027824 */
/* 0x000fe400078e0202 */
/*0660*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0670*/ ISETP.GT.U32.AND P0, PT, R0, R3, PT ; /* 0x000000030000720c */
/* 0x000fda0003f04070 */
/*0680*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0690*/ IADD3 R16, R2, -c[0x0][0x190], RZ ; /* 0x8000640002107a10 */
/* 0x001fe20007ffe0ff */
/*06a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc600078e00ff */
/*06b0*/ IMNMX R16, RZ, R16, !PT ; /* 0x00000010ff107217 */
/* 0x000fe20007800200 */
/*06c0*/ IMAD.WIDE R4, R2, R3, c[0x0][0x160] ; /* 0x0000580002047625 */
/* 0x000fc800078e0203 */
/*06d0*/ IMAD.WIDE R12, R16, R3.reuse, c[0x0][0x160] ; /* 0x00005800100c7625 */
/* 0x080fe200078e0203 */
/*06e0*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000eaa000c1e1900 */
/*06f0*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea4000c1e1900 */
/*0700*/ ISETP.NE.AND P0, PT, R13, R6, PT ; /* 0x000000060d00720c */
/* 0x004fe20003f05270 */
/*0710*/ IMAD.WIDE R6, R2, R3, c[0x0][0x168] ; /* 0x00005a0002067625 */
/* 0x000fd800078e0203 */
/*0720*/ @P0 LDG.E R17, [R6.64] ; /* 0x0000000406110981 */
/* 0x000ea2000c1e1900 */
/*0730*/ IMAD.WIDE R8, R2, R3, c[0x0][0x170] ; /* 0x00005c0002087625 */
/* 0x000fc800078e0203 */
/*0740*/ IMAD.WIDE R14, R2.reuse, R3.reuse, c[0x0][0x178] ; /* 0x00005e00020e7625 */
/* 0x0c0fe200078e0203 */
/*0750*/ @P0 STG.E [R8.64], R17 ; /* 0x0000001108000986 */
/* 0x0041e8000c101904 */
/*0760*/ @P0 LDG.E R19, [R14.64] ; /* 0x000000040e130981 */
/* 0x000ea2000c1e1900 */
/*0770*/ IMAD.WIDE R10, R2, R3, c[0x0][0x180] ; /* 0x00006000020a7625 */
/* 0x000fe200078e0203 */
/*0780*/ BSSY B0, 0xa30 ; /* 0x000002a000007945 */
/* 0x000fe80003800000 */
/*0790*/ @P0 STG.E [R10.64], R19 ; /* 0x000000130a000986 */
/* 0x0041e2000c101904 */
/*07a0*/ @P0 BRA 0xa20 ; /* 0x0000027000000947 */
/* 0x000fea0003800000 */
/*07b0*/ IMAD.SHL.U32 R17, R16.reuse, 0x4, RZ ; /* 0x0000000410117824 */
/* 0x041fe200078e00ff */
/*07c0*/ SHF.R.S32.HI R19, RZ, 0x1f, R16 ; /* 0x0000001fff137819 */
/* 0x000fe20000011410 */
/*07d0*/ LDG.E R20, [R14.64] ; /* 0x000000040e147981 */
/* 0x000ea6000c1e1900 */
/*07e0*/ SHF.L.U64.HI R22, R16, 0x2, R19 ; /* 0x0000000210167819 */
/* 0x000fc40000010213 */
/*07f0*/ IADD3 R12, P0, R17, c[0x0][0x178], RZ ; /* 0x00005e00110c7a10 */
/* 0x000fc80007f1e0ff */
/*0800*/ IADD3.X R13, R22, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f00160d7a10 */
/* 0x000fca00007fe4ff */
/*0810*/ LDG.E R21, [R12.64] ; /* 0x000000040c157981 */
/* 0x000ea2000c1e1900 */
/*0820*/ BSSY B1, 0x950 ; /* 0x0000012000017945 */
/* 0x000fe20003800000 */
/*0830*/ MOV R18, R16 ; /* 0x0000001000127202 */
/* 0x000fe40000000f00 */
/*0840*/ ISETP.GT.AND P0, PT, R21, R20, PT ; /* 0x000000141500720c */
/* 0x004fda0003f04270 */
/*0850*/ @P0 BRA 0x940 ; /* 0x000000e000000947 */
/* 0x000fea0003800000 */
/*0860*/ ISETP.GE.AND P0, PT, R21, R20, PT ; /* 0x000000141500720c */
/* 0x000fe20003f06270 */
/*0870*/ IMAD.MOV.U32 R12, RZ, RZ, R14 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e000e */
/*0880*/ SHF.R.S32.HI R19, RZ, 0x1f, R2.reuse ; /* 0x0000001fff137819 */
/* 0x100fe20000011402 */
/*0890*/ IMAD.MOV.U32 R13, RZ, RZ, R15 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e000f */
/*08a0*/ IMAD.MOV.U32 R18, RZ, RZ, R2 ; /* 0x000000ffff127224 */
/* 0x000fd000078e0002 */
/*08b0*/ @!P0 BRA 0x940 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*08c0*/ IADD3 R14, P0, R17, c[0x0][0x168], RZ ; /* 0x00005a00110e7a10 */
/* 0x000fe20007f1e0ff */
/*08d0*/ LDG.E R12, [R6.64] ; /* 0x00000004060c7981 */
/* 0x000ea6000c1e1900 */
/*08e0*/ IADD3.X R15, R22, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b00160f7a10 */
/* 0x000fcc00007fe4ff */
/*08f0*/ LDG.E R15, [R14.64] ; /* 0x000000040e0f7981 */
/* 0x000ea4000c1e1900 */
/*0900*/ ISETP.GE.AND P0, PT, R15, R12, PT ; /* 0x0000000c0f00720c */
/* 0x004fc80003f06270 */
/*0910*/ SEL R18, R16, R2, !P0 ; /* 0x0000000210127207 */
/* 0x000fc80004000000 */
/*0920*/ SHF.R.S32.HI R19, RZ, 0x1f, R18 ; /* 0x0000001fff137819 */
/* 0x000fe20000011412 */
/*0930*/ IMAD.WIDE R12, R18, R3, c[0x0][0x178] ; /* 0x00005e00120c7625 */
/* 0x000fc800078e0203 */
/*0940*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0950*/ LEA R14, P0, R18, c[0x0][0x168], 0x2 ; /* 0x00005a00120e7a11 */
/* 0x000fc800078010ff */
/*0960*/ LEA.HI.X R15, R18, c[0x0][0x16c], R19, 0x2, P0 ; /* 0x00005b00120f7a11 */
/* 0x000fcc00000f1413 */
/*0970*/ LDG.E R15, [R14.64] ; /* 0x000000040e0f7981 */
/* 0x000ea8000c1e1900 */
/*0980*/ STG.E [R8.64], R15 ; /* 0x0000000f08007986 */
/* 0x0041e8000c101904 */
/*0990*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ STG.E [R10.64], R13 ; /* 0x0000000d0a007986 */
/* 0x0041e8000c101904 */
/*09b0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea8000c1e1900 */
/*09c0*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */
/* 0x000ea4000c1e1900 */
/*09d0*/ ISETP.NE.AND P0, PT, R17, R6, PT ; /* 0x000000061100720c */
/* 0x004fda0003f05270 */
/*09e0*/ @P0 IMAD.MOV.U32 R19, RZ, RZ, 0x1 ; /* 0x00000001ff130424 */
/* 0x000fe400078e00ff */
/*09f0*/ @P0 IMAD.MOV.U32 R16, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff100624 */
/* 0x000fe400078e00ff */
/*0a00*/ @P0 IMAD.MOV.U32 R17, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff110624 */
/* 0x000fca00078e00ff */
/*0a10*/ @P0 STG.E [R16.64], R19 ; /* 0x0000001310000986 */
/* 0x0001e4000c101904 */
/*0a20*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0a30*/ IMAD.IADD R2, R0.reuse, 0x1, R2 ; /* 0x0000000100027824 */
/* 0x040fe400078e0202 */
/*0a40*/ IMAD.WIDE R16, R0, 0x4, R4 ; /* 0x0000000400107825 */
/* 0x001fc600078e0204 */
/*0a50*/ IADD3 R10, R2, -c[0x0][0x190], RZ ; /* 0x80006400020a7a10 */
/* 0x000fc60007ffe0ff */
/*0a60*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ea2000c1e1900 */
/*0a70*/ IMNMX R10, RZ, R10, !PT ; /* 0x0000000aff0a7217 */
/* 0x000fca0007800200 */
/*0a80*/ IMAD.WIDE R12, R10, R3, c[0x0][0x160] ; /* 0x000058000a0c7625 */
/* 0x000fcc00078e0203 */
/*0a90*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea2000c1e1900 */
/*0aa0*/ IMAD.WIDE R4, R2, R3, c[0x0][0x168] ; /* 0x00005a0002047625 */
/* 0x000fe200078e0203 */
/*0ab0*/ ISETP.NE.AND P0, PT, R13, R16, PT ; /* 0x000000100d00720c */
/* 0x004fda0003f05270 */
/*0ac0*/ @P0 LDG.E R11, [R4.64] ; /* 0x00000004040b0981 */
/* 0x000ea2000c1e1900 */
/*0ad0*/ IMAD.WIDE R6, R2, R3, c[0x0][0x170] ; /* 0x00005c0002067625 */
/* 0x000fc800078e0203 */
/*0ae0*/ IMAD.WIDE R14, R2.reuse, R3.reuse, c[0x0][0x178] ; /* 0x00005e00020e7625 */
/* 0x0c0fe200078e0203 */
/*0af0*/ @P0 STG.E [R6.64], R11 ; /* 0x0000000b06000986 */
/* 0x0041e8000c101904 */
/*0b00*/ @P0 LDG.E R19, [R14.64] ; /* 0x000000040e130981 */
/* 0x000ea2000c1e1900 */
/*0b10*/ IMAD.WIDE R8, R2, R3, c[0x0][0x180] ; /* 0x0000600002087625 */
/* 0x000fe200078e0203 */
/*0b20*/ BSSY B0, 0xdd0 ; /* 0x000002a000007945 */
/* 0x000fe80003800000 */
/*0b30*/ @P0 STG.E [R8.64], R19 ; /* 0x0000001308000986 */
/* 0x0041e2000c101904 */
/*0b40*/ @P0 BRA 0xdc0 ; /* 0x0000027000000947 */
/* 0x000fea0003800000 */
/*0b50*/ IMAD.SHL.U32 R11, R10.reuse, 0x4, RZ ; /* 0x000000040a0b7824 */
/* 0x041fe200078e00ff */
/*0b60*/ SHF.R.S32.HI R17, RZ, 0x1f, R10 ; /* 0x0000001fff117819 */
/* 0x000fe2000001140a */
/*0b70*/ LDG.E R18, [R14.64] ; /* 0x000000040e127981 */
/* 0x000ea6000c1e1900 */
/*0b80*/ SHF.L.U64.HI R20, R10, 0x2, R17 ; /* 0x000000020a147819 */
/* 0x000fc40000010211 */
/*0b90*/ IADD3 R12, P0, R11, c[0x0][0x178], RZ ; /* 0x00005e000b0c7a10 */
/* 0x000fc80007f1e0ff */
/*0ba0*/ IADD3.X R13, R20, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f00140d7a10 */
/* 0x000fca00007fe4ff */
/*0bb0*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */
/* 0x000ea2000c1e1900 */
/*0bc0*/ BSSY B1, 0xcf0 ; /* 0x0000012000017945 */
/* 0x000fe20003800000 */
/*0bd0*/ MOV R16, R10 ; /* 0x0000000a00107202 */
/* 0x000fe40000000f00 */
/*0be0*/ ISETP.GT.AND P0, PT, R19, R18, PT ; /* 0x000000121300720c */
/* 0x004fda0003f04270 */
/*0bf0*/ @P0 BRA 0xce0 ; /* 0x000000e000000947 */
/* 0x000fea0003800000 */
/*0c00*/ ISETP.GE.AND P0, PT, R19, R18, PT ; /* 0x000000121300720c */
/* 0x000fe20003f06270 */
/*0c10*/ IMAD.MOV.U32 R12, RZ, RZ, R14 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e000e */
/*0c20*/ SHF.R.S32.HI R17, RZ, 0x1f, R2.reuse ; /* 0x0000001fff117819 */
/* 0x100fe20000011402 */
/*0c30*/ IMAD.MOV.U32 R13, RZ, RZ, R15 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e000f */
/*0c40*/ IMAD.MOV.U32 R16, RZ, RZ, R2 ; /* 0x000000ffff107224 */
/* 0x000fd000078e0002 */
/*0c50*/ @!P0 BRA 0xce0 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*0c60*/ IADD3 R14, P0, R11, c[0x0][0x168], RZ ; /* 0x00005a000b0e7a10 */
/* 0x000fe40007f1e0ff */
/*0c70*/ LDG.E R11, [R4.64] ; /* 0x00000004040b7981 */
/* 0x000ea4000c1e1900 */
/*0c80*/ IADD3.X R15, R20, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b00140f7a10 */
/* 0x000fca00007fe4ff */
/*0c90*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea4000c1e1900 */
/*0ca0*/ ISETP.GE.AND P0, PT, R14, R11, PT ; /* 0x0000000b0e00720c */
/* 0x004fc80003f06270 */
/*0cb0*/ SEL R16, R10, R2, !P0 ; /* 0x000000020a107207 */
/* 0x000fc80004000000 */
/*0cc0*/ SHF.R.S32.HI R17, RZ, 0x1f, R16 ; /* 0x0000001fff117819 */
/* 0x000fe20000011410 */
/*0cd0*/ IMAD.WIDE R12, R16, R3, c[0x0][0x178] ; /* 0x00005e00100c7625 */
/* 0x000fc800078e0203 */
/*0ce0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0cf0*/ LEA R10, P0, R16, c[0x0][0x168], 0x2 ; /* 0x00005a00100a7a11 */
/* 0x000fc800078010ff */
/*0d00*/ LEA.HI.X R11, R16, c[0x0][0x16c], R17, 0x2, P0 ; /* 0x00005b00100b7a11 */
/* 0x000fcc00000f1411 */
/*0d10*/ LDG.E R11, [R10.64] ; /* 0x000000040a0b7981 */
/* 0x000ea8000c1e1900 */
/*0d20*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x0041e8000c101904 */
/*0d30*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea8000c1e1900 */
/*0d40*/ STG.E [R8.64], R13 ; /* 0x0000000d08007986 */
/* 0x0041e8000c101904 */
/*0d50*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0d60*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */
/* 0x000ea4000c1e1900 */
/*0d70*/ ISETP.NE.AND P0, PT, R3, R4, PT ; /* 0x000000040300720c */
/* 0x004fda0003f05270 */
/*0d80*/ @P0 IMAD.MOV.U32 R3, RZ, RZ, 0x1 ; /* 0x00000001ff030424 */
/* 0x000fe400078e00ff */
/*0d90*/ @P0 IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x188] ; /* 0x00006200ff0e0624 */
/* 0x000fe400078e00ff */
/*0da0*/ @P0 IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff0f0624 */
/* 0x000fca00078e00ff */
/*0db0*/ @P0 STG.E [R14.64], R3 ; /* 0x000000030e000986 */
/* 0x0001e4000c101904 */
/*0dc0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0dd0*/ IMAD.IADD R2, R0, 0x1, R2 ; /* 0x0000000100027824 */
/* 0x000fca00078e0202 */
/*0de0*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x194], PT ; /* 0x0000650002007a0c */
/* 0x000fda0003f06270 */
/*0df0*/ @!P0 BRA 0x690 ; /* 0xfffff89000008947 */
/* 0x000fea000383ffff */
/*0e00*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0e10*/ BRA 0xe10; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0e20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ea0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0eb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ec0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ed0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ee0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ef0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.globl _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.p2align 8
.type _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii,@function
_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x44
s_load_b64 s[2:3], s[0:1], 0x34
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s4, 16
s_and_b32 s4, s4, 0xffff
v_mad_u64_u32 v[2:3], null, s15, s5, v[1:2]
s_mul_i32 s16, s3, s4
s_mul_i32 s14, s14, s4
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v2, s16
v_add3_u32 v0, s14, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB0_13
s_clause 0x2
s_load_b32 s3, s[0:1], 0x30
s_load_b128 s[12:15], s[0:1], 0x20
s_load_b256 s[4:11], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v7, 1
s_ashr_i32 s17, s16, 31
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_2)
v_lshlrev_b64 v[1:2], 2, v[0:1]
s_lshl_b64 s[18:19], s[16:17], 2
s_waitcnt lgkmcnt(0)
s_sub_i32 s3, 0, s3
s_branch .LBB0_4
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s17
.LBB0_3:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_or_b32 exec_lo, exec_lo, s0
v_add_nc_u32_e32 v0, s16, v0
v_add_co_u32 v1, s0, v1, s18
v_add_co_ci_u32_e64 v2, s0, s19, v2, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s2, v0
s_or_b32 s1, vcc_lo, s1
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_13
.LBB0_4:
v_add_nc_u32_e32 v3, s3, v0
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_max_i32_e32 v3, 0, v3
v_lshlrev_b64 v[5:6], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s4, v5
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v10, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v11, vcc_lo, s5, v2, vcc_lo
s_clause 0x1
global_load_b32 v8, v[8:9], off
global_load_b32 v9, v[10:11], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e64 v8, v9
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB0_6
v_add_co_u32 v5, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v2, vcc_lo
global_load_b32 v3, v[5:6], off
v_add_co_u32 v5, vcc_lo, s8, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s9, v2, vcc_lo
v_add_co_u32 v8, vcc_lo, s10, v1
v_add_co_ci_u32_e32 v9, vcc_lo, s11, v2, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v3, off
global_load_b32 v3, v[8:9], off
v_add_co_u32 v5, vcc_lo, s12, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s13, v2, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v3, off
.LBB0_6:
s_and_not1_saveexec_b32 s0, s0
s_cbranch_execz .LBB0_3
v_add_co_u32 v8, vcc_lo, s10, v5
v_add_co_ci_u32_e32 v9, vcc_lo, s11, v6, vcc_lo
v_add_co_u32 v10, vcc_lo, s10, v1
v_add_co_ci_u32_e32 v11, vcc_lo, s11, v2, vcc_lo
s_mov_b32 s17, exec_lo
s_clause 0x1
global_load_b32 v8, v[8:9], off
global_load_b32 v9, v[10:11], off
s_waitcnt vmcnt(0)
v_cmpx_le_i32_e64 v8, v9
s_cbranch_execz .LBB0_11
v_cmp_ge_i32_e32 vcc_lo, v8, v9
v_mov_b32_e32 v8, v0
s_and_saveexec_b32 s20, vcc_lo
s_cbranch_execz .LBB0_10
v_add_co_u32 v5, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v6, vcc_lo
v_add_co_u32 v8, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v5, v[5:6], off
global_load_b32 v6, v[8:9], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v5, v6
v_cndmask_b32_e32 v8, v0, v3, vcc_lo
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s20
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v3, v8
.LBB0_11:
s_or_b32 exec_lo, exec_lo, s17
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v3
v_mov_b32_e32 v5, v3
s_mov_b32 s17, exec_lo
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v6, vcc_lo
global_load_b32 v3, v[8:9], off
v_add_co_u32 v8, vcc_lo, s8, v1
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v2, vcc_lo
v_add_co_u32 v5, vcc_lo, s10, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s11, v6, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[8:9], v3, off
global_load_b32 v3, v[5:6], off
v_add_co_u32 v5, vcc_lo, s12, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s13, v2, vcc_lo
v_add_co_u32 v10, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v2, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v3, off
global_load_b32 v3, v[8:9], off
global_load_b32 v5, v[10:11], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e64 v3, v5
s_cbranch_execz .LBB0_2
global_store_b32 v4, v7, s[14:15]
s_branch .LBB0_2
.LBB0_13:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 21
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, .Lfunc_end0-_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 52
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 23
.sgpr_spill_count: 0
.symbol: _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00019e3c_00000000-6_strongestNeighborScan.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii
.type _Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii, @function
_Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii:
.LFB2081:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii, .-_Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii
.globl _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.type _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, @function
_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z57__device_stub__Z25strongestNeighborScan_gpuPiS_S_S_S_S_iiPiS_S_S_S_S_ii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, .-_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "strongestNeighborScan.hip"
.globl _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii # -- Begin function _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.p2align 4, 0x90
.type _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii,@function
_Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii: # @_Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii, .Lfunc_end0-_Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii,@object # @_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.section .rodata,"a",@progbits
.globl _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.p2align 3, 0x0
_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii:
.quad _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.size _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii"
.size .L__unnamed_1, 44
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z40__device_stub__strongestNeighborScan_gpuPiS_S_S_S_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z25strongestNeighborScan_gpuPiS_S_S_S_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
*To find sum of two large matrices
*Compute the speed up obtained by GPU
*/
#include <stdio.h>
const int ROW_SIZE = 300, COL_SIZE = 400;
const int MATRIX_BYTES = ROW_SIZE * COL_SIZE * sizeof(int);
//kernal
__global__ void sum(int d_sum[][ROW_SIZE], int d_a[][ROW_SIZE], int d_b[][ROW_SIZE]) {
d_sum[blockIdx.x][threadIdx.x] = d_a[blockIdx.x][threadIdx.x] + d_b[blockIdx.x][threadIdx.x];
}
//to check the final result
int checkSum(int h_a[][ROW_SIZE], int h_b[][ROW_SIZE], int h_sum[][ROW_SIZE]) {
int flag = 1;
for(int i = 0; i < COL_SIZE; i++) {
for(int j = 0; j < ROW_SIZE; j++) {
if(h_sum[i][j] != h_a[i][j] + h_b[i][j]) {
flag = 0;
break;
}
}
}
return flag;
}
int main() {
int h_a[COL_SIZE][ROW_SIZE], h_b[COL_SIZE][ROW_SIZE], h_sum[COL_SIZE][ROW_SIZE];
for(int i = 0; i < COL_SIZE; i++) {
for(int j = 0; j < ROW_SIZE; j++) {
h_a[i][j] = ((int)rand())%1000;
h_b[i][j] = ((int)rand())%1000;
}
}
int (*d_a)[ROW_SIZE], (*d_b)[ROW_SIZE], (*d_sum)[ROW_SIZE];
cudaMalloc((void**) &d_a, MATRIX_BYTES);
cudaMalloc((void**) &d_b, MATRIX_BYTES);
cudaMalloc((void**) &d_sum, MATRIX_BYTES);
cudaMemcpy(d_a, h_a, MATRIX_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, MATRIX_BYTES, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
sum<<<COL_SIZE, ROW_SIZE>>>(d_sum, d_a, d_b);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaMemcpy(h_sum, d_sum, MATRIX_BYTES, cudaMemcpyDeviceToHost);
if(checkSum(h_a, h_b, h_sum)) {
printf("The result is computed successfully!\n");
cudaEventElapsedTime(&time, start, stop);
printf("Computation time taken by device: %f\n", time);
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for(int i = 0; i < COL_SIZE; i++) {
for(int j = 0; j < ROW_SIZE; j++) {
h_sum[i][j] = h_a[i][j] + h_b[i][j];
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Computation time taken by host: %f\n", time);
}
else {
printf("The result is not computed correctly!");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_sum);
return 0;
} | code for sm_80
Function : _Z3sumPA300_iS0_S0_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 7.152557373046875e-05 ; /* 0x000004b0ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e6e0000002100 */
/*0050*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x001fc800078e0007 */
/*0060*/ IMAD.WIDE.U32 R4, R6, R7, c[0x0][0x170] ; /* 0x00005c0006047625 */
/* 0x000fc800078e0007 */
/*0070*/ IMAD.WIDE.U32 R2, R9, 0x4, R2 ; /* 0x0000000409027825 */
/* 0x002fc800078e0002 */
/*0080*/ IMAD.WIDE.U32 R4, R9, 0x4, R4 ; /* 0x0000000409047825 */
/* 0x000fe400078e0004 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00b0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fcc00078e0007 */
/*00c0*/ IMAD.WIDE.U32 R6, R9, 0x4, R6 ; /* 0x0000000409067825 */
/* 0x000fe200078e0006 */
/*00d0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
*To find sum of two large matrices
*Compute the speed up obtained by GPU
*/
#include <stdio.h>
const int ROW_SIZE = 300, COL_SIZE = 400;
const int MATRIX_BYTES = ROW_SIZE * COL_SIZE * sizeof(int);
//kernel: elementwise matrix add, one thread per element.
//Launched as <<<COL_SIZE, ROW_SIZE>>>: blockIdx.x selects the row,
//threadIdx.x selects the column.
__global__ void sum(int d_sum[][ROW_SIZE], int d_a[][ROW_SIZE], int d_b[][ROW_SIZE]) {
	const unsigned int row = blockIdx.x;	// one block per matrix row
	const unsigned int col = threadIdx.x;	// one thread per column entry
	d_sum[row][col] = d_a[row][col] + d_b[row][col];
}
//to check the final result: returns 1 if every element of h_sum equals
//h_a + h_b elementwise, 0 otherwise.
//Fix: the original set flag = 0 and `break`, but `break` only left the inner
//loop, so every remaining row was still scanned after a mismatch; we now
//return as soon as the first mismatch is found. The returned value is the same.
int checkSum(int h_a[][ROW_SIZE], int h_b[][ROW_SIZE], int h_sum[][ROW_SIZE]) {
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			if(h_sum[i][j] != h_a[i][j] + h_b[i][j]) {
				return 0;	// first mismatch — no need to keep scanning
			}
		}
	}
	return 1;	// all COL_SIZE x ROW_SIZE elements matched
}
// Driver: builds two random 400x300 int matrices, adds them on the GPU and
// on the CPU, verifies the GPU result, and reports both timings.
int main() {
	// Three 400x300 int matrices on the stack (480000 B each, ~469 KB).
	// NOTE(review): ~1.4 MB total stack use — confirm the stack limit allows it.
	int h_a[COL_SIZE][ROW_SIZE], h_b[COL_SIZE][ROW_SIZE], h_sum[COL_SIZE][ROW_SIZE];
	// Fill both inputs with pseudo-random values in [0, 999].
	// rand() is never seeded, so the sequence repeats on every run.
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			h_a[i][j] = ((int)rand())%1000;
			h_b[i][j] = ((int)rand())%1000;
		}
	}
	// Device-side copies of the three matrices.
	// NOTE(review): every CUDA return code below is unchecked.
	int (*d_a)[ROW_SIZE], (*d_b)[ROW_SIZE], (*d_sum)[ROW_SIZE];
	cudaMalloc((void**) &d_a, MATRIX_BYTES);
	cudaMalloc((void**) &d_b, MATRIX_BYTES);
	cudaMalloc((void**) &d_sum, MATRIX_BYTES);
	cudaMemcpy(d_a, h_a, MATRIX_BYTES, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, MATRIX_BYTES, cudaMemcpyHostToDevice);
	// Time the kernel with CUDA events on stream 0.
	cudaEvent_t start, stop;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	// One block per row (COL_SIZE blocks), one thread per column (ROW_SIZE threads).
	sum<<<COL_SIZE, ROW_SIZE>>>(d_sum, d_a, d_b);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaMemcpy(h_sum, d_sum, MATRIX_BYTES, cudaMemcpyDeviceToHost);
	if(checkSum(h_a, h_b, h_sum)) {
		printf("The result is computed successfully!\n");
		cudaEventElapsedTime(&time, start, stop);
		printf("Computation time taken by device: %f\n", time);
		// Re-time the same addition on the host; these declarations
		// deliberately shadow the outer start/stop/time.
		cudaEvent_t start, stop;
		float time;
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
		cudaEventRecord(start, 0);
		for(int i = 0; i < COL_SIZE; i++) {
			for(int j = 0; j < ROW_SIZE; j++) {
				h_sum[i][j] = h_a[i][j] + h_b[i][j];
			}
		}
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&time, start, stop);
		printf("Computation time taken by host: %f\n", time);
	}
	else {
		printf("The result is not computed correctly!");
	}
	// Release device memory. NOTE(review): the events are never destroyed
	// (cudaEventDestroy) — minor leak; confirm acceptable for this demo.
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_sum);
	return 0;
} | .file "tmpxft_000a8442_00000000-6_A0Q2.cudafe1.cpp"
.text
#APP
#NO_APP
	# atexit hook: unregisters the fat binary handle saved in
	# _ZL20__cudaFatCubinHandle via __cudaUnregisterFatBinary.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
	.cfi_startproc
	endbr64
	subq	$8, %rsp		# realign stack to 16 before the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# arg0 = saved fatbin handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2061:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	# int checkSum(int (*h_a)[300], int (*h_b)[300], int (*h_sum)[300])
	# SysV AMD64: rdi=h_a, rsi=h_b, rdx=h_sum; result in eax (1 = all equal).
	# r8 walks rows as a byte offset (1200 B = 300 ints per row; 480000 B total);
	# rax walks one row; ebx is the flag, ebp holds the constant 0 that is
	# copied into ebx at .L8 when a mismatch is seen.
	.globl	_Z8checkSumPA300_iS0_S0_
	.type	_Z8checkSumPA300_iS0_S0_, @function
_Z8checkSumPA300_iS0_S0_:
.LFB2057:
	.cfi_startproc
	endbr64
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	movq	%rdi, %r11		# r11 = h_a
	movq	%rsi, %r10		# r10 = h_b
	movq	%rdx, %r9		# r9  = h_sum
	movl	$0, %r8d		# row byte offset = 0
	movl	$1, %ebx		# flag = 1
	movl	$0, %ebp		# constant 0 for the mismatch path
	jmp	.L4
.L8:
	# mismatch found: flag = 0 (outer loop still continues, matching the C source)
	movl	%ebp, %ebx
.L5:
	addq	$1200, %r8		# next row
	cmpq	$480000, %r8		# 400 rows * 1200 B
	je	.L3
.L4:
	leaq	(%r9,%r8), %rdi		# rdi = &h_sum[i][0]
	leaq	(%r11,%r8), %rsi	# rsi = &h_a[i][0]
	leaq	(%r10,%r8), %rcx	# rcx = &h_b[i][0]
	movl	$0, %eax
.L6:
	# inner loop: compare h_sum[i][j] against h_a[i][j] + h_b[i][j]
	movl	(%rcx,%rax), %edx
	addl	(%rsi,%rax), %edx
	cmpl	%edx, (%rdi,%rax)
	jne	.L8
	addq	$4, %rax
	cmpq	$1200, %rax
	jne	.L6
	jmp	.L5
.L3:
	movl	%ebx, %eax		# return flag
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2057:
	.size	_Z8checkSumPA300_iS0_S0_, .-_Z8checkSumPA300_iS0_S0_
	# Host-side launch stub for sum<<<...>>>: spills the three device pointers
	# to the stack, builds the kernel-argument pointer array at 96..112(%rsp),
	# pops the launch configuration pushed by the caller, and forwards it to
	# cudaLaunchKernel. Protected by a stack canary (%fs:40).
	.globl	_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_
	.type	_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_, @function
_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_:
.LFB2083:
	.cfi_startproc
	endbr64
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
	movq	%rdi, 24(%rsp)		# d_sum
	movq	%rsi, 16(%rsp)		# d_a
	movq	%rdx, 8(%rsp)		# d_b
	movq	%fs:40, %rax		# install stack canary
	movq	%rax, 120(%rsp)
	xorl	%eax, %eax
	leaq	24(%rsp), %rax		# args[0] = &d_sum
	movq	%rax, 96(%rsp)
	leaq	16(%rsp), %rax		# args[1] = &d_a
	movq	%rax, 104(%rsp)
	leaq	8(%rsp), %rax		# args[2] = &d_b
	movq	%rax, 112(%rsp)
	# init grid/block dims to (1,1,1); __cudaPopCallConfiguration fills in
	# the real values through the pointers passed below
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L15			# 0 => config popped, launch the kernel
.L11:
	movq	120(%rsp), %rax		# verify stack canary
	subq	%fs:40, %rax
	jne	.L16
	addq	$136, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L15:
	.cfi_restore_state
	pushq	40(%rsp)		# stream / shared-mem args go on the stack
	.cfi_def_cfa_offset 152
	pushq	40(%rsp)
	.cfi_def_cfa_offset 160
	leaq	112(%rsp), %r9		# kernel-arg array (offset moved by pushes)
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	leaq	_Z3sumPA300_iS0_S0_(%rip), %rdi	# kernel identified by host symbol
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 144
	jmp	.L11
.L16:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2083:
	.size	_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_, .-_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_
	# Host-side stand-in for the `sum` kernel symbol: its address identifies
	# the kernel to the runtime, and calling it forwards to the launch stub.
	.globl	_Z3sumPA300_iS0_S0_
	.type	_Z3sumPA300_iS0_S0_, @function
_Z3sumPA300_iS0_S0_:
.LFB2084:
	.cfi_startproc
	endbr64
	subq	$8, %rsp		# keep 16-byte alignment at the call
	.cfi_def_cfa_offset 16
	call	_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2084:
	.size	_Z3sumPA300_iS0_S0_, .-_Z3sumPA300_iS0_S0_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "The result is computed successfully!\n"
.align 8
.LC1:
.string "Computation time taken by device: %f\n"
.align 8
.LC2:
.string "Computation time taken by host: %f\n"
.align 8
.LC3:
.string "The result is not computed correctly!"
.text
	# main() for the CUDA build (GCC codegen).
	# Frame: ~1.44 MB of locals — h_a at 80(%rsp), h_b at 480080(%rsp),
	# h_sum at 960080(%rsp); device pointers at 8/16/24(%rsp), events at
	# 32/40(%rsp) (outer) and 48/64(%rsp) (inner), floats at 0/4(%rsp).
	.globl	main
	.type	main, @function
main:
.LFB2058:
	.cfi_startproc
	endbr64
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	leaq	-1437696(%rsp), %r11
	.cfi_def_cfa 11, 1437720
.LPSRL0:
	# stack-clash probe loop: grow the frame 4 KB at a time, touching each page
	subq	$4096, %rsp
	orq	$0, (%rsp)
	cmpq	%r11, %rsp
	jne	.LPSRL0
	.cfi_def_cfa_register 7
	subq	$2408, %rsp
	.cfi_def_cfa_offset 1440128
	movq	%fs:40, %rax		# stack canary
	movq	%rax, 1440088(%rsp)
	xorl	%eax, %eax
	movl	$1200, %ebp		# rbp = end-of-row byte offset
.L20:
	leaq	-1200(%rbp), %rbx	# rbx = start-of-row byte offset
.L21:
	# fill h_a[80(%rsp)] and h_b[480080(%rsp)] with rand() % 1000
	call	rand@PLT
	movslq	%eax, %rdx
	imulq	$274877907, %rdx, %rdx	# % 1000 via multiplicative inverse
	sarq	$38, %rdx
	movl	%eax, %ecx
	sarl	$31, %ecx
	subl	%ecx, %edx
	imull	$1000, %edx, %edx
	subl	%edx, %eax
	movl	%eax, 80(%rsp,%rbx)	# h_a[i][j]
	call	rand@PLT
	movslq	%eax, %rdx
	imulq	$274877907, %rdx, %rdx
	sarq	$38, %rdx
	movl	%eax, %ecx
	sarl	$31, %ecx
	subl	%ecx, %edx
	imull	$1000, %edx, %edx
	subl	%edx, %eax
	movl	%eax, 480080(%rsp,%rbx)	# h_b[i][j]
	addq	$4, %rbx
	cmpq	%rbp, %rbx
	jne	.L21
	addq	$1200, %rbp		# next row (1200 B = 300 ints)
	cmpq	$481200, %rbp
	jne	.L20
	# three cudaMalloc(&d_x, 480000) calls
	leaq	8(%rsp), %rdi
	movl	$480000, %esi
	call	cudaMalloc@PLT
	leaq	16(%rsp), %rdi
	movl	$480000, %esi
	call	cudaMalloc@PLT
	leaq	24(%rsp), %rdi
	movl	$480000, %esi
	call	cudaMalloc@PLT
	# cudaMemcpy(d_a, h_a, 480000, HostToDevice=1)
	leaq	80(%rsp), %rsi
	movl	$1, %ecx
	movl	$480000, %edx
	movq	8(%rsp), %rdi
	call	cudaMemcpy@PLT
	# cudaMemcpy(d_b, h_b, 480000, HostToDevice=1)
	leaq	480080(%rsp), %rsi
	movl	$1, %ecx
	movl	$480000, %edx
	movq	16(%rsp), %rdi
	call	cudaMemcpy@PLT
	leaq	32(%rsp), %rdi
	call	cudaEventCreate@PLT
	leaq	40(%rsp), %rdi
	call	cudaEventCreate@PLT
	movl	$0, %esi
	movq	32(%rsp), %rdi
	call	cudaEventRecord@PLT
	# launch config: block = (300,1,1) at 64(%rsp), grid = (400,1,1) at 48(%rsp)
	movl	$300, 64(%rsp)
	movl	$1, 68(%rsp)
	movl	$1, 72(%rsp)
	movl	$400, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$0, %r9d
	movl	$0, %r8d
	movq	64(%rsp), %rdx
	movl	$1, %ecx
	movq	48(%rsp), %rdi
	movl	$1, %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L33			# 0 => call the launch stub
.L23:
	movl	$0, %esi
	movq	40(%rsp), %rdi
	call	cudaEventRecord@PLT
	movq	40(%rsp), %rdi
	call	cudaEventSynchronize@PLT
	# cudaMemcpy(h_sum, d_sum, 480000, DeviceToHost=2)
	leaq	960080(%rsp), %rbx
	movl	$2, %ecx
	movl	$480000, %edx
	movq	24(%rsp), %rsi
	movq	%rbx, %rdi
	call	cudaMemcpy@PLT
	# checkSum(h_a, h_b, h_sum)
	leaq	480080(%rsp), %rsi
	leaq	80(%rsp), %rdi
	movq	%rbx, %rdx
	call	_Z8checkSumPA300_iS0_S0_
	testl	%eax, %eax
	je	.L24			# mismatch -> failure message
	leaq	.LC0(%rip), %rsi
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT
	movq	%rsp, %rdi
	movq	40(%rsp), %rdx
	movq	32(%rsp), %rsi
	call	cudaEventElapsedTime@PLT
	pxor	%xmm0, %xmm0
	cvtss2sd	(%rsp), %xmm0	# float -> double for printf
	leaq	.LC1(%rip), %rsi
	movl	$2, %edi
	movl	$1, %eax
	call	__printf_chk@PLT
	# inner (shadowed) events for the host-side timing
	leaq	48(%rsp), %rdi
	call	cudaEventCreate@PLT
	leaq	64(%rsp), %rdi
	call	cudaEventCreate@PLT
	movl	$0, %esi
	movq	48(%rsp), %rdi
	call	cudaEventRecord@PLT
	# host-side addition: h_sum = h_a + h_b, row by row
	leaq	80(%rsp), %rcx
	leaq	480080(%rsp), %rdi
	movq	%rbx, %rsi
	movq	%rdi, %r8		# r8 = loop-end sentinel (start of h_b)
.L25:
	movl	$0, %eax
.L26:
	movl	(%rdi,%rax), %edx
	addl	(%rcx,%rax), %edx
	movl	%edx, (%rsi,%rax)
	addq	$4, %rax
	cmpq	$1200, %rax
	jne	.L26
	addq	$1200, %rcx
	addq	$1200, %rdi
	addq	$1200, %rsi
	cmpq	%r8, %rcx
	jne	.L25
	movl	$0, %esi
	movq	64(%rsp), %rdi
	call	cudaEventRecord@PLT
	movq	64(%rsp), %rdi
	call	cudaEventSynchronize@PLT
	leaq	4(%rsp), %rdi
	movq	64(%rsp), %rdx
	movq	48(%rsp), %rsi
	call	cudaEventElapsedTime@PLT
	pxor	%xmm0, %xmm0
	cvtss2sd	4(%rsp), %xmm0
	leaq	.LC2(%rip), %rsi
	movl	$2, %edi
	movl	$1, %eax
	call	__printf_chk@PLT
.L28:
	# common exit: free the three device buffers, check canary, return 0
	movq	8(%rsp), %rdi
	call	cudaFree@PLT
	movq	16(%rsp), %rdi
	call	cudaFree@PLT
	movq	24(%rsp), %rdi
	call	cudaFree@PLT
	movq	1440088(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L34
	movl	$0, %eax
	addq	$1440104, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 24
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
.L33:
	.cfi_restore_state
	# sum<<<400, 300>>>(d_sum, d_a, d_b) via the device stub
	movq	16(%rsp), %rdx
	movq	8(%rsp), %rsi
	movq	24(%rsp), %rdi
	call	_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_
	jmp	.L23
.L24:
	leaq	.LC3(%rip), %rsi
	movl	$2, %edi
	movl	$0, %eax
	call	__printf_chk@PLT
	jmp	.L28
.L34:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2058:
	.size	main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "_Z3sumPA300_iS0_S0_"
.text
	# Static constructor (.init_array): registers the embedded fat binary and
	# the `sum` kernel with the CUDA runtime, then installs the unregister
	# hook via atexit.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)	# save handle for atexit hook
	# four trailing NULL pointer args of __cudaRegisterFunction go on the stack
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d		# thread limit = -1 (none)
	leaq	.LC4(%rip), %rdx	# device function name string
	movq	%rdx, %rcx
	leaq	_Z3sumPA300_iS0_S0_(%rip), %rsi	# host stand-in symbol
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2086:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
*To find sum of two large matrices
*Compute the speed up obtained by GPU
*/
#include <stdio.h>
const int ROW_SIZE = 300, COL_SIZE = 400;
const int MATRIX_BYTES = ROW_SIZE * COL_SIZE * sizeof(int);
//kernel: elementwise matrix add, one thread per element.
//Launched as <<<COL_SIZE, ROW_SIZE>>>: blockIdx.x selects the row,
//threadIdx.x selects the column.
__global__ void sum(int d_sum[][ROW_SIZE], int d_a[][ROW_SIZE], int d_b[][ROW_SIZE]) {
	const unsigned int row = blockIdx.x;	// one block per matrix row
	const unsigned int col = threadIdx.x;	// one thread per column entry
	d_sum[row][col] = d_a[row][col] + d_b[row][col];
}
//to check the final result: returns 1 if every element of h_sum equals
//h_a + h_b elementwise, 0 otherwise.
//Fix: the original set flag = 0 and `break`, but `break` only left the inner
//loop, so every remaining row was still scanned after a mismatch; we now
//return as soon as the first mismatch is found. The returned value is the same.
int checkSum(int h_a[][ROW_SIZE], int h_b[][ROW_SIZE], int h_sum[][ROW_SIZE]) {
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			if(h_sum[i][j] != h_a[i][j] + h_b[i][j]) {
				return 0;	// first mismatch — no need to keep scanning
			}
		}
	}
	return 1;	// all COL_SIZE x ROW_SIZE elements matched
}
// Driver: random 400x300 inputs, GPU add + verification, then the same
// addition timed on the host. See checkSum/sum above.
int main() {
	// Three 400x300 int matrices on the stack (~469 KB each).
	int h_a[COL_SIZE][ROW_SIZE], h_b[COL_SIZE][ROW_SIZE], h_sum[COL_SIZE][ROW_SIZE];
	// Unseeded rand(): same values every run; range [0, 999].
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			h_a[i][j] = ((int)rand())%1000;
			h_b[i][j] = ((int)rand())%1000;
		}
	}
	// Device buffers. NOTE(review): CUDA return codes are unchecked throughout.
	int (*d_a)[ROW_SIZE], (*d_b)[ROW_SIZE], (*d_sum)[ROW_SIZE];
	cudaMalloc((void**) &d_a, MATRIX_BYTES);
	cudaMalloc((void**) &d_b, MATRIX_BYTES);
	cudaMalloc((void**) &d_sum, MATRIX_BYTES);
	cudaMemcpy(d_a, h_a, MATRIX_BYTES, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, MATRIX_BYTES, cudaMemcpyHostToDevice);
	// Kernel timing via events on stream 0.
	cudaEvent_t start, stop;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	sum<<<COL_SIZE, ROW_SIZE>>>(d_sum, d_a, d_b);	// one block per row
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaMemcpy(h_sum, d_sum, MATRIX_BYTES, cudaMemcpyDeviceToHost);
	if(checkSum(h_a, h_b, h_sum)) {
		printf("The result is computed successfully!\n");
		cudaEventElapsedTime(&time, start, stop);
		printf("Computation time taken by device: %f\n", time);
		// Inner declarations deliberately shadow the outer start/stop/time.
		cudaEvent_t start, stop;
		float time;
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
		cudaEventRecord(start, 0);
		for(int i = 0; i < COL_SIZE; i++) {
			for(int j = 0; j < ROW_SIZE; j++) {
				h_sum[i][j] = h_a[i][j] + h_b[i][j];
			}
		}
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&time, start, stop);
		printf("Computation time taken by host: %f\n", time);
	}
	else {
		printf("The result is not computed correctly!");
	}
	// NOTE(review): events are never cudaEventDestroy'ed — minor leak.
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_sum);
	return 0;
} | /*
*To find sum of two large matrices
*Compute the speed up obtained by GPU
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
const int ROW_SIZE = 300, COL_SIZE = 400;
const int MATRIX_BYTES = ROW_SIZE * COL_SIZE * sizeof(int);
//kernel: elementwise matrix add, one thread per element.
//Launched as <<<COL_SIZE, ROW_SIZE>>>: blockIdx.x selects the row,
//threadIdx.x selects the column.
__global__ void sum(int d_sum[][ROW_SIZE], int d_a[][ROW_SIZE], int d_b[][ROW_SIZE]) {
	const unsigned int row = blockIdx.x;	// one block per matrix row
	const unsigned int col = threadIdx.x;	// one thread per column entry
	d_sum[row][col] = d_a[row][col] + d_b[row][col];
}
//to check the final result: returns 1 if every element of h_sum equals
//h_a + h_b elementwise, 0 otherwise.
//Fix: the original set flag = 0 and `break`, but `break` only left the inner
//loop, so every remaining row was still scanned after a mismatch; we now
//return as soon as the first mismatch is found. The returned value is the same.
int checkSum(int h_a[][ROW_SIZE], int h_b[][ROW_SIZE], int h_sum[][ROW_SIZE]) {
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			if(h_sum[i][j] != h_a[i][j] + h_b[i][j]) {
				return 0;	// first mismatch — no need to keep scanning
			}
		}
	}
	return 1;	// all COL_SIZE x ROW_SIZE elements matched
}
// Driver (HIP port): random 400x300 inputs, GPU add + verification, then the
// same addition timed on the host.
int main() {
	// Three 400x300 int matrices on the stack (~469 KB each).
	int h_a[COL_SIZE][ROW_SIZE], h_b[COL_SIZE][ROW_SIZE], h_sum[COL_SIZE][ROW_SIZE];
	// Unseeded rand(): same values every run; range [0, 999].
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			h_a[i][j] = ((int)rand())%1000;
			h_b[i][j] = ((int)rand())%1000;
		}
	}
	// Device buffers. NOTE(review): HIP return codes are unchecked throughout.
	int (*d_a)[ROW_SIZE], (*d_b)[ROW_SIZE], (*d_sum)[ROW_SIZE];
	hipMalloc((void**) &d_a, MATRIX_BYTES);
	hipMalloc((void**) &d_b, MATRIX_BYTES);
	hipMalloc((void**) &d_sum, MATRIX_BYTES);
	hipMemcpy(d_a, h_a, MATRIX_BYTES, hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b, MATRIX_BYTES, hipMemcpyHostToDevice);
	// Kernel timing via events on stream 0.
	hipEvent_t start, stop;
	float time;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
	sum<<<COL_SIZE, ROW_SIZE>>>(d_sum, d_a, d_b);	// one block per row
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipMemcpy(h_sum, d_sum, MATRIX_BYTES, hipMemcpyDeviceToHost);
	if(checkSum(h_a, h_b, h_sum)) {
		printf("The result is computed successfully!\n");
		hipEventElapsedTime(&time, start, stop);
		printf("Computation time taken by device: %f\n", time);
		// Inner declarations deliberately shadow the outer start/stop/time.
		hipEvent_t start, stop;
		float time;
		hipEventCreate(&start);
		hipEventCreate(&stop);
		hipEventRecord(start, 0);
		for(int i = 0; i < COL_SIZE; i++) {
			for(int j = 0; j < ROW_SIZE; j++) {
				h_sum[i][j] = h_a[i][j] + h_b[i][j];
			}
		}
		hipEventRecord(stop, 0);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&time, start, stop);
		printf("Computation time taken by host: %f\n", time);
	}
	else {
		printf("The result is not computed correctly!");
	}
	// NOTE(review): events are never hipEventDestroy'ed — minor leak.
	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_sum);
	return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
*To find sum of two large matrices
*Compute the speed up obtained by GPU
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
const int ROW_SIZE = 300, COL_SIZE = 400;
const int MATRIX_BYTES = ROW_SIZE * COL_SIZE * sizeof(int);
//kernel: elementwise matrix add, one thread per element.
//Launched as <<<COL_SIZE, ROW_SIZE>>>: blockIdx.x selects the row,
//threadIdx.x selects the column.
__global__ void sum(int d_sum[][ROW_SIZE], int d_a[][ROW_SIZE], int d_b[][ROW_SIZE]) {
	const unsigned int row = blockIdx.x;	// one block per matrix row
	const unsigned int col = threadIdx.x;	// one thread per column entry
	d_sum[row][col] = d_a[row][col] + d_b[row][col];
}
//to check the final result: returns 1 if every element of h_sum equals
//h_a + h_b elementwise, 0 otherwise.
//Fix: the original set flag = 0 and `break`, but `break` only left the inner
//loop, so every remaining row was still scanned after a mismatch; we now
//return as soon as the first mismatch is found. The returned value is the same.
int checkSum(int h_a[][ROW_SIZE], int h_b[][ROW_SIZE], int h_sum[][ROW_SIZE]) {
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			if(h_sum[i][j] != h_a[i][j] + h_b[i][j]) {
				return 0;	// first mismatch — no need to keep scanning
			}
		}
	}
	return 1;	// all COL_SIZE x ROW_SIZE elements matched
}
// Driver (HIP): random 400x300 inputs, GPU add + verification, then the
// same addition timed on the host.
int main() {
	// Three 400x300 int matrices on the stack (~469 KB each).
	int h_a[COL_SIZE][ROW_SIZE], h_b[COL_SIZE][ROW_SIZE], h_sum[COL_SIZE][ROW_SIZE];
	// Unseeded rand(): same values every run; range [0, 999].
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			h_a[i][j] = ((int)rand())%1000;
			h_b[i][j] = ((int)rand())%1000;
		}
	}
	// Device buffers. NOTE(review): HIP return codes are unchecked throughout.
	int (*d_a)[ROW_SIZE], (*d_b)[ROW_SIZE], (*d_sum)[ROW_SIZE];
	hipMalloc((void**) &d_a, MATRIX_BYTES);
	hipMalloc((void**) &d_b, MATRIX_BYTES);
	hipMalloc((void**) &d_sum, MATRIX_BYTES);
	hipMemcpy(d_a, h_a, MATRIX_BYTES, hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b, MATRIX_BYTES, hipMemcpyHostToDevice);
	// Kernel timing via events on stream 0.
	hipEvent_t start, stop;
	float time;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
	sum<<<COL_SIZE, ROW_SIZE>>>(d_sum, d_a, d_b);	// one block per row
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipMemcpy(h_sum, d_sum, MATRIX_BYTES, hipMemcpyDeviceToHost);
	if(checkSum(h_a, h_b, h_sum)) {
		printf("The result is computed successfully!\n");
		hipEventElapsedTime(&time, start, stop);
		printf("Computation time taken by device: %f\n", time);
		// Inner declarations deliberately shadow the outer start/stop/time.
		hipEvent_t start, stop;
		float time;
		hipEventCreate(&start);
		hipEventCreate(&stop);
		hipEventRecord(start, 0);
		for(int i = 0; i < COL_SIZE; i++) {
			for(int j = 0; j < ROW_SIZE; j++) {
				h_sum[i][j] = h_a[i][j] + h_b[i][j];
			}
		}
		hipEventRecord(stop, 0);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&time, start, stop);
		printf("Computation time taken by host: %f\n", time);
	}
	else {
		printf("The result is not computed correctly!");
	}
	// NOTE(review): events are never hipEventDestroy'ed — minor leak.
	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_sum);
	return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
	.protected	_Z3sumPA300_iS0_S0_
	.globl	_Z3sumPA300_iS0_S0_
	.p2align	8
	.type	_Z3sumPA300_iS0_S0_,@function
// sum kernel (gfx1100): d_sum[bid][tid] = d_a[bid][tid] + d_b[bid][tid].
// kernarg block (24 B, pointed to by s[0:1]): +0 = d_sum, +8 = d_a, +16 = d_b.
// s15 = workgroup id X (matrix row); v0 = workitem id X (column).
// Row pitch is 0x4b0 = 1200 bytes = 300 ints (ROW_SIZE).
_Z3sumPA300_iS0_S0_:
	s_clause 0x1
	s_load_b128 s[4:7], s[0:1], 0x0		// s[4:5] = d_sum, s[6:7] = d_a
	s_load_b64 s[0:1], s[0:1], 0x10		// s[0:1] = d_b
	s_mul_hi_u32 s8, s15, 0x4b0		// {s15,s8} = 64-bit row offset = bid*1200
	s_mulk_i32 s15, 0x4b0
	v_lshlrev_b32_e32 v0, 2, v0		// column byte offset = tid*4
	s_waitcnt lgkmcnt(0)			// wait for kernarg loads
	s_add_u32 s2, s6, s15			// s[2:3] = &d_a[row]
	s_addc_u32 s3, s7, s8
	s_add_u32 s0, s0, s15			// s[0:1] = &d_b[row]
	s_addc_u32 s1, s1, s8
	s_clause 0x1
	global_load_b32 v1, v0, s[2:3]		// v1 = d_a[row][col]
	global_load_b32 v2, v0, s[0:1]		// v2 = d_b[row][col]
	s_add_u32 s0, s4, s15			// s[0:1] = &d_sum[row]
	s_addc_u32 s1, s5, s8
	s_waitcnt vmcnt(0)			// wait for both loads
	v_add_nc_u32_e32 v1, v2, v1		// v1 = a + b
	global_store_b32 v0, v1, s[0:1]		// d_sum[row][col] = v1
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3sumPA300_iS0_S0_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3sumPA300_iS0_S0_, .Lfunc_end0-_Z3sumPA300_iS0_S0_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3sumPA300_iS0_S0_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3sumPA300_iS0_S0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
*To find sum of two large matrices
*Compute the speed up obtained by GPU
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
const int ROW_SIZE = 300, COL_SIZE = 400;
const int MATRIX_BYTES = ROW_SIZE * COL_SIZE * sizeof(int);
//kernel: elementwise matrix add, one thread per element.
//Launched as <<<COL_SIZE, ROW_SIZE>>>: blockIdx.x selects the row,
//threadIdx.x selects the column.
__global__ void sum(int d_sum[][ROW_SIZE], int d_a[][ROW_SIZE], int d_b[][ROW_SIZE]) {
	const unsigned int row = blockIdx.x;	// one block per matrix row
	const unsigned int col = threadIdx.x;	// one thread per column entry
	d_sum[row][col] = d_a[row][col] + d_b[row][col];
}
//to check the final result: returns 1 if every element of h_sum equals
//h_a + h_b elementwise, 0 otherwise.
//Fix: the original set flag = 0 and `break`, but `break` only left the inner
//loop, so every remaining row was still scanned after a mismatch; we now
//return as soon as the first mismatch is found. The returned value is the same.
int checkSum(int h_a[][ROW_SIZE], int h_b[][ROW_SIZE], int h_sum[][ROW_SIZE]) {
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			if(h_sum[i][j] != h_a[i][j] + h_b[i][j]) {
				return 0;	// first mismatch — no need to keep scanning
			}
		}
	}
	return 1;	// all COL_SIZE x ROW_SIZE elements matched
}
// Driver (HIP): random 400x300 inputs, GPU add + verification, then the
// same addition timed on the host.
int main() {
	// Three 400x300 int matrices on the stack (~469 KB each).
	int h_a[COL_SIZE][ROW_SIZE], h_b[COL_SIZE][ROW_SIZE], h_sum[COL_SIZE][ROW_SIZE];
	// Unseeded rand(): same values every run; range [0, 999].
	for(int i = 0; i < COL_SIZE; i++) {
		for(int j = 0; j < ROW_SIZE; j++) {
			h_a[i][j] = ((int)rand())%1000;
			h_b[i][j] = ((int)rand())%1000;
		}
	}
	// Device buffers. NOTE(review): HIP return codes are unchecked throughout.
	int (*d_a)[ROW_SIZE], (*d_b)[ROW_SIZE], (*d_sum)[ROW_SIZE];
	hipMalloc((void**) &d_a, MATRIX_BYTES);
	hipMalloc((void**) &d_b, MATRIX_BYTES);
	hipMalloc((void**) &d_sum, MATRIX_BYTES);
	hipMemcpy(d_a, h_a, MATRIX_BYTES, hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b, MATRIX_BYTES, hipMemcpyHostToDevice);
	// Kernel timing via events on stream 0.
	hipEvent_t start, stop;
	float time;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
	sum<<<COL_SIZE, ROW_SIZE>>>(d_sum, d_a, d_b);	// one block per row
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipMemcpy(h_sum, d_sum, MATRIX_BYTES, hipMemcpyDeviceToHost);
	if(checkSum(h_a, h_b, h_sum)) {
		printf("The result is computed successfully!\n");
		hipEventElapsedTime(&time, start, stop);
		printf("Computation time taken by device: %f\n", time);
		// Inner declarations deliberately shadow the outer start/stop/time.
		hipEvent_t start, stop;
		float time;
		hipEventCreate(&start);
		hipEventCreate(&stop);
		hipEventRecord(start, 0);
		for(int i = 0; i < COL_SIZE; i++) {
			for(int j = 0; j < ROW_SIZE; j++) {
				h_sum[i][j] = h_a[i][j] + h_b[i][j];
			}
		}
		hipEventRecord(stop, 0);
		hipEventSynchronize(stop);
		hipEventElapsedTime(&time, start, stop);
		printf("Computation time taken by host: %f\n", time);
	}
	else {
		printf("The result is not computed correctly!");
	}
	// NOTE(review): events are never hipEventDestroy'ed — minor leak.
	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_sum);
	return 0;
} | .text
.file "A0Q2.hip"
	# HIP launch stub for sum<<<...>>>: spills the three device pointers,
	# builds the kernel-arg pointer array at 80..96(%rsp), pops the launch
	# configuration pushed by the caller, and forwards it to hipLaunchKernel.
	.globl	_Z18__device_stub__sumPA300_iS0_S0_ # -- Begin function _Z18__device_stub__sumPA300_iS0_S0_
	.p2align	4, 0x90
	.type	_Z18__device_stub__sumPA300_iS0_S0_,@function
_Z18__device_stub__sumPA300_iS0_S0_:    # @_Z18__device_stub__sumPA300_iS0_S0_
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)		# d_sum
	movq	%rsi, 64(%rsp)		# d_a
	movq	%rdx, 56(%rsp)		# d_b
	leaq	72(%rsp), %rax		# args[0] = &d_sum
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax		# args[1] = &d_a
	movq	%rax, 88(%rsp)
	leaq	56(%rsp), %rax		# args[2] = &d_b
	movq	%rax, 96(%rsp)
	leaq	40(%rsp), %rdi
	leaq	24(%rsp), %rsi
	leaq	16(%rsp), %rdx
	leaq	8(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	40(%rsp), %rsi		# grid dims (x,y packed)
	movl	48(%rsp), %edx		# grid dim z
	movq	24(%rsp), %rcx		# block dims (x,y packed)
	movl	32(%rsp), %r8d		# block dim z
	leaq	80(%rsp), %r9		# kernel-arg array
	movl	$_Z3sumPA300_iS0_S0_, %edi	# kernel identified by host symbol
	pushq	8(%rsp)			# stream
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)		# shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end0:
	.size	_Z18__device_stub__sumPA300_iS0_S0_, .Lfunc_end0-_Z18__device_stub__sumPA300_iS0_S0_
	.cfi_endproc
# -- End function
	# int checkSum(int (*h_a)[300], int (*h_b)[300], int (*h_sum)[300])
	# SysV AMD64: rdi=h_a, rsi=h_b, rdx=h_sum; result in eax (1 = all equal).
	# rcx counts 400 rows; each pointer advances 1200 B (= 300 ints) per row;
	# r8 indexes within a row; eax is cleared at .LBB1_4 on the first mismatch
	# in a row (outer loop still continues, matching the C source).
	.globl	_Z8checkSumPA300_iS0_S0_        # -- Begin function _Z8checkSumPA300_iS0_S0_
	.p2align	4, 0x90
	.type	_Z8checkSumPA300_iS0_S0_,@function
_Z8checkSumPA300_iS0_S0_:               # @_Z8checkSumPA300_iS0_S0_
	.cfi_startproc
# %bb.0:
	movl	$1, %eax			# flag = 1
	xorl	%ecx, %ecx			# row counter = 0
	jmp	.LBB1_1
	.p2align	4, 0x90
.LBB1_4:                                # in Loop: Header=BB1_1 Depth=1
	xorl	%eax, %eax			# mismatch: flag = 0
.LBB1_5:                                # in Loop: Header=BB1_1 Depth=1
	incq	%rcx
	addq	$1200, %rsi                     # imm = 0x4B0
	addq	$1200, %rdi                     # imm = 0x4B0
	addq	$1200, %rdx                     # imm = 0x4B0
	cmpq	$400, %rcx                      # imm = 0x190
	je	.LBB1_6
.LBB1_1:                                # %.preheader
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB1_3 Depth 2
	xorl	%r8d, %r8d
	.p2align	4, 0x90
.LBB1_3:                                # Parent Loop BB1_1 Depth=1
                                        # => This Inner Loop Header: Depth=2
	movl	(%rsi,%r8,4), %r9d		# h_b[i][j]
	addl	(%rdi,%r8,4), %r9d		# + h_a[i][j]
	cmpl	%r9d, (%rdx,%r8,4)		# == h_sum[i][j] ?
	jne	.LBB1_4
# %bb.2:                                # in Loop: Header=BB1_3 Depth=2
	incq	%r8
	cmpq	$300, %r8                       # imm = 0x12C
	jne	.LBB1_3
	jmp	.LBB1_5
.LBB1_6:
	retq
.Lfunc_end1:
	.size	_Z8checkSumPA300_iS0_S0_, .Lfunc_end1-_Z8checkSumPA300_iS0_S0_
	.cfi_endproc
# -- End function
	# main() for the HIP build (clang codegen).
	# Frame: ~1.44 MB of locals — h_a at 960112(%rsp), h_b at 480112(%rsp),
	# h_sum at 112(%rsp); device pointers at 40/32/24(%rsp); events at
	# 80/16(%rsp) (outer) and 48/0(%rsp) (inner).
	.globl	main                            # -- Begin function main
	.p2align	4, 0x90
	.type	main,@function
main:                                   # @main
	.cfi_startproc
# %bb.0:
	pushq	%rbp
	.cfi_def_cfa_offset 16
	pushq	%r15
	.cfi_def_cfa_offset 24
	pushq	%r14
	.cfi_def_cfa_offset 32
	pushq	%r12
	.cfi_def_cfa_offset 40
	pushq	%rbx
	.cfi_def_cfa_offset 48
	subq	$1440112, %rsp                  # imm = 0x15F970
	.cfi_def_cfa_offset 1440160
	.cfi_offset %rbx, -48
	.cfi_offset %r12, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	leaq	960112(%rsp), %rbx		# rbx -> h_a row
	leaq	480112(%rsp), %r14		# r14 -> h_b row
	xorl	%r15d, %r15d			# row counter
	.p2align	4, 0x90
.LBB2_1:                                # %.preheader28
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB2_2 Depth 2
	xorl	%r12d, %r12d
	.p2align	4, 0x90
.LBB2_2:                                # Parent Loop BB2_1 Depth=1
                                        # => This Inner Loop Header: Depth=2
	# h_a[i][j] = rand() % 1000 (multiplicative-inverse modulo)
	callq	rand
	cltq
	imulq	$274877907, %rax, %rcx          # imm = 0x10624DD3
	movq	%rcx, %rdx
	shrq	$63, %rdx
	sarq	$38, %rcx
	addl	%edx, %ecx
	imull	$1000, %ecx, %ecx               # imm = 0x3E8
	subl	%ecx, %eax
	movl	%eax, (%rbx,%r12,4)
	# h_b[i][j] = rand() % 1000
	callq	rand
	cltq
	imulq	$274877907, %rax, %rcx          # imm = 0x10624DD3
	movq	%rcx, %rdx
	shrq	$63, %rdx
	sarq	$38, %rcx
	addl	%edx, %ecx
	imull	$1000, %ecx, %ecx               # imm = 0x3E8
	subl	%ecx, %eax
	movl	%eax, (%r14,%r12,4)
	incq	%r12
	cmpq	$300, %r12                      # imm = 0x12C
	jne	.LBB2_2
# %bb.3:                                # in Loop: Header=BB2_1 Depth=1
	incq	%r15
	addq	$1200, %rbx                     # imm = 0x4B0
	addq	$1200, %r14                     # imm = 0x4B0
	cmpq	$400, %r15                      # imm = 0x190
	jne	.LBB2_1
# %bb.4:
	# three hipMalloc(&d_x, 480000) calls
	leaq	40(%rsp), %rdi
	movl	$480000, %esi                   # imm = 0x75300
	callq	hipMalloc
	leaq	32(%rsp), %rdi
	movl	$480000, %esi                   # imm = 0x75300
	callq	hipMalloc
	leaq	24(%rsp), %rdi
	movl	$480000, %esi                   # imm = 0x75300
	callq	hipMalloc
	# hipMemcpy(d_a, h_a, 480000, HostToDevice=1)
	movq	40(%rsp), %rdi
	leaq	960112(%rsp), %rbx
	movl	$1, %ebp
	movl	$480000, %edx                   # imm = 0x75300
	movq	%rbx, %rsi
	movl	$1, %ecx
	callq	hipMemcpy
	# hipMemcpy(d_b, h_b, 480000, HostToDevice=1)
	movq	32(%rsp), %rdi
	leaq	480112(%rsp), %r14
	movl	$480000, %edx                   # imm = 0x75300
	movq	%r14, %rsi
	movl	$1, %ecx
	callq	hipMemcpy
	leaq	80(%rsp), %rdi
	callq	hipEventCreate
	leaq	16(%rsp), %rdi
	callq	hipEventCreate
	movq	80(%rsp), %rdi
	xorl	%esi, %esi
	callq	hipEventRecord
	# launch config: rdx = blockDim.xy packed (300,1); rdi = gridDim.xy (400,1)
	movabsq	$4294967596, %rdx               # imm = 0x10000012C
	leaq	100(%rdx), %rdi
	movl	$1, %esi
	movl	$1, %ecx
	xorl	%r8d, %r8d
	xorl	%r9d, %r9d
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB2_6
# %bb.5:
	# inlined launch stub: args array + pop config + hipLaunchKernel
	movq	24(%rsp), %rax
	movq	40(%rsp), %rcx
	movq	32(%rsp), %rdx
	movq	%rax, 72(%rsp)
	movq	%rcx, 64(%rsp)
	movq	%rdx, 104(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	104(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	48(%rsp), %rdi
	movq	%rsp, %rsi
	leaq	96(%rsp), %rdx
	leaq	88(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	(%rsp), %rcx
	movl	8(%rsp), %r8d
	leaq	112(%rsp), %r9
	movl	$_Z3sumPA300_iS0_S0_, %edi
	pushq	88(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	104(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
.LBB2_6:
	movq	16(%rsp), %rdi
	xorl	%r12d, %r12d
	xorl	%esi, %esi
	callq	hipEventRecord
	movq	16(%rsp), %rdi
	callq	hipEventSynchronize
	# hipMemcpy(h_sum, d_sum, 480000, DeviceToHost=2)
	movq	24(%rsp), %rsi
	leaq	112(%rsp), %r15
	movl	$480000, %edx                   # imm = 0x75300
	movq	%r15, %rdi
	movl	$2, %ecx
	callq	hipMemcpy
	jmp	.LBB2_7
	.p2align	4, 0x90
.LBB2_10:                               # in Loop: Header=BB2_7 Depth=1
	xorl	%ebp, %ebp			# mismatch: flag = 0
.LBB2_11:                               # in Loop: Header=BB2_7 Depth=1
	incq	%r12
	addq	$1200, %r14                     # imm = 0x4B0
	addq	$1200, %rbx                     # imm = 0x4B0
	addq	$1200, %r15                     # imm = 0x4B0
	cmpq	$400, %r12                      # imm = 0x190
	je	.LBB2_12
.LBB2_7:                                # %.preheader.i
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB2_9 Depth 2
	# inlined checkSum: compare h_sum against h_a + h_b, row by row
	xorl	%eax, %eax
	.p2align	4, 0x90
.LBB2_9:                                # Parent Loop BB2_7 Depth=1
                                        # => This Inner Loop Header: Depth=2
	movl	(%r14,%rax,4), %ecx
	addl	(%rbx,%rax,4), %ecx
	cmpl	%ecx, (%r15,%rax,4)
	jne	.LBB2_10
# %bb.8:                                # in Loop: Header=BB2_9 Depth=2
	incq	%rax
	cmpq	$300, %rax                      # imm = 0x12C
	jne	.LBB2_9
	jmp	.LBB2_11
.LBB2_12:                               # %_Z8checkSumPA300_iS0_S0_.exit
	testl	%ebp, %ebp
	je	.LBB2_18			# verification failed
# %bb.13:
	movl	$.Lstr, %edi
	callq	puts@PLT
	movq	80(%rsp), %rsi
	movq	16(%rsp), %rdx
	leaq	72(%rsp), %rdi
	callq	hipEventElapsedTime
	movss	72(%rsp), %xmm0                 # xmm0 = mem[0],zero,zero,zero
	cvtss2sd	%xmm0, %xmm0		# float -> double for printf
	movl	$.L.str.1, %edi
	movb	$1, %al
	callq	printf
	# inner (shadowed) events for the host-side timing
	leaq	48(%rsp), %rdi
	callq	hipEventCreate
	movq	%rsp, %rdi
	callq	hipEventCreate
	movq	48(%rsp), %rdi
	xorl	%ebx, %ebx
	xorl	%esi, %esi
	callq	hipEventRecord
	# host-side addition: h_sum = h_a + h_b
	leaq	960112(%rsp), %rax
	leaq	480112(%rsp), %rcx
	leaq	112(%rsp), %rdx
	.p2align	4, 0x90
.LBB2_14:                               # %.preheader
                                        # =>This Loop Header: Depth=1
                                        # Child Loop BB2_15 Depth 2
	xorl	%esi, %esi
	.p2align	4, 0x90
.LBB2_15:                               # Parent Loop BB2_14 Depth=1
                                        # => This Inner Loop Header: Depth=2
	movl	(%rcx,%rsi,4), %edi
	addl	(%rax,%rsi,4), %edi
	movl	%edi, (%rdx,%rsi,4)
	incq	%rsi
	cmpq	$300, %rsi                      # imm = 0x12C
	jne	.LBB2_15
# %bb.16:                               # in Loop: Header=BB2_14 Depth=1
	incq	%rbx
	addq	$1200, %rax                     # imm = 0x4B0
	addq	$1200, %rcx                     # imm = 0x4B0
	addq	$1200, %rdx                     # imm = 0x4B0
	cmpq	$400, %rbx                      # imm = 0x190
	jne	.LBB2_14
# %bb.17:
	movq	(%rsp), %rdi
	xorl	%esi, %esi
	callq	hipEventRecord
	movq	(%rsp), %rdi
	callq	hipEventSynchronize
	movq	48(%rsp), %rsi
	movq	(%rsp), %rdx
	leaq	64(%rsp), %rdi
	callq	hipEventElapsedTime
	movss	64(%rsp), %xmm0                 # xmm0 = mem[0],zero,zero,zero
	cvtss2sd	%xmm0, %xmm0
	movl	$.L.str.2, %edi
	movb	$1, %al
	callq	printf
	jmp	.LBB2_19
.LBB2_18:
	movl	$.L.str.3, %edi
	xorl	%eax, %eax
	callq	printf
.LBB2_19:
	# common exit: free the three device buffers, return 0
	movq	40(%rsp), %rdi
	callq	hipFree
	movq	32(%rsp), %rdi
	callq	hipFree
	movq	24(%rsp), %rdi
	callq	hipFree
	xorl	%eax, %eax
	addq	$1440112, %rsp                  # imm = 0x15F970
	.cfi_def_cfa_offset 48
	popq	%rbx
	.cfi_def_cfa_offset 40
	popq	%r12
	.cfi_def_cfa_offset 32
	popq	%r14
	.cfi_def_cfa_offset 24
	popq	%r15
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end2:
	.size	main, .Lfunc_end2-main
	.cfi_endproc
# -- End function
	# Static constructor (.init_array): registers the embedded HIP fat binary
	# exactly once (guarded by __hip_gpubin_handle), registers the `sum`
	# kernel, then installs __hip_module_dtor via atexit (tail call).
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)	# already registered?
	jne	.LBB3_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB3_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0			# zero the four stack args
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z3sumPA300_iS0_S0_, %esi	# host stand-in symbol
	movl	$.L__unnamed_1, %edx		# device function name
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d			# thread limit = -1 (none)
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
# -- End function
	# atexit hook: unregisters the HIP fat binary if it was registered and
	# clears the handle so the ctor guard stays consistent.
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi			# nothing to do if never registered
	je	.LBB4_2
# %bb.1:
	pushq	%rax				# realign stack for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB4_2:
	retq
# -- End function
.type _Z3sumPA300_iS0_S0_,@object # @_Z3sumPA300_iS0_S0_
.section .rodata,"a",@progbits
.globl _Z3sumPA300_iS0_S0_
.p2align 3, 0x0
_Z3sumPA300_iS0_S0_:
.quad _Z18__device_stub__sumPA300_iS0_S0_
.size _Z3sumPA300_iS0_S0_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Computation time taken by device: %f\n"
.size .L.str.1, 38
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Computation time taken by host: %f\n"
.size .L.str.2, 36
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "The result is not computed correctly!"
.size .L.str.3, 38
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3sumPA300_iS0_S0_"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "The result is computed successfully!"
.size .Lstr, 37
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__sumPA300_iS0_S0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3sumPA300_iS0_S0_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3sumPA300_iS0_S0_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 7.152557373046875e-05 ; /* 0x000004b0ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e6e0000002100 */
/*0050*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x001fc800078e0007 */
/*0060*/ IMAD.WIDE.U32 R4, R6, R7, c[0x0][0x170] ; /* 0x00005c0006047625 */
/* 0x000fc800078e0007 */
/*0070*/ IMAD.WIDE.U32 R2, R9, 0x4, R2 ; /* 0x0000000409027825 */
/* 0x002fc800078e0002 */
/*0080*/ IMAD.WIDE.U32 R4, R9, 0x4, R4 ; /* 0x0000000409047825 */
/* 0x000fe400078e0004 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00b0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fcc00078e0007 */
/*00c0*/ IMAD.WIDE.U32 R6, R9, 0x4, R6 ; /* 0x0000000409067825 */
/* 0x000fe200078e0006 */
/*00d0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3sumPA300_iS0_S0_
.globl _Z3sumPA300_iS0_S0_
.p2align 8
.type _Z3sumPA300_iS0_S0_,@function
_Z3sumPA300_iS0_S0_:
; sum kernel: out[row][tid] = in1[row][tid] + in2[row][tid] for int[400][300].
; Kernarg block at s[0:1] holds three pointers {out, in1, in2}; s15 is the
; workgroup id (row); v0 is the lane/workitem id (column). 0x4b0 = 1200 bytes
; = one 300-int row.
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0 ; s[4:5]=out, s[6:7]=in1
s_load_b64 s[0:1], s[0:1], 0x10 ; s[0:1]=in2
s_mul_hi_u32 s8, s15, 0x4b0 ; 64-bit row byte offset = row * 1200
s_mulk_i32 s15, 0x4b0
v_lshlrev_b32_e32 v0, 2, v0 ; column byte offset = tid * 4
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s6, s15 ; &in1[row]
s_addc_u32 s3, s7, s8
s_add_u32 s0, s0, s15 ; &in2[row]
s_addc_u32 s1, s1, s8
s_clause 0x1
global_load_b32 v1, v0, s[2:3]
global_load_b32 v2, v0, s[0:1]
s_add_u32 s0, s4, s15 ; &out[row]
s_addc_u32 s1, s5, s8
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1 ; elementwise integer add
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3sumPA300_iS0_S0_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3sumPA300_iS0_S0_, .Lfunc_end0-_Z3sumPA300_iS0_S0_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3sumPA300_iS0_S0_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3sumPA300_iS0_S0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000a8442_00000000-6_A0Q2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
# atexit hook: unregisters the CUDA fatbin using the handle cached at
# registration time (_ZL20__cudaFatCubinHandle).
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp # realign stack to 16 (entry rsp%16 == 8)
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8checkSumPA300_iS0_S0_
.type _Z8checkSumPA300_iS0_S0_, @function
_Z8checkSumPA300_iS0_S0_:
# bool checkSum(int (*)[300], int (*)[300], int (*)[300])
# Returns 1 iff arg3[i][j] == arg1[i][j] + arg2[i][j] for all 400x300
# elements; scans the whole array even after the first mismatch.
# In: rdi, rsi = addends; rdx = candidate sum. Out: eax = 0/1.
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
movq %rdi, %r11 # first addend
movq %rsi, %r10 # second addend
movq %rdx, %r9 # expected sum
movl $0, %r8d # row byte offset
movl $1, %ebx # result = true
movl $0, %ebp # constant 0 for the mismatch path
jmp .L4
.L8:
movl %ebp, %ebx # mismatch: result = false (scan continues)
.L5:
addq $1200, %r8 # next row: 300 ints * 4 bytes
cmpq $480000, %r8 # all 400 rows done?
je .L3
.L4:
leaq (%r9,%r8), %rdi # &sum[row]
leaq (%r11,%r8), %rsi # &a[row]
leaq (%r10,%r8), %rcx # &b[row]
movl $0, %eax # column byte offset
.L6:
movl (%rcx,%rax), %edx
addl (%rsi,%rax), %edx # a + b
cmpl %edx, (%rdi,%rax) # == stored sum?
jne .L8
addq $4, %rax
cmpq $1200, %rax
jne .L6
jmp .L5
.L3:
movl %ebx, %eax
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z8checkSumPA300_iS0_S0_, .-_Z8checkSumPA300_iS0_S0_
.globl _Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_
.type _Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_, @function
_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_:
# Launch stub for sum<<<...>>>(int(*)[300], int(*)[300], int(*)[300]):
# spills the three pointer args, builds the kernel argv array, pops the
# pushed launch configuration, and calls cudaLaunchKernel. Protected by a
# stack canary.
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp) # arg0 spill
movq %rsi, 16(%rsp) # arg1 spill
movq %rdx, 8(%rsp) # arg2 spill
movq %fs:40, %rax # load stack canary
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax # argv[0..2] = addresses of the spills
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp) # default grid/block dims = (1,1,1)/(1,1,1)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx # &stream
leaq 32(%rsp), %rdx # &sharedMem
leaq 60(%rsp), %rsi # &blockDim
leaq 48(%rsp), %rdi # &gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15 # 0 => configuration valid, launch
.L11:
movq 120(%rsp), %rax # verify canary before returning
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp) # stream (stack arg 2)
.cfi_def_cfa_offset 152
pushq 40(%rsp) # sharedMem (stack arg 1)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9 # argv
movq 76(%rsp), %rcx # blockDim.xy
movl 84(%rsp), %r8d # blockDim.z
movq 64(%rsp), %rsi # gridDim.xy
movl 72(%rsp), %edx # gridDim.z
leaq _Z3sumPA300_iS0_S0_(%rip), %rdi # kernel handle = host wrapper
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_, .-_Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_
.globl _Z3sumPA300_iS0_S0_
.type _Z3sumPA300_iS0_S0_, @function
_Z3sumPA300_iS0_S0_:
# Host-side entry for the sum kernel: forwards the register args
# unchanged to the launch stub. Its address doubles as the handle
# registered with __cudaRegisterFunction.
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp # realign stack for the call
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z3sumPA300_iS0_S0_, .-_Z3sumPA300_iS0_S0_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "The result is computed successfully!\n"
.align 8
.LC1:
.string "Computation time taken by device: %f\n"
.align 8
.LC2:
.string "Computation time taken by host: %f\n"
.align 8
.LC3:
.string "The result is not computed correctly!"
.text
.globl main
.type main, @function
main:
# Driver: fills two 400x300 int matrices on the stack with rand()%1000,
# uploads them, launches sum with grid=(400,1,1) block=(300,1,1) timed via
# CUDA events, downloads and verifies the result, then times an equivalent
# CPU pass and frees the device buffers. Returns 0.
.LFB2058:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
leaq -1437696(%rsp), %r11
.cfi_def_cfa 11, 1437720
.LPSRL0:
# Stack-clash probe loop: touch every 4 KiB page of the ~1.4 MB frame
# (three 480000-byte host matrices live on the stack).
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $2408, %rsp
.cfi_def_cfa_offset 1440128
movq %fs:40, %rax
movq %rax, 1440088(%rsp) # stack canary
xorl %eax, %eax
movl $1200, %ebp # row-end byte offset
.L20:
leaq -1200(%rbp), %rbx
.L21:
# rand() % 1000 via reciprocal multiply (magic 274877907 >> 38).
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $38, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $1000, %edx, %edx
subl %edx, %eax
movl %eax, 80(%rsp,%rbx) # first input matrix element
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $38, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $1000, %edx, %edx
subl %edx, %eax
movl %eax, 480080(%rsp,%rbx) # second input matrix element
addq $4, %rbx
cmpq %rbp, %rbx
jne .L21
addq $1200, %rbp
cmpq $481200, %rbp # 400 rows * 1200 B + initial 1200
jne .L20
leaq 8(%rsp), %rdi # device buffer 1
movl $480000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi # device buffer 2
movl $480000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi # device result buffer
movl $480000, %esi
call cudaMalloc@PLT
leaq 80(%rsp), %rsi
movl $1, %ecx # cudaMemcpyHostToDevice
movl $480000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 480080(%rsp), %rsi
movl $1, %ecx
movl $480000, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 32(%rsp), %rdi # start event
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi # stop event
call cudaEventCreate@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $300, 64(%rsp) # blockDim = (300,1,1)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $400, 48(%rsp) # gridDim = (400,1,1)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d # stream = 0
movl $0, %r8d # sharedMem = 0
movq 64(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L33 # 0 => config accepted: launch kernel
.L23:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 960080(%rsp), %rbx # host result matrix
movl $2, %ecx # cudaMemcpyDeviceToHost
movl $480000, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq 480080(%rsp), %rsi
leaq 80(%rsp), %rdi
movq %rbx, %rdx
call _Z8checkSumPA300_iS0_S0_
testl %eax, %eax
je .L24 # mismatch -> failure message
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rsp, %rdi # &elapsed_ms
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd (%rsp), %xmm0 # float ms -> double for printf
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax # one vector (FP) argument
call __printf_chk@PLT
leaq 48(%rsp), %rdi # CPU-timing start event
call cudaEventCreate@PLT
leaq 64(%rsp), %rdi # CPU-timing stop event
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
leaq 80(%rsp), %rcx # first input
leaq 480080(%rsp), %rdi # second input
movq %rbx, %rsi # destination (reuses result buffer)
movq %rdi, %r8 # loop sentinel
.L25:
# CPU reference pass: dest[i][j] = in1[i][j] + in2[i][j], row by row.
movl $0, %eax
.L26:
movl (%rdi,%rax), %edx
addl (%rcx,%rax), %edx
movl %edx, (%rsi,%rax)
addq $4, %rax
cmpq $1200, %rax
jne .L26
addq $1200, %rcx
addq $1200, %rdi
addq $1200, %rsi
cmpq %r8, %rcx
jne .L25
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movq 64(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 64(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 4(%rsp), %xmm0
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L28:
movq 8(%rsp), %rdi # release device buffers
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 1440088(%rsp), %rax # verify canary
subq %fs:40, %rax
jne .L34
movl $0, %eax
addq $1440104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi # result buffer is the kernel's first arg
call _Z33__device_stub__Z3sumPA300_iS0_S0_PA300_iS0_S0_
jmp .L23
.L24:
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L28
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "_Z3sumPA300_iS0_S0_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
# Static initializer (run from .init_array): registers the fatbin,
# registers the sum kernel under its mangled name, finalizes the
# registration, and installs the atexit unregister hook.
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # cache handle for unregister
pushq $0 # four trailing NULL stack args
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d # thread_limit = -1 (none)
leaq .LC4(%rip), %rdx # "_Z3sumPA300_iS0_S0_"
movq %rdx, %rcx # same string for device and host names
leaq _Z3sumPA300_iS0_S0_(%rip), %rsi # host wrapper = kernel handle
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "A0Q2.hip"
.globl _Z18__device_stub__sumPA300_iS0_S0_ # -- Begin function _Z18__device_stub__sumPA300_iS0_S0_
.p2align 4, 0x90
.type _Z18__device_stub__sumPA300_iS0_S0_,@function
_Z18__device_stub__sumPA300_iS0_S0_: # @_Z18__device_stub__sumPA300_iS0_S0_
# HIP launch stub: spills the three pointer args, builds the kernel argv
# array, pops the pushed launch configuration, and tail-marshals everything
# into hipLaunchKernel (shared-mem size and stream go on the stack).
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp) # arg0 spill
movq %rsi, 64(%rsp) # arg1 spill
movq %rdx, 56(%rsp) # arg2 spill
leaq 72(%rsp), %rax # argv[0..2] point at the spills
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi # &gridDim
leaq 24(%rsp), %rsi # &blockDim
leaq 16(%rsp), %rdx # &sharedMem
leaq 8(%rsp), %rcx # &stream
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi # gridDim.xy
movl 48(%rsp), %edx # gridDim.z
movq 24(%rsp), %rcx # blockDim.xy
movl 32(%rsp), %r8d # blockDim.z
leaq 80(%rsp), %r9 # argv
movl $_Z3sumPA300_iS0_S0_, %edi # kernel handle
pushq 8(%rsp) # stream
.cfi_adjust_cfa_offset 8
pushq 24(%rsp) # sharedMem
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__sumPA300_iS0_S0_, .Lfunc_end0-_Z18__device_stub__sumPA300_iS0_S0_
.cfi_endproc
# -- End function
.globl _Z8checkSumPA300_iS0_S0_ # -- Begin function _Z8checkSumPA300_iS0_S0_
.p2align 4, 0x90
.type _Z8checkSumPA300_iS0_S0_,@function
_Z8checkSumPA300_iS0_S0_: # @_Z8checkSumPA300_iS0_S0_
# bool checkSum(int (*)[300], int (*)[300], int (*)[300])
# eax = 1 iff rdx[i][j] == rdi[i][j] + rsi[i][j] for all 400x300 elements;
# keeps scanning after a mismatch (result only ever drops to 0).
.cfi_startproc
# %bb.0:
movl $1, %eax # result = true
xorl %ecx, %ecx # row counter
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_4: # in Loop: Header=BB1_1 Depth=1
xorl %eax, %eax # mismatch: result = false
.LBB1_5: # in Loop: Header=BB1_1 Depth=1
incq %rcx
addq $1200, %rsi # imm = 0x4B0 (advance one 300-int row)
addq $1200, %rdi # imm = 0x4B0
addq $1200, %rdx # imm = 0x4B0
cmpq $400, %rcx # imm = 0x190
je .LBB1_6
.LBB1_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
xorl %r8d, %r8d # column counter
.p2align 4, 0x90
.LBB1_3: # Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
movl (%rsi,%r8,4), %r9d
addl (%rdi,%r8,4), %r9d # a + b
cmpl %r9d, (%rdx,%r8,4) # == stored sum?
jne .LBB1_4
# %bb.2: # in Loop: Header=BB1_3 Depth=2
incq %r8
cmpq $300, %r8 # imm = 0x12C
jne .LBB1_3
jmp .LBB1_5
.LBB1_6:
retq
.Lfunc_end1:
.size _Z8checkSumPA300_iS0_S0_, .Lfunc_end1-_Z8checkSumPA300_iS0_S0_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
# HIP driver (clang codegen, checkSum and the launch stub inlined):
# fills two 400x300 int matrices with rand()%1000, uploads them, launches
# sum with grid=(400,1) block=(300,1) timed via HIP events, downloads and
# verifies the result inline, then times a CPU pass and frees the buffers.
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $1440112, %rsp # imm = 0x15F970 (three 480000B host matrices)
.cfi_def_cfa_offset 1440160
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 960112(%rsp), %rbx # first input matrix
leaq 480112(%rsp), %r14 # second input matrix
xorl %r15d, %r15d # row counter
.p2align 4, 0x90
.LBB2_1: # %.preheader28
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %r12d, %r12d # column counter
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
# rand() % 1000 via reciprocal multiply (magic 274877907 >> 38).
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%rbx,%r12,4)
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%r14,%r12,4)
incq %r12
cmpq $300, %r12 # imm = 0x12C
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %r15
addq $1200, %rbx # imm = 0x4B0 (next row)
addq $1200, %r14 # imm = 0x4B0
cmpq $400, %r15 # imm = 0x190
jne .LBB2_1
# %bb.4:
leaq 40(%rsp), %rdi # device buffer 1
movl $480000, %esi # imm = 0x75300
callq hipMalloc
leaq 32(%rsp), %rdi # device buffer 2
movl $480000, %esi # imm = 0x75300
callq hipMalloc
leaq 24(%rsp), %rdi # device result buffer
movl $480000, %esi # imm = 0x75300
callq hipMalloc
movq 40(%rsp), %rdi
leaq 960112(%rsp), %rbx
movl $1, %ebp # success flag preset to true
movl $480000, %edx # imm = 0x75300
movq %rbx, %rsi
movl $1, %ecx # hipMemcpyHostToDevice
callq hipMemcpy
movq 32(%rsp), %rdi
leaq 480112(%rsp), %r14
movl $480000, %edx # imm = 0x75300
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 80(%rsp), %rdi # start event
callq hipEventCreate
leaq 16(%rsp), %rdi # stop event
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967596, %rdx # imm = 0x10000012C (blockDim packed: x=300,y=1)
leaq 100(%rdx), %rdi # gridDim packed: x=400,y=1
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d # sharedMem = 0
xorl %r9d, %r9d # stream = 0
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6 # non-zero => skip the launch
# %bb.5:
# Inlined launch stub: argv = {&result, &in1, &in2}, pop config, launch.
movq 24(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
movq %rsp, %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq (%rsp), %rcx
movl 8(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z3sumPA300_iS0_S0_, %edi
pushq 88(%rsp) # stream
.cfi_adjust_cfa_offset 8
pushq 104(%rsp) # sharedMem
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq 16(%rsp), %rdi
xorl %r12d, %r12d
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
leaq 112(%rsp), %r15 # host result matrix
movl $480000, %edx # imm = 0x75300
movq %r15, %rdi
movl $2, %ecx # hipMemcpyDeviceToHost
callq hipMemcpy
jmp .LBB2_7
.p2align 4, 0x90
.LBB2_10: # in Loop: Header=BB2_7 Depth=1
xorl %ebp, %ebp # mismatch: flag = false, keep scanning
.LBB2_11: # in Loop: Header=BB2_7 Depth=1
incq %r12
addq $1200, %r14 # imm = 0x4B0
addq $1200, %rbx # imm = 0x4B0
addq $1200, %r15 # imm = 0x4B0
cmpq $400, %r12 # imm = 0x190
je .LBB2_12
.LBB2_7: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB2_9 Depth 2
# Inlined checkSum: verify result == in1 + in2, element by element.
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_9: # Parent Loop BB2_7 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r14,%rax,4), %ecx
addl (%rbx,%rax,4), %ecx
cmpl %ecx, (%r15,%rax,4)
jne .LBB2_10
# %bb.8: # in Loop: Header=BB2_9 Depth=2
incq %rax
cmpq $300, %rax # imm = 0x12C
jne .LBB2_9
jmp .LBB2_11
.LBB2_12: # %_Z8checkSumPA300_iS0_S0_.exit
testl %ebp, %ebp
je .LBB2_18 # verification failed
# %bb.13:
movl $.Lstr, %edi
callq puts@PLT
movq 80(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 72(%rsp), %rdi # &elapsed_ms
callq hipEventElapsedTime
movss 72(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0 # float ms -> double for printf
movl $.L.str.1, %edi
movb $1, %al # one vector (FP) argument to variadic printf
callq printf
leaq 48(%rsp), %rdi # CPU-timing start event
callq hipEventCreate
movq %rsp, %rdi # CPU-timing stop event
callq hipEventCreate
movq 48(%rsp), %rdi
xorl %ebx, %ebx
xorl %esi, %esi
callq hipEventRecord
leaq 960112(%rsp), %rax # first input
leaq 480112(%rsp), %rcx # second input
leaq 112(%rsp), %rdx # destination (reuses result buffer)
.p2align 4, 0x90
.LBB2_14: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_15 Depth 2
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_15: # Parent Loop BB2_14 Depth=1
# => This Inner Loop Header: Depth=2
movl (%rcx,%rsi,4), %edi
addl (%rax,%rsi,4), %edi
movl %edi, (%rdx,%rsi,4)
incq %rsi
cmpq $300, %rsi # imm = 0x12C
jne .LBB2_15
# %bb.16: # in Loop: Header=BB2_14 Depth=1
incq %rbx
addq $1200, %rax # imm = 0x4B0
addq $1200, %rcx # imm = 0x4B0
addq $1200, %rdx # imm = 0x4B0
cmpq $400, %rbx # imm = 0x190
jne .LBB2_14
# %bb.17:
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 48(%rsp), %rsi
movq (%rsp), %rdx
leaq 64(%rsp), %rdi
callq hipEventElapsedTime
movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
jmp .LBB2_19
.LBB2_18:
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
.LBB2_19:
movq 40(%rsp), %rdi # release the device buffers
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
xorl %eax, %eax # return 0
addq $1440112, %rsp # imm = 0x15F970
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
# Module constructor: registers the embedded fatbin once, registers the
# sum-kernel host stub, then installs __hip_module_dtor via atexit.
.cfi_startproc
# %bb.0:
subq $40, %rsp # 32B of stack args + alignment
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip) # already registered?
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip) # cache handle
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp) # four trailing NULL stack args
movups %xmm0, (%rsp)
movl $_Z3sumPA300_iS0_S0_, %esi # host stub address
movl $.L__unnamed_1, %edx # kernel name
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
# atexit handler: unregisters the fatbin if registered; clears the handle
# so repeated invocation is harmless.
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax # realign stack for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3sumPA300_iS0_S0_,@object # @_Z3sumPA300_iS0_S0_
.section .rodata,"a",@progbits
.globl _Z3sumPA300_iS0_S0_
.p2align 3, 0x0
_Z3sumPA300_iS0_S0_:
.quad _Z18__device_stub__sumPA300_iS0_S0_
.size _Z3sumPA300_iS0_S0_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Computation time taken by device: %f\n"
.size .L.str.1, 38
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Computation time taken by host: %f\n"
.size .L.str.2, 36
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "The result is not computed correctly!"
.size .L.str.3, 38
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3sumPA300_iS0_S0_"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "The result is computed successfully!"
.size .Lstr, 37
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__sumPA300_iS0_S0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3sumPA300_iS0_S0_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#define NUM_THREADS 256
bool InitCUDA()
{
    // Probe for a CUDA device with compute capability >= 1.x and select it.
    // Returns false (with a message on stderr) when none is found.
    int count;
    cudaGetDeviceCount(&count);
    if(count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int i;
    for(i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            if(prop.major >= 1) {
                break;  // first usable device wins
            }
        }
    }
    if(i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(i);
    return true;
}
// Fill the leading n x n block of matrix `a` (row stride `lda`, in elements)
// with pseudo-random values in [0, 1], drawn as rand() / RAND_MAX in the
// usual row-major call order.
void matgen(float* a, int lda, int n)
{
    for (int row = 0; row < n; ++row) {
        float* dst = a + row * lda;
        for (int col = 0; col < n; ++col) {
            dst[col] = (float) rand() / RAND_MAX;
        }
    }
}
// Reference (CPU) matrix multiply: c = a * b for n x n matrices with row
// strides lda/ldb/ldc (in elements). Each dot product accumulates in double
// to limit rounding error before the final narrowing store into c.
void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
    for (int row = 0; row < n; ++row) {
        const float* arow = a + row * lda;
        for (int col = 0; col < n; ++col) {
            double acc = 0;
            for (int k = 0; k < n; ++k) {
                acc += arow[k] * b[k * ldb + col];
            }
            c[row * ldc + col] = acc;
        }
    }
}
void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
{
    // Print the maximum and mean relative error of `a` against reference `b`
    // (n x n, row strides lda/ldb). Entries where b == 0 are skipped to avoid
    // dividing by zero; note they still count in the n*n average divisor, so
    // the reported mean is slightly understated when zeros are present.
    float max_err = 0;
    float average_err = 0;
    int i, j;
    for(i = 0; i < n; i++) {
        for(j = 0; j < n; j++) {
            if(b[i * ldb + j] != 0) {
                float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
                if(max_err < err) max_err = err;
                average_err += err;
            }
        }
    }
    printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n));
}
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
    // Device matrix multiply: one block per output row, threads striping
    // across the columns. The block first stages row `blockIdx.x` of `a`
    // into dynamic shared memory (launch must supply sizeof(float)*n), then
    // each thread computes c[row][j] with Kahan-compensated summation
    // (`y` carries the accumulated rounding error, negated).
    extern __shared__ float data[];
    const int tid = threadIdx.x;
    // const int bid = blockIdx.x;
    // const int idx = bid * blockDim.x + tid;
    const int row = blockIdx.x;
    // const int column = idx % n;
    int i, j;
    // Cooperative load of a[row][0..n) into shared memory.
    for(i = tid; i < n; i += blockDim.x) {
        data[i] = a[row * lda + i];
    }
    __syncthreads();  // row must be fully staged before any dot product
    for(j = tid; j < n; j += blockDim.x) {
        float t = 0;   // running sum
        float y = 0;   // Kahan compensation (negated error term)
        for(i = 0; i < n; i++) {
            // t += a[row * lda + i] * b[i * ldb + column];
            float r;
            y -= data[i] * b[i * ldb + j];
            r = t - y;
            y = (r - t) + y;   // recover the low-order bits lost in r
            t = r;
        }
        c[row * ldc + j] = t;
    }
}
clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
    // GPU matrix multiply wrapper: allocates pitched device matrices, copies
    // a and b up, launches matMultCUDA (n blocks of NUM_THREADS, with
    // sizeof(float)*n of dynamic shared memory), copies the result back, and
    // frees the device buffers. Returns the elapsed clock() ticks for the
    // whole round trip (allocation + transfers + kernel), not kernel time
    // alone. NOTE(review): cudaMallocPitch/cudaMemcpy2D return codes are not
    // checked — a failed allocation proceeds silently.
    float *ac, *bc, *cc;
    clock_t start, end;
    start = clock();
    // cudaMalloc((void**) &ac, sizeof(float) * n * n);
    // cudaMalloc((void**) &bc, sizeof(float) * n * n);
    // cudaMalloc((void**) &cc, sizeof(float) * n * n);
    size_t pitch_a, pitch_b, pitch_c;
    // Pitched allocations: each row padded to the pitch returned per matrix.
    cudaMallocPitch((void**) &ac, &pitch_a, sizeof(float) * n, n);
    cudaMallocPitch((void**) &bc, &pitch_b, sizeof(float) * n, n);
    cudaMallocPitch((void**) &cc, &pitch_c, sizeof(float) * n, n);
    // cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
    // cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
    cudaMemcpy2D(ac, pitch_a, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
    cudaMemcpy2D(bc, pitch_b, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
    // int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
    // matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, n, bc, n, cc, n, n);
    // Kernel sees strides in elements, hence pitch / sizeof(float).
    matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n);
    // cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
    cudaMemcpy2D(c, sizeof(float) * ldc, cc, pitch_c, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
    cudaFree(ac);
    cudaFree(bc);
    cudaFree(cc);
    end = clock();
    return end - start;
}
int main()
{
float *a, *b, *c, *d;
int n = 1000;
if(!InitCUDA()) return 0;
a = (float*) malloc(sizeof(float) * n * n);
b = (float*) malloc(sizeof(float) * n * n);
c = (float*) malloc(sizeof(float) * n * n);
d = (float*) malloc(sizeof(float) * n * n);
srand(0);
matgen(a, n, n);
matgen(b, n, n);
clock_t time = matmultCUDA(a, n, b, n, c, n, n);
clock_t startc, timec;
startc = clock();
matmult(a, n, b, n, d, n, n);
timec = clock() - startc;
compare_mat(c, n, d, n, n);
printf("GPU time used: %f \n", (double) time / CLOCKS_PER_SEC);
printf("CPU time used: %f \n", (double) timec / CLOCKS_PER_SEC);
return 0;
} | .file "tmpxft_0014ed6f_00000000-6_2_m_sha.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi, @function
_ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi:
.LFB2087:
.cfi_startproc
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _ZL11matMultCUDAPKfmS0_mPfmi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi, .-_ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi
.type _ZL11matMultCUDAPKfmS0_mPfmi, @function
_ZL11matMultCUDAPKfmS0_mPfmi:
.LFB2088:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL11matMultCUDAPKfmS0_mPfmi, .-_ZL11matMultCUDAPKfmS0_mPfmi
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "There is no device.\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "There is no device supporting CUDA 1.x.\n"
.text
.globl _Z8InitCUDAv
.type _Z8InitCUDAv, @function
_Z8InitCUDAv:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $1064, %rsp
.cfi_def_cfa_offset 1088
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %eax
testl %eax, %eax
je .L24
movl $0, %ebx
leaq 16(%rsp), %rbp
jg .L16
jmp .L18
.L24:
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $0, %eax
jmp .L11
.L14:
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jle .L17
.L16:
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
testl %eax, %eax
jne .L14
cmpl $0, 376(%rsp)
jle .L14
.L17:
cmpl %ebx, 12(%rsp)
je .L25
.L18:
movl %ebx, %edi
call cudaSetDevice@PLT
movl $1, %eax
.L11:
movq 1048(%rsp), %rdx
subq %fs:40, %rdx
jne .L26
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %eax
jmp .L11
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z8InitCUDAv, .-_Z8InitCUDAv
.globl _Z6matgenPfii
.type _Z6matgenPfii, @function
_Z6matgenPfii:
.LFB2058:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L33
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %edx, %r15d
movslq %esi, %rsi
leaq 0(,%rsi,4), %r13
movslq %edx, %r14
leaq (%rdi,%r14,4), %rbp
negq %r14
salq $2, %r14
movl $0, %r12d
.L29:
leaq 0(%rbp,%r14), %rbx
.L30:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L30
addl $1, %r12d
addq %r13, %rbp
cmpl %r12d, %r15d
jne .L29
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2058:
.size _Z6matgenPfii, .-_Z6matgenPfii
.globl _Z7matmultPKfiS0_iPfii
.type _Z7matmultPKfiS0_iPfii, @function
_Z7matmultPKfiS0_iPfii:
.LFB2059:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl 48(%rsp), %r12d
testl %r12d, %r12d
jle .L36
movq %rdx, %r11
movl %ecx, %eax
movslq %r9d, %rbp
salq $2, %rbp
movslq %esi, %rsi
leaq 0(,%rsi,4), %rbx
movq %rdi, %r9
movslq %r12d, %r10
leaq (%rdi,%r10,4), %rcx
cltq
leaq 0(,%rax,4), %rsi
movl $0, %r14d
.L38:
movq %r11, %r13
movl $0, %edi
.L41:
movq %r13, %rdx
movq %r9, %rax
pxor %xmm1, %xmm1
.L39:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
cvtss2sd %xmm0, %xmm0
addsd %xmm0, %xmm1
addq $4, %rax
addq %rsi, %rdx
cmpq %rcx, %rax
jne .L39
cvtsd2ss %xmm1, %xmm1
movss %xmm1, (%r8,%rdi,4)
addq $1, %rdi
addq $4, %r13
cmpq %r10, %rdi
jne .L41
addl $1, %r14d
addq %rbp, %r8
addq %rbx, %r9
addq %rbx, %rcx
cmpl %r14d, %r12d
jne .L38
.L36:
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z7matmultPKfiS0_iPfii, .-_Z7matmultPKfiS0_iPfii
.section .rodata.str1.8
.align 8
.LC6:
.string "Max error: %g Average error: %g\n"
.text
.globl _Z11compare_matPKfiS0_ii
.type _Z11compare_matPKfiS0_ii, @function
_Z11compare_matPKfiS0_ii:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
testl %r8d, %r8d
jle .L51
movq %rdi, %r9
movl %esi, %r10d
movl %r8d, %esi
movslq %ecx, %rcx
leaq 0(,%rcx,4), %rbx
movl $0, %ebp
movl $0, %r11d
pxor %xmm1, %xmm1
movaps %xmm1, %xmm4
movaps %xmm1, %xmm3
movss .LC5(%rip), %xmm5
jmp .L46
.L53:
movslq %eax, %rdi
movss (%r9,%rdi,4), %xmm0
subss %xmm2, %xmm0
divss %xmm2, %xmm0
andps %xmm5, %xmm0
movaps %xmm0, %xmm6
maxss %xmm4, %xmm6
movaps %xmm6, %xmm4
addss %xmm0, %xmm1
.L47:
addq $4, %rcx
addl $1, %eax
cmpl %eax, %esi
je .L56
.L50:
movss (%rcx), %xmm2
ucomiss %xmm3, %xmm2
jp .L53
je .L47
jmp .L53
.L56:
addl $1, %r11d
addl %r10d, %esi
addq %rbx, %rdx
addl %r10d, %ebp
cmpl %r11d, %r8d
je .L45
.L46:
movl %ebp, %eax
movq %rdx, %rcx
jmp .L50
.L51:
pxor %xmm1, %xmm1
movaps %xmm1, %xmm4
.L45:
imull %r8d, %r8d
pxor %xmm0, %xmm0
cvtsi2ssl %r8d, %xmm0
divss %xmm0, %xmm1
pxor %xmm0, %xmm0
cvtss2sd %xmm4, %xmm0
cvtss2sd %xmm1, %xmm1
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z11compare_matPKfiS0_ii, .-_Z11compare_matPKfiS0_ii
.globl _Z11matmultCUDAPKfiS0_iPfii
.type _Z11matmultCUDAPKfiS0_iPfii, @function
_Z11matmultCUDAPKfiS0_iPfii:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rdi, (%rsp)
movl %esi, %r13d
movq %rdx, 8(%rsp)
movl %ecx, %r12d
movq %r8, 16(%rsp)
movl %r9d, 28(%rsp)
movl 176(%rsp), %r15d
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
call clock@PLT
movq %rax, %r14
movslq %r15d, %rbp
leaq 0(,%rbp,4), %rbx
leaq 56(%rsp), %rsi
leaq 32(%rsp), %rdi
movq %rbp, %rcx
movq %rbx, %rdx
call cudaMallocPitch@PLT
leaq 64(%rsp), %rsi
leaq 40(%rsp), %rdi
movq %rbp, %rcx
movq %rbx, %rdx
call cudaMallocPitch@PLT
leaq 72(%rsp), %rsi
leaq 48(%rsp), %rdi
movq %rbp, %rcx
movq %rbx, %rdx
call cudaMallocPitch@PLT
movslq %r13d, %rcx
salq $2, %rcx
subq $8, %rsp
.cfi_def_cfa_offset 184
pushq $1
.cfi_def_cfa_offset 192
movq %rbp, %r9
movq %rbx, %r8
movq 16(%rsp), %rdx
movq 72(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy2D@PLT
movslq %r12d, %rcx
salq $2, %rcx
movl $1, (%rsp)
movq %rbp, %r9
movq %rbx, %r8
movq 24(%rsp), %rdx
movq 80(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy2D@PLT
movl $256, 108(%rsp)
movl $1, 112(%rsp)
movl %r15d, 96(%rsp)
movl $1, 100(%rsp)
addq $16, %rsp
.cfi_def_cfa_offset 176
movl $0, %r9d
movq %rbx, %r8
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L61
.L58:
movslq 28(%rsp), %rsi
salq $2, %rsi
subq $8, %rsp
.cfi_def_cfa_offset 184
pushq $2
.cfi_def_cfa_offset 192
movq %rbp, %r9
movq %rbx, %r8
movq 88(%rsp), %rcx
movq 64(%rsp), %rdx
movq 32(%rsp), %rdi
call cudaMemcpy2D@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
call clock@PLT
subq %r14, %rax
movq 104(%rsp), %rdx
subq %fs:40, %rdx
jne .L62
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L61:
.cfi_restore_state
movq 64(%rsp), %rcx
shrq $2, %rcx
movq 56(%rsp), %rsi
shrq $2, %rsi
subq $8, %rsp
.cfi_def_cfa_offset 184
pushq %r15
.cfi_def_cfa_offset 192
movq 88(%rsp), %r9
shrq $2, %r9
movq 64(%rsp), %r8
movq 56(%rsp), %rdx
movq 48(%rsp), %rdi
call _ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L58
.L62:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z11matmultCUDAPKfiS0_iPfii, .-_Z11matmultCUDAPKfiS0_iPfii
.section .rodata.str1.1
.LC8:
.string "GPU time used: %f \n"
.LC9:
.string "CPU time used: %f \n"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
call _Z8InitCUDAv
testb %al, %al
jne .L66
.L64:
movl $0, %eax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L66:
.cfi_restore_state
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $4000000, %edi
call malloc@PLT
movq %rax, %r12
movl $4000000, %edi
call malloc@PLT
movq %rax, %r13
movl $0, %edi
call srand@PLT
movl $1000, %edx
movl $1000, %esi
movq %rbx, %rdi
call _Z6matgenPfii
movl $1000, %edx
movl $1000, %esi
movq %rbp, %rdi
call _Z6matgenPfii
subq $8, %rsp
.cfi_def_cfa_offset 72
pushq $1000
.cfi_def_cfa_offset 80
movl $1000, %r9d
movq %r12, %r8
movl $1000, %ecx
movq %rbp, %rdx
movl $1000, %esi
movq %rbx, %rdi
call _Z11matmultCUDAPKfiS0_iPfii
movq %rax, %r14
call clock@PLT
movq %rax, %r15
movl $1000, (%rsp)
movl $1000, %r9d
movq %r13, %r8
movl $1000, %ecx
movq %rbp, %rdx
movl $1000, %esi
movq %rbx, %rdi
call _Z7matmultPKfiS0_iPfii
call clock@PLT
subq %r15, %rax
movq %rax, %rbx
addq $16, %rsp
.cfi_def_cfa_offset 64
movl $1000, %r8d
movl $1000, %ecx
movq %r13, %rdx
movl $1000, %esi
movq %r12, %rdi
call _Z11compare_matPKfiS0_ii
pxor %xmm0, %xmm0
cvtsi2sdq %r14, %xmm0
divsd .LC7(%rip), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC7(%rip), %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L64
.cfi_endproc
.LFE2062:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z11matMultCUDAPKfmS0_mPfmi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL11matMultCUDAPKfmS0_mPfmi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 805306368
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC5:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC7:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#define NUM_THREADS 256
bool InitCUDA()
{
int count;
cudaGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for(i = 0; i < count; i++) {
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
cudaSetDevice(i);
return true;
}
void matgen(float* a, int lda, int n)
{
int i, j;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
a[i * lda + j] = (float) rand() / RAND_MAX;
}
}
}
void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
int i, j, k;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
double t = 0;
for(k = 0; k < n; k++) {
t += a[i * lda + k] * b[k * ldb + j];
}
c[i * ldc + j] = t;
}
}
}
void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
{
float max_err = 0;
float average_err = 0;
int i, j;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
if(b[i * ldb + j] != 0) {
float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
if(max_err < err) max_err = err;
average_err += err;
}
}
}
printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n));
}
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
extern __shared__ float data[];
const int tid = threadIdx.x;
// const int bid = blockIdx.x;
// const int idx = bid * blockDim.x + tid;
const int row = blockIdx.x;
// const int column = idx % n;
int i, j;
for(i = tid; i < n; i += blockDim.x) {
data[i] = a[row * lda + i];
}
__syncthreads();
for(j = tid; j < n; j += blockDim.x) {
float t = 0;
float y = 0;
for(i = 0; i < n; i++) {
// t += a[row * lda + i] * b[i * ldb + column];
float r;
y -= data[i] * b[i * ldb + j];
r = t - y;
y = (r - t) + y;
t = r;
}
c[row * ldc + j] = t;
}
}
clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
float *ac, *bc, *cc;
clock_t start, end;
start = clock();
// cudaMalloc((void**) &ac, sizeof(float) * n * n);
// cudaMalloc((void**) &bc, sizeof(float) * n * n);
// cudaMalloc((void**) &cc, sizeof(float) * n * n);
size_t pitch_a, pitch_b, pitch_c;
cudaMallocPitch((void**) &ac, &pitch_a, sizeof(float) * n, n);
cudaMallocPitch((void**) &bc, &pitch_b, sizeof(float) * n, n);
cudaMallocPitch((void**) &cc, &pitch_c, sizeof(float) * n, n);
// cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
// cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
cudaMemcpy2D(ac, pitch_a, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
cudaMemcpy2D(bc, pitch_b, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
// int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
// matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, n, bc, n, cc, n, n);
matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n);
// cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
cudaMemcpy2D(c, sizeof(float) * ldc, cc, pitch_c, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
cudaFree(ac);
cudaFree(bc);
cudaFree(cc);
end = clock();
return end - start;
}
int main()
{
float *a, *b, *c, *d;
int n = 1000;
if(!InitCUDA()) return 0;
a = (float*) malloc(sizeof(float) * n * n);
b = (float*) malloc(sizeof(float) * n * n);
c = (float*) malloc(sizeof(float) * n * n);
d = (float*) malloc(sizeof(float) * n * n);
srand(0);
matgen(a, n, n);
matgen(b, n, n);
clock_t time = matmultCUDA(a, n, b, n, c, n, n);
clock_t startc, timec;
startc = clock();
matmult(a, n, b, n, d, n, n);
timec = clock() - startc;
compare_mat(c, n, d, n, n);
printf("GPU time used: %f \n", (double) time / CLOCKS_PER_SEC);
printf("CPU time used: %f \n", (double) timec / CLOCKS_PER_SEC);
return 0;
} | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define NUM_THREADS 256
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);
return true;
}
void matgen(float* a, int lda, int n)
{
int i, j;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
a[i * lda + j] = (float) rand() / RAND_MAX;
}
}
}
void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
int i, j, k;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
double t = 0;
for(k = 0; k < n; k++) {
t += a[i * lda + k] * b[k * ldb + j];
}
c[i * ldc + j] = t;
}
}
}
void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
{
float max_err = 0;
float average_err = 0;
int i, j;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
if(b[i * ldb + j] != 0) {
float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
if(max_err < err) max_err = err;
average_err += err;
}
}
}
printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n));
}
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
extern __shared__ float data[];
const int tid = threadIdx.x;
// const int bid = blockIdx.x;
// const int idx = bid * blockDim.x + tid;
const int row = blockIdx.x;
// const int column = idx % n;
int i, j;
for(i = tid; i < n; i += blockDim.x) {
data[i] = a[row * lda + i];
}
__syncthreads();
for(j = tid; j < n; j += blockDim.x) {
float t = 0;
float y = 0;
for(i = 0; i < n; i++) {
// t += a[row * lda + i] * b[i * ldb + column];
float r;
y -= data[i] * b[i * ldb + j];
r = t - y;
y = (r - t) + y;
t = r;
}
c[row * ldc + j] = t;
}
}
clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
float *ac, *bc, *cc;
clock_t start, end;
start = clock();
// cudaMalloc((void**) &ac, sizeof(float) * n * n);
// cudaMalloc((void**) &bc, sizeof(float) * n * n);
// cudaMalloc((void**) &cc, sizeof(float) * n * n);
size_t pitch_a, pitch_b, pitch_c;
hipMallocPitch((void**) &ac, &pitch_a, sizeof(float) * n, n);
hipMallocPitch((void**) &bc, &pitch_b, sizeof(float) * n, n);
hipMallocPitch((void**) &cc, &pitch_c, sizeof(float) * n, n);
// cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
// cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
hipMemcpy2D(ac, pitch_a, a, sizeof(float) * lda, sizeof(float) * n, n, hipMemcpyHostToDevice);
hipMemcpy2D(bc, pitch_b, b, sizeof(float) * ldb, sizeof(float) * n, n, hipMemcpyHostToDevice);
// int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
// matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, n, bc, n, cc, n, n);
matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n);
// cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
hipMemcpy2D(c, sizeof(float) * ldc, cc, pitch_c, sizeof(float) * n, n, hipMemcpyDeviceToHost);
hipFree(ac);
hipFree(bc);
hipFree(cc);
end = clock();
return end - start;
}
int main()
{
float *a, *b, *c, *d;
int n = 1000;
if(!InitCUDA()) return 0;
a = (float*) malloc(sizeof(float) * n * n);
b = (float*) malloc(sizeof(float) * n * n);
c = (float*) malloc(sizeof(float) * n * n);
d = (float*) malloc(sizeof(float) * n * n);
srand(0);
matgen(a, n, n);
matgen(b, n, n);
clock_t time = matmultCUDA(a, n, b, n, c, n, n);
clock_t startc, timec;
startc = clock();
matmult(a, n, b, n, d, n, n);
timec = clock() - startc;
compare_mat(c, n, d, n, n);
printf("GPU time used: %f \n", (double) time / CLOCKS_PER_SEC);
printf("CPU time used: %f \n", (double) timec / CLOCKS_PER_SEC);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define NUM_THREADS 256
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;
}
}
}
if(i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);
return true;
}
void matgen(float* a, int lda, int n)
{
int i, j;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
a[i * lda + j] = (float) rand() / RAND_MAX;
}
}
}
void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
int i, j, k;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
double t = 0;
for(k = 0; k < n; k++) {
t += a[i * lda + k] * b[k * ldb + j];
}
c[i * ldc + j] = t;
}
}
}
void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
{
float max_err = 0;
float average_err = 0;
int i, j;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
if(b[i * ldb + j] != 0) {
float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
if(max_err < err) max_err = err;
average_err += err;
}
}
}
printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n));
}
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
extern __shared__ float data[];
const int tid = threadIdx.x;
// const int bid = blockIdx.x;
// const int idx = bid * blockDim.x + tid;
const int row = blockIdx.x;
// const int column = idx % n;
int i, j;
for(i = tid; i < n; i += blockDim.x) {
data[i] = a[row * lda + i];
}
__syncthreads();
for(j = tid; j < n; j += blockDim.x) {
float t = 0;
float y = 0;
for(i = 0; i < n; i++) {
// t += a[row * lda + i] * b[i * ldb + column];
float r;
y -= data[i] * b[i * ldb + j];
r = t - y;
y = (r - t) + y;
t = r;
}
c[row * ldc + j] = t;
}
}
clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
float *ac, *bc, *cc;
clock_t start, end;
start = clock();
// cudaMalloc((void**) &ac, sizeof(float) * n * n);
// cudaMalloc((void**) &bc, sizeof(float) * n * n);
// cudaMalloc((void**) &cc, sizeof(float) * n * n);
size_t pitch_a, pitch_b, pitch_c;
hipMallocPitch((void**) &ac, &pitch_a, sizeof(float) * n, n);
hipMallocPitch((void**) &bc, &pitch_b, sizeof(float) * n, n);
hipMallocPitch((void**) &cc, &pitch_c, sizeof(float) * n, n);
// cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
// cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
hipMemcpy2D(ac, pitch_a, a, sizeof(float) * lda, sizeof(float) * n, n, hipMemcpyHostToDevice);
hipMemcpy2D(bc, pitch_b, b, sizeof(float) * ldb, sizeof(float) * n, n, hipMemcpyHostToDevice);
// int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
// matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, n, bc, n, cc, n, n);
matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n);
// cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
hipMemcpy2D(c, sizeof(float) * ldc, cc, pitch_c, sizeof(float) * n, n, hipMemcpyDeviceToHost);
hipFree(ac);
hipFree(bc);
hipFree(cc);
end = clock();
return end - start;
}
int main()
{
float *a, *b, *c, *d;
int n = 1000;
if(!InitCUDA()) return 0;
a = (float*) malloc(sizeof(float) * n * n);
b = (float*) malloc(sizeof(float) * n * n);
c = (float*) malloc(sizeof(float) * n * n);
d = (float*) malloc(sizeof(float) * n * n);
srand(0);
matgen(a, n, n);
matgen(b, n, n);
clock_t time = matmultCUDA(a, n, b, n, c, n, n);
clock_t startc, timec;
startc = clock();
matmult(a, n, b, n, d, n, n);
timec = clock() - startc;
compare_mat(c, n, d, n, n);
printf("GPU time used: %f \n", (double) time / CLOCKS_PER_SEC);
printf("CPU time used: %f \n", (double) timec / CLOCKS_PER_SEC);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._ZL11matMultCUDAPKfmS0_mPfmi,"axG",@progbits,_ZL11matMultCUDAPKfmS0_mPfmi,comdat
.globl _ZL11matMultCUDAPKfmS0_mPfmi
.p2align 8
.type _ZL11matMultCUDAPKfmS0_mPfmi,@function
_ZL11matMultCUDAPKfmS0_mPfmi:
s_load_b32 s12, s[0:1], 0x30
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s12, v0
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s8, s[0:1], 0x44
s_ashr_i32 s3, s15, 31
v_lshl_add_u32 v3, v0, 2, 0
v_mov_b32_e32 v1, v0
s_waitcnt lgkmcnt(0)
s_mul_i32 s7, s15, s7
s_mul_hi_u32 s9, s15, s6
s_mul_i32 s3, s3, s6
s_add_i32 s7, s9, s7
s_mul_i32 s6, s15, s6
s_add_i32 s7, s7, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[6:7], s[6:7], 2
s_add_u32 s3, s4, s6
s_addc_u32 s4, s5, s7
s_and_b32 s5, s8, 0xffff
s_mov_b32 s6, 0
s_lshl_b32 s7, s5, 2
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 2, v[1:2]
v_add_nc_u32_e32 v1, s5, v1
v_add_co_u32 v4, vcc_lo, s3, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v5, vcc_lo, s4, v5, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s12, v1
global_load_b32 v2, v[4:5], off
s_or_b32 s6, vcc_lo, s6
s_waitcnt vmcnt(0)
ds_store_b32 v3, v2
v_add_nc_u32_e32 v3, s7, v3
s_and_not1_b32 exec_lo, exec_lo, s6
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s12, v0
s_cbranch_execz .LBB0_10
s_clause 0x1
s_load_b256 s[4:11], s[0:1], 0x10
s_load_b32 s0, s[0:1], 0x44
s_cmp_gt_i32 s12, 0
s_cselect_b32 s1, -1, 0
s_ashr_i32 s2, s15, 31
s_waitcnt lgkmcnt(0)
s_mul_i32 s3, s15, s11
s_mul_hi_u32 s11, s15, s10
s_mul_i32 s13, s2, s10
s_add_i32 s3, s11, s3
s_mul_i32 s2, s15, s10
s_add_i32 s3, s3, s13
s_mov_b32 s10, 0
s_lshl_b64 s[2:3], s[2:3], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s8, s8, s2
s_addc_u32 s9, s9, s3
s_and_b32 s11, s0, 0xffff
s_lshl_b64 s[2:3], s[6:7], 2
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_7
.p2align 6
.LBB0_5:
v_mov_b32_e32 v4, 0
.LBB0_6:
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[0:1]
v_add_nc_u32_e32 v0, s11, v0
v_cmp_le_i32_e32 vcc_lo, s12, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v1, s0, s8, v1
v_add_co_ci_u32_e64 v2, s0, s9, v2, s0
s_or_b32 s10, vcc_lo, s10
global_store_b32 v[1:2], v4, off
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execz .LBB0_10
.LBB0_7:
v_ashrrev_i32_e32 v1, 31, v0
s_and_not1_b32 vcc_lo, exec_lo, s1
s_cbranch_vccnz .LBB0_5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[0:1]
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, 0
s_mov_b32 s0, 0
s_mov_b32 s6, s12
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
.p2align 6
.LBB0_9:
global_load_b32 v6, v[2:3], off
v_dual_mov_b32 v7, s0 :: v_dual_mov_b32 v8, v4
v_add_co_u32 v2, vcc_lo, v2, s2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
ds_load_b32 v7, v7
s_add_i32 s6, s6, -1
s_add_i32 s0, s0, 4
s_cmp_lg_u32 s6, 0
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fma_f32 v5, -v7, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v4, v8, v5
v_sub_f32_e32 v6, v4, v8
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v5, v5, v6
s_cbranch_scc1 .LBB0_9
s_branch .LBB0_6
.LBB0_10:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _ZL11matMultCUDAPKfmS0_mPfmi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._ZL11matMultCUDAPKfmS0_mPfmi,"axG",@progbits,_ZL11matMultCUDAPKfmS0_mPfmi,comdat
.Lfunc_end0:
.size _ZL11matMultCUDAPKfmS0_mPfmi, .Lfunc_end0-_ZL11matMultCUDAPKfmS0_mPfmi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 8
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
- .offset: 176
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _ZL11matMultCUDAPKfmS0_mPfmi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _ZL11matMultCUDAPKfmS0_mPfmi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define NUM_THREADS 256
// Probes the system for a usable HIP/CUDA device.
// Selects the first device whose compute-capability major version is >= 1
// and makes it current; prints to stderr and returns false when no device
// (or no suitable device) is found.
bool InitCUDA()
{
int count;
hipGetDeviceCount(&count);  // NOTE(review): return status ignored
if(count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for(i = 0; i < count; i++) {
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess) {
if(prop.major >= 1) {
break;  // found a device with compute capability >= 1.x
}
}
}
if(i == count) {  // loop ran to completion without breaking: none suitable
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);
return true;
}
// Fill the leading n x n window of matrix `a` (row stride `lda`, measured in
// elements) with pseudo-random values in [0, 1] drawn from rand().  Cells
// outside the n-column window of each row are left untouched.
void matgen(float* a, int lda, int n)
{
    for (int row = 0; row < n; ++row) {
        float* dst = a + row * lda;   // start of this row's window
        for (int col = 0; col < n; ++col) {
            dst[col] = (float) rand() / RAND_MAX;
        }
    }
}
// Reference (CPU) matrix multiply: C = A * B over the leading n x n windows.
// lda/ldb/ldc are row strides in elements.  Each dot product is accumulated
// in double precision (in ascending k order, matching the original exactly)
// and then narrowed to float on store.
void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            double acc = 0;
            for (int k = 0; k < n; ++k) {
                acc += a[row * lda + k] * b[k * ldb + col];
            }
            c[row * ldc + col] = acc;
        }
    }
}
// Reports (via stdout) the maximum and average relative error of matrix `a`
// (stride lda) against reference matrix `b` (stride ldb) over their leading
// n x n windows.
void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
{
float max_err = 0;
float average_err = 0;
int i, j;
for(i = 0; i < n; i++) {
for(j = 0; j < n; j++) {
// Entries whose reference value is exactly 0 are skipped to avoid
// dividing by zero.
if(b[i * ldb + j] != 0) {
float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]);
if(max_err < err) max_err = err;
average_err += err;
}
}
}
// NOTE(review): the average divides by n*n even though zero-reference
// entries were skipped, so it slightly understates the mean relative
// error of the cells actually compared.
printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n));
}
// GPU kernel: each block computes one row of C (row = blockIdx.x); the
// block's threads stride over the columns of that row.  lda/ldb/ldc are row
// strides in elements.  Dynamic shared memory (`data`, sized at launch as
// sizeof(float) * n) caches the current row of A so it is read from global
// memory only once per block.
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
extern __shared__ float data[];
const int tid = threadIdx.x;
// const int bid = blockIdx.x;
// const int idx = bid * blockDim.x + tid;
const int row = blockIdx.x;
// const int column = idx % n;
int i, j;
// Cooperatively stage row `row` of A into shared memory.
for(i = tid; i < n; i += blockDim.x) {
data[i] = a[row * lda + i];
}
__syncthreads();
for(j = tid; j < n; j += blockDim.x) {
// Dot product of the cached A-row with column j of B, accumulated with
// Kahan-style compensated summation: t is the running sum, y carries the
// (negated) rounding compensation from one step to the next.
float t = 0;
float y = 0;
for(i = 0; i < n; i++) {
// t += a[row * lda + i] * b[i * ldb + column];
float r;
y -= data[i] * b[i * ldb + j];
r = t - y;
y = (r - t) + y;
t = r;
}
c[row * ldc + j] = t;
}
}
// Host driver for one GPU multiply: C = A * B (n x n, float).
// Allocates pitched device buffers, copies A and B to the device, launches
// matMultCUDA with one block per row, NUM_THREADS threads per block and
// sizeof(float) * n bytes of dynamic shared memory, copies C back, and
// returns the elapsed clock() ticks (timing includes allocation + copies).
clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
float *ac, *bc, *cc;
clock_t start, end;
start = clock();
// cudaMalloc((void**) &ac, sizeof(float) * n * n);
// cudaMalloc((void**) &bc, sizeof(float) * n * n);
// cudaMalloc((void**) &cc, sizeof(float) * n * n);
size_t pitch_a, pitch_b, pitch_c;
// NOTE(review): hipMallocPitch/hipMemcpy2D return codes are never checked.
hipMallocPitch((void**) &ac, &pitch_a, sizeof(float) * n, n);
hipMallocPitch((void**) &bc, &pitch_b, sizeof(float) * n, n);
hipMallocPitch((void**) &cc, &pitch_c, sizeof(float) * n, n);
// cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
// cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
hipMemcpy2D(ac, pitch_a, a, sizeof(float) * lda, sizeof(float) * n, n, hipMemcpyHostToDevice);
hipMemcpy2D(bc, pitch_b, b, sizeof(float) * ldb, sizeof(float) * n, n, hipMemcpyHostToDevice);
// int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
// matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, n, bc, n, cc, n, n);
// Pitches are in bytes; divide by sizeof(float) to pass element strides.
matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n);
// cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
hipMemcpy2D(c, sizeof(float) * ldc, cc, pitch_c, sizeof(float) * n, n, hipMemcpyDeviceToHost);
hipFree(ac);
hipFree(bc);
hipFree(cc);
end = clock();
return end - start;
}
// Program entry point: builds two n x n random matrices, multiplies them on
// the GPU (matmultCUDA) and on the CPU (matmult), compares the two results,
// and prints both elapsed times.  Always returns 0 (original behavior).
int main()
{
    float *a, *b, *c, *d;
    int n = 1000;
    if(!InitCUDA()) return 0;
    a = (float*) malloc(sizeof(float) * n * n);
    b = (float*) malloc(sizeof(float) * n * n);
    c = (float*) malloc(sizeof(float) * n * n);
    d = (float*) malloc(sizeof(float) * n * n);
    // Fix: the original dereferenced these without checking for allocation
    // failure and never freed them.
    if(a == NULL || b == NULL || c == NULL || d == NULL) {
        fprintf(stderr, "Out of memory.\n");
        free(a); free(b); free(c); free(d);   // free(NULL) is a no-op
        return 0;
    }
    srand(0);   // fixed seed: reproducible matrices
    matgen(a, n, n);
    matgen(b, n, n);
    clock_t gpu_ticks = matmultCUDA(a, n, b, n, c, n, n);
    clock_t startc, timec;
    startc = clock();
    matmult(a, n, b, n, d, n, n);   // CPU reference result into d
    timec = clock() - startc;
    compare_mat(c, n, d, n, n);     // GPU result vs CPU reference
    printf("GPU time used: %f \n", (double) gpu_ticks / CLOCKS_PER_SEC);
    printf("CPU time used: %f \n", (double) timec / CLOCKS_PER_SEC);
    // Fix: release the host buffers (the original leaked all four).
    free(a);
    free(b);
    free(c);
    free(d);
    return 0;
}
.file "2_m_sha.hip"
	.globl	_Z8InitCUDAv                    # -- Begin function _Z8InitCUDAv
	.p2align	4, 0x90
	.type	_Z8InitCUDAv,@function
#-----------------------------------------------------------------------
# bool InitCUDA()  -- SysV AMD64, clang-compiled HIP host code.
# Counts HIP devices, probes each with hipGetDevicePropertiesR0600 until one
# reports a positive major compute capability, then selects it.
# Out:   %al = 1 on success, 0 when no device / none suitable.
# Frame: 1480 bytes; 4(%rsp) = int count, 8(%rsp) = hipDeviceProp_t prop.
#-----------------------------------------------------------------------
_Z8InitCUDAv:                           # @_Z8InitCUDAv
	.cfi_startproc
# %bb.0:
	pushq	%r14
	.cfi_def_cfa_offset 16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	subq	$1480, %rsp                     # imm = 0x5C8
	.cfi_def_cfa_offset 1504
	.cfi_offset %rbx, -24
	.cfi_offset %r14, -16
	leaq	4(%rsp), %rdi
	callq	hipGetDeviceCount
	cmpl	$0, 4(%rsp)
	je	.LBB0_3                         # count == 0 -> "There is no device."
# %bb.1:                                # %.preheader
	cmpl	$0, 4(%rsp)
	jle	.LBB0_2
# %bb.5:                                # %.lr.ph
	xorl	%ebx, %ebx                      # %ebx = device index i
	leaq	8(%rsp), %r14                   # %r14 = &prop
	jmp	.LBB0_6
	.p2align	4, 0x90
.LBB0_8:                                #   in Loop: Header=BB0_6 Depth=1
	incl	%ebx
	cmpl	4(%rsp), %ebx
	jge	.LBB0_9
.LBB0_6:                                # =>This Inner Loop Header: Depth=1
	movq	%r14, %rdi
	movl	%ebx, %esi
	callq	hipGetDevicePropertiesR0600
	testl	%eax, %eax
	jne	.LBB0_8                         # query failed -> try next device
# %bb.7:                                #   in Loop: Header=BB0_6 Depth=1
	cmpl	$0, 368(%rsp)                   # prop.major (offset 360 within prop)
	jle	.LBB0_8
	jmp	.LBB0_9
.LBB0_2:
	xorl	%ebx, %ebx
.LBB0_9:                                # %._crit_edge
	cmpl	4(%rsp), %ebx
	je	.LBB0_10                        # scanned every device, none suitable
# %bb.11:
	movl	%ebx, %edi
	callq	hipSetDevice
	movb	$1, %al                         # return true
.LBB0_12:
                                        # kill: def $al killed $al killed $eax
	addq	$1480, %rsp                     # imm = 0x5C8
	.cfi_def_cfa_offset 24
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%r14
	.cfi_def_cfa_offset 8
	retq
.LBB0_3:
	.cfi_def_cfa_offset 1504
	movq	stderr(%rip), %rcx
	movl	$.L.str, %edi                   # "There is no device.\n"
	movl	$20, %esi                       # message length
	jmp	.LBB0_4
.LBB0_10:
	movq	stderr(%rip), %rcx
	movl	$.L.str.1, %edi                 # "There is no device supporting CUDA 1.x.\n"
	movl	$40, %esi
.LBB0_4:
	movl	$1, %edx
	callq	fwrite@PLT                      # fwrite(msg, len, 1, stderr)
	xorl	%eax, %eax                      # return false
	jmp	.LBB0_12
.Lfunc_end0:
	.size	_Z8InitCUDAv, .Lfunc_end0-_Z8InitCUDAv
	.cfi_endproc
                                        # -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z6matgenPfii
.LCPI1_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl _Z6matgenPfii
.p2align 4, 0x90
.type _Z6matgenPfii,@function
_Z6matgenPfii: # @_Z6matgenPfii
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB1_6
# %bb.1: # %.preheader.lr.ph
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movslq %esi, %r14
movl %edx, %r15d
shlq $2, %r14
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_3: # Parent Loop BB1_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%rbx,%r13,4)
incq %r13
cmpq %r13, %r15
jne .LBB1_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB1_2 Depth=1
incq %r12
addq %r14, %rbx
cmpq %r15, %r12
jne .LBB1_2
# %bb.5:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.LBB1_6: # %._crit_edge13
retq
.Lfunc_end1:
.size _Z6matgenPfii, .Lfunc_end1-_Z6matgenPfii
.cfi_endproc
# -- End function
.globl _Z7matmultPKfiS0_iPfii # -- Begin function _Z7matmultPKfiS0_iPfii
.p2align 4, 0x90
.type _Z7matmultPKfiS0_iPfii,@function
_Z7matmultPKfiS0_iPfii: # @_Z7matmultPKfiS0_iPfii
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl 40(%rsp), %r10d
testl %r10d, %r10d
jle .LBB2_7
# %bb.1: # %.preheader26.lr.ph
movslq %ecx, %rax
movslq %esi, %rcx
movslq %r9d, %rsi
movl %r10d, %r9d
shlq $2, %rcx
shlq $2, %rax
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB2_2: # %.preheader26
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
# Child Loop BB2_4 Depth 3
movq %r10, %r11
imulq %rsi, %r11
leaq (%r8,%r11,4), %r11
movq %rdx, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_3: # %.preheader
# Parent Loop BB2_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_4 Depth 3
xorpd %xmm0, %xmm0
movq %rbx, %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_4: # Parent Loop BB2_2 Depth=1
# Parent Loop BB2_3 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rdi,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r15), %xmm1
cvtss2sd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %r12
addq %rax, %r15
cmpq %r12, %r9
jne .LBB2_4
# %bb.5: # %._crit_edge
# in Loop: Header=BB2_3 Depth=2
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r11,%r14,4)
incq %r14
addq $4, %rbx
cmpq %r9, %r14
jne .LBB2_3
# %bb.6: # %._crit_edge30
# in Loop: Header=BB2_2 Depth=1
incq %r10
addq %rcx, %rdi
cmpq %r9, %r10
jne .LBB2_2
.LBB2_7: # %._crit_edge32
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z7matmultPKfiS0_iPfii, .Lfunc_end2-_Z7matmultPKfiS0_iPfii
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z11compare_matPKfiS0_ii
.LCPI3_0:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl _Z11compare_matPKfiS0_ii
.p2align 4, 0x90
.type _Z11compare_matPKfiS0_ii,@function
_Z11compare_matPKfiS0_ii: # @_Z11compare_matPKfiS0_ii
.cfi_startproc
# %bb.0:
testl %r8d, %r8d
jle .LBB3_1
# %bb.2: # %.preheader.lr.ph
movslq %ecx, %rax
movslq %esi, %rcx
movl %r8d, %esi
shlq $2, %rcx
shlq $2, %rax
xorps %xmm0, %xmm0
xorl %r9d, %r9d
movaps .LCPI3_0(%rip), %xmm2 # xmm2 = [NaN,NaN,NaN,NaN]
xorps %xmm3, %xmm3
xorps %xmm1, %xmm1
jmp .LBB3_3
.p2align 4, 0x90
.LBB3_7: # %._crit_edge
# in Loop: Header=BB3_3 Depth=1
incq %r9
addq %rcx, %rdi
addq %rax, %rdx
cmpq %rsi, %r9
je .LBB3_8
.LBB3_3: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
xorl %r10d, %r10d
jmp .LBB3_4
.p2align 4, 0x90
.LBB3_6: # in Loop: Header=BB3_4 Depth=2
incq %r10
cmpq %r10, %rsi
je .LBB3_7
.LBB3_4: # Parent Loop BB3_3 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdx,%r10,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm4
jne .LBB3_5
jnp .LBB3_6
.LBB3_5: # in Loop: Header=BB3_4 Depth=2
movss (%rdi,%r10,4), %xmm5 # xmm5 = mem[0],zero,zero,zero
subss %xmm4, %xmm5
divss %xmm4, %xmm5
andps %xmm2, %xmm5
addss %xmm5, %xmm1
maxss %xmm3, %xmm5
movaps %xmm5, %xmm3
jmp .LBB3_6
.LBB3_8: # %._crit_edge46.loopexit
xorps %xmm0, %xmm0
cvtss2sd %xmm3, %xmm0
jmp .LBB3_9
.LBB3_1:
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
.LBB3_9: # %._crit_edge46
imull %r8d, %r8d
xorps %xmm2, %xmm2
cvtsi2ss %r8d, %xmm2
divss %xmm2, %xmm1
cvtss2sd %xmm1, %xmm1
movl $.L.str.2, %edi
movb $2, %al
jmp printf # TAILCALL
.Lfunc_end3:
.size _Z11compare_matPKfiS0_ii, .Lfunc_end3-_Z11compare_matPKfiS0_ii
.cfi_endproc
# -- End function
.globl _Z11matmultCUDAPKfiS0_iPfii # -- Begin function _Z11matmultCUDAPKfiS0_iPfii
.p2align 4, 0x90
.type _Z11matmultCUDAPKfiS0_iPfii,@function
_Z11matmultCUDAPKfiS0_iPfii: # @_Z11matmultCUDAPKfiS0_iPfii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $248, %rsp
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, 48(%rsp) # 4-byte Spill
movq %r8, 80(%rsp) # 8-byte Spill
movl %ecx, %ebx
movq %rdx, %r13
movl %esi, %ebp
movq %rdi, %r14
movl 304(%rsp), %r15d
callq clock
movq %rax, 88(%rsp) # 8-byte Spill
movslq %r15d, %r15
leaq (,%r15,4), %r12
leaq 40(%rsp), %rdi
leaq 72(%rsp), %rsi
movq %r12, %rdx
movq %r15, %rcx
callq hipMallocPitch
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
movq %r12, %rdx
movq %r15, %rcx
callq hipMallocPitch
leaq 24(%rsp), %rdi
leaq 56(%rsp), %rsi
movq %r12, %rdx
movq %r15, %rcx
callq hipMallocPitch
movq 40(%rsp), %rdi
movq 72(%rsp), %rsi
movslq %ebp, %rcx
shlq $2, %rcx
movl $1, (%rsp)
movq %r14, %rdx
movq %r12, %r8
movq %r15, %r9
callq hipMemcpy2D
movq 32(%rsp), %rdi
movq 64(%rsp), %rsi
movslq %ebx, %rcx
movl 304(%rsp), %ebx
shlq $2, %rcx
movl $1, (%rsp)
movq %r13, %rdx
movq %r12, %r8
movq %r15, %r9
callq hipMemcpy2D
movabsq $4294967296, %rdx # imm = 0x100000000
movq %rbx, %rdi
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
movq %r12, %r8
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq 40(%rsp), %rax
movq 72(%rsp), %rcx
shrq $2, %rcx
movq 32(%rsp), %rdx
movq 64(%rsp), %rsi
shrq $2, %rsi
movq 24(%rsp), %rdi
movq 56(%rsp), %r8
shrq $2, %r8
movq %rax, 184(%rsp)
movq %rcx, 176(%rsp)
movq %rdx, 168(%rsp)
movq %rsi, 160(%rsp)
movq %rdi, 152(%rsp)
movq %r8, 144(%rsp)
movl %ebx, 52(%rsp)
leaq 184(%rsp), %rax
movq %rax, 192(%rsp)
leaq 176(%rsp), %rax
movq %rax, 200(%rsp)
leaq 168(%rsp), %rax
movq %rax, 208(%rsp)
leaq 160(%rsp), %rax
movq %rax, 216(%rsp)
leaq 152(%rsp), %rax
movq %rax, 224(%rsp)
leaq 144(%rsp), %rax
movq %rax, 232(%rsp)
leaq 52(%rsp), %rax
movq %rax, 240(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rax
movq 96(%rsp), %rdi
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
movq %rdi, 8(%rsp)
movq %rax, (%rsp)
leaq 192(%rsp), %r9
movl $_ZL11matMultCUDAPKfmS0_mPfmi, %edi
callq hipLaunchKernel
.LBB4_2:
movslq 48(%rsp), %rsi # 4-byte Folded Reload
shlq $2, %rsi
movq 24(%rsp), %rdx
movq 56(%rsp), %rcx
movl $2, (%rsp)
movq 80(%rsp), %rdi # 8-byte Reload
movq %r12, %r8
movq %r15, %r9
callq hipMemcpy2D
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
callq clock
subq 88(%rsp), %rax # 8-byte Folded Reload
addq $248, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z11matmultCUDAPKfiS0_iPfii, .Lfunc_end4-_Z11matmultCUDAPKfiS0_iPfii
.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function _ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
	.type	_ZL26__device_stub__matMultCUDAPKfmS0_mPfmi,@function
#-----------------------------------------------------------------------
# HIP launch stub for matMultCUDA(const float*, size_t, const float*,
# size_t, float*, size_t, int).
# Spills the six register arguments, builds the kernel-argument pointer
# array that hipLaunchKernel expects (the 7th argument, int n, is referenced
# in place in the caller's frame at 160(%rsp)), pops the launch
# configuration recorded by __hipPushCallConfiguration, and launches.
#-----------------------------------------------------------------------
_ZL26__device_stub__matMultCUDAPKfmS0_mPfmi: # @_ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
	.cfi_startproc
# %bb.0:
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	movq	%rdi, 88(%rsp)                  # a
	movq	%rsi, 80(%rsp)                  # lda
	movq	%rdx, 72(%rsp)                  # b
	movq	%rcx, 64(%rsp)                  # ldb
	movq	%r8, 56(%rsp)                   # c
	movq	%r9, 48(%rsp)                   # ldc
	leaq	88(%rsp), %rax
	movq	%rax, 96(%rsp)                  # args[0] = &a
	leaq	80(%rsp), %rax
	movq	%rax, 104(%rsp)                 # args[1] = &lda
	leaq	72(%rsp), %rax
	movq	%rax, 112(%rsp)                 # args[2] = &b
	leaq	64(%rsp), %rax
	movq	%rax, 120(%rsp)                 # args[3] = &ldb
	leaq	56(%rsp), %rax
	movq	%rax, 128(%rsp)                 # args[4] = &c
	leaq	48(%rsp), %rax
	movq	%rax, 136(%rsp)                 # args[5] = &ldc
	leaq	160(%rsp), %rax                 # caller-frame slot holding n
	movq	%rax, 144(%rsp)                 # args[6] = &n
	leaq	32(%rsp), %rdi                  # out: grid dim
	leaq	16(%rsp), %rsi                  # out: block dim
	leaq	8(%rsp), %rdx                   # out: shared-mem bytes
	movq	%rsp, %rcx                      # out: stream
	callq	__hipPopCallConfiguration
	movq	32(%rsp), %rsi
	movl	40(%rsp), %edx
	movq	16(%rsp), %rcx
	movl	24(%rsp), %r8d
	leaq	96(%rsp), %r9                   # kernel-args array
	movl	$_ZL11matMultCUDAPKfmS0_mPfmi, %edi
	pushq	(%rsp)                          # stack arg: stream
	.cfi_adjust_cfa_offset 8
	pushq	16(%rsp)                        # stack arg: shared-mem bytes (8(%rsp) before the push above)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$168, %rsp
	.cfi_adjust_cfa_offset -168
	retq
.Lfunc_end5:
	.size	_ZL26__device_stub__matMultCUDAPKfmS0_mPfmi, .Lfunc_end5-_ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
	.cfi_endproc
                                        # -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI6_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI6_2:
.long 0x49742400 # float 1.0E+6
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI6_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI6_3:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
callq _Z8InitCUDAv
testb %al, %al
je .LBB6_22
# %bb.1:
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r15
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, 8(%rsp) # 8-byte Spill
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %rbx
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r14
xorl %r13d, %r13d
xorl %edi, %edi
callq srand
movq %r15, %rbp
.p2align 4, 0x90
.LBB6_2: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB6_3 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB6_3: # Parent Loop BB6_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI6_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%rbp,%r12,4)
incq %r12
cmpq $1000, %r12 # imm = 0x3E8
jne .LBB6_3
# %bb.4: # %._crit_edge.i
# in Loop: Header=BB6_2 Depth=1
incq %r13
addq $4000, %rbp # imm = 0xFA0
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB6_2
# %bb.5: # %.preheader.i36.preheader
xorl %r13d, %r13d
movq 8(%rsp), %rbp # 8-byte Reload
.p2align 4, 0x90
.LBB6_6: # %.preheader.i36
# =>This Loop Header: Depth=1
# Child Loop BB6_7 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB6_7: # Parent Loop BB6_6 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI6_0(%rip), %xmm0
movss %xmm0, (%rbp,%r12,4)
incq %r12
cmpq $1000, %r12 # imm = 0x3E8
jne .LBB6_7
# %bb.8: # %._crit_edge.i41
# in Loop: Header=BB6_6 Depth=1
incq %r13
addq $4000, %rbp # imm = 0xFA0
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB6_6
# %bb.9: # %_Z6matgenPfii.exit44
movl $1000, (%rsp) # imm = 0x3E8
movq %r15, %rdi
movl $1000, %esi # imm = 0x3E8
movq 8(%rsp), %r12 # 8-byte Reload
movq %r12, %rdx
movl $1000, %ecx # imm = 0x3E8
movq %rbx, %r8
movl $1000, %r9d # imm = 0x3E8
callq _Z11matmultCUDAPKfiS0_iPfii
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r13d, %r13d
callq clock
movq %rax, %rbp
.p2align 4, 0x90
.LBB6_10: # %.preheader26.i
# =>This Loop Header: Depth=1
# Child Loop BB6_11 Depth 2
# Child Loop BB6_12 Depth 3
imulq $4000, %r13, %rax # imm = 0xFA0
addq %r14, %rax
movq %r12, %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB6_11: # %.preheader.i45
# Parent Loop BB6_10 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB6_12 Depth 3
xorps %xmm0, %xmm0
movq %rcx, %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB6_12: # Parent Loop BB6_10 Depth=1
# Parent Loop BB6_11 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r15,%rdi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rsi), %xmm1
cvtss2sd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rdi
addq $4000, %rsi # imm = 0xFA0
cmpq $1000, %rdi # imm = 0x3E8
jne .LBB6_12
# %bb.13: # %._crit_edge.i49
# in Loop: Header=BB6_11 Depth=2
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%rax,%rdx,4)
incq %rdx
addq $4, %rcx
cmpq $1000, %rdx # imm = 0x3E8
jne .LBB6_11
# %bb.14: # %._crit_edge30.i
# in Loop: Header=BB6_10 Depth=1
incq %r13
addq $4000, %r15 # imm = 0xFA0
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB6_10
# %bb.15: # %_Z7matmultPKfiS0_iPfii.exit
xorl %r12d, %r12d
callq clock
xorps %xmm5, %xmm5
movq %rax, %r15
movaps .LCPI6_1(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
xorps %xmm2, %xmm2
xorps %xmm1, %xmm1
jmp .LBB6_16
.p2align 4, 0x90
.LBB6_20: # %._crit_edge.i54
# in Loop: Header=BB6_16 Depth=1
incq %r12
addq $4000, %rbx # imm = 0xFA0
addq $4000, %r14 # imm = 0xFA0
cmpq $1000, %r12 # imm = 0x3E8
je .LBB6_21
.LBB6_16: # %.preheader.i50
# =>This Loop Header: Depth=1
# Child Loop BB6_17 Depth 2
xorl %eax, %eax
jmp .LBB6_17
.p2align 4, 0x90
.LBB6_19: # in Loop: Header=BB6_17 Depth=2
incq %rax
cmpq $1000, %rax # imm = 0x3E8
je .LBB6_20
.LBB6_17: # Parent Loop BB6_16 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%rax,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
ucomiss %xmm5, %xmm3
jne .LBB6_18
jnp .LBB6_19
.LBB6_18: # in Loop: Header=BB6_17 Depth=2
movss (%rbx,%rax,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
subss %xmm3, %xmm4
divss %xmm3, %xmm4
andps %xmm0, %xmm4
addss %xmm4, %xmm1
maxss %xmm2, %xmm4
movaps %xmm4, %xmm2
jmp .LBB6_19
.LBB6_21: # %_Z11compare_matPKfiS0_ii.exit
subq %rbp, %r15
xorps %xmm0, %xmm0
cvtss2sd %xmm2, %xmm0
divss .LCPI6_2(%rip), %xmm1
cvtss2sd %xmm1, %xmm1
movl $.L.str.2, %edi
movb $2, %al
callq printf
xorps %xmm0, %xmm0
cvtsi2sdq 16(%rsp), %xmm0 # 8-byte Folded Reload
divsd .LCPI6_3(%rip), %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
xorps %xmm0, %xmm0
cvtsi2sd %r15, %xmm0
divsd .LCPI6_3(%rip), %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
.LBB6_22:
xorl %eax, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
#-----------------------------------------------------------------------
# Module constructor (run from .init_array): registers the embedded fat
# binary once — guarded only by a plain, non-atomic null check on
# __hip_gpubin_handle — registers the matMultCUDA kernel with the HIP
# runtime, and schedules __hip_module_dtor via atexit.
#-----------------------------------------------------------------------
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB7_2                         # already registered
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB7_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the four trailing stack args
	movups	%xmm0, (%rsp)
	movl	$_ZL11matMultCUDAPKfmS0_mPfmi, %esi
	movl	$.L__unnamed_1, %edx            # mangled device-kernel name string
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end7:
	.size	__hip_module_ctor, .Lfunc_end7-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
# Module destructor (registered via atexit by __hip_module_ctor):
# unregisters the fat binary if it was ever registered and clears the handle.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB8_2                         # never registered: nothing to do
# %bb.1:
	pushq	%rax                            # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB8_2:
	retq
.Lfunc_end8:
	.size	__hip_module_dtor, .Lfunc_end8-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "There is no device.\n"
.size .L.str, 21
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "There is no device supporting CUDA 1.x.\n"
.size .L.str.1, 41
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Max error: %g Average error: %g\n"
.size .L.str.2, 33
.type _ZL11matMultCUDAPKfmS0_mPfmi,@object # @_ZL11matMultCUDAPKfmS0_mPfmi
.section .rodata,"a",@progbits
.p2align 3, 0x0
_ZL11matMultCUDAPKfmS0_mPfmi:
.quad _ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
.size _ZL11matMultCUDAPKfmS0_mPfmi, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz "GPU time used: %f \n"
.size .L.str.3, 20
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "CPU time used: %f \n"
.size .L.str.4, 20
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_ZL11matMultCUDAPKfmS0_mPfmi"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZL11matMultCUDAPKfmS0_mPfmi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0014ed6f_00000000-6_2_m_sha.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi, @function
_ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi:
.LFB2087:
.cfi_startproc
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _ZL11matMultCUDAPKfmS0_mPfmi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi, .-_ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi
	.type	_ZL11matMultCUDAPKfmS0_mPfmi, @function
# Host-side kernel entry point (the symbol the <<<>>> launch machinery
# targets).  Re-pushes the 7th argument (int n, found in the caller's frame
# at 24(%rsp) after the 16-byte frame adjustment) so the device stub sees it
# at the expected stack offset, then forwards the register args unchanged.
_ZL11matMultCUDAPKfmS0_mPfmi:
.LFB2088:
	.cfi_startproc
	endbr64
	subq	$16, %rsp
	.cfi_def_cfa_offset 24
	movl	24(%rsp), %eax                  # reload stack arg n
	pushq	%rax
	.cfi_def_cfa_offset 32
	call	_ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi
	addq	$24, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2088:
	.size	_ZL11matMultCUDAPKfmS0_mPfmi, .-_ZL11matMultCUDAPKfmS0_mPfmi
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
# Cleanup helper: hands the cached fat-cubin handle (stored by the CUDA
# registration constructor, defined elsewhere in this file) back to the
# runtime.  The 8-byte stack adjustment only realigns %rsp for the call.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2065:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "There is no device.\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "There is no device supporting CUDA 1.x.\n"
.text
.globl _Z8InitCUDAv
.type _Z8InitCUDAv, @function
_Z8InitCUDAv:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $1064, %rsp
.cfi_def_cfa_offset 1088
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %eax
testl %eax, %eax
je .L24
movl $0, %ebx
leaq 16(%rsp), %rbp
jg .L16
jmp .L18
.L24:
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $0, %eax
jmp .L11
.L14:
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jle .L17
.L16:
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
testl %eax, %eax
jne .L14
cmpl $0, 376(%rsp)
jle .L14
.L17:
cmpl %ebx, 12(%rsp)
je .L25
.L18:
movl %ebx, %edi
call cudaSetDevice@PLT
movl $1, %eax
.L11:
movq 1048(%rsp), %rdx
subq %fs:40, %rdx
jne .L26
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %eax
jmp .L11
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z8InitCUDAv, .-_Z8InitCUDAv
.globl _Z6matgenPfii
.type _Z6matgenPfii, @function
_Z6matgenPfii:
.LFB2058:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L33
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %edx, %r15d
movslq %esi, %rsi
leaq 0(,%rsi,4), %r13
movslq %edx, %r14
leaq (%rdi,%r14,4), %rbp
negq %r14
salq $2, %r14
movl $0, %r12d
.L29:
leaq 0(%rbp,%r14), %rbx
.L30:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC2(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L30
addl $1, %r12d
addq %r13, %rbp
cmpl %r12d, %r15d
jne .L29
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2058:
.size _Z6matgenPfii, .-_Z6matgenPfii
.globl _Z7matmultPKfiS0_iPfii
.type _Z7matmultPKfiS0_iPfii, @function
_Z7matmultPKfiS0_iPfii:
.LFB2059:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl 48(%rsp), %r12d
testl %r12d, %r12d
jle .L36
movq %rdx, %r11
movl %ecx, %eax
movslq %r9d, %rbp
salq $2, %rbp
movslq %esi, %rsi
leaq 0(,%rsi,4), %rbx
movq %rdi, %r9
movslq %r12d, %r10
leaq (%rdi,%r10,4), %rcx
cltq
leaq 0(,%rax,4), %rsi
movl $0, %r14d
.L38:
movq %r11, %r13
movl $0, %edi
.L41:
movq %r13, %rdx
movq %r9, %rax
pxor %xmm1, %xmm1
.L39:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
cvtss2sd %xmm0, %xmm0
addsd %xmm0, %xmm1
addq $4, %rax
addq %rsi, %rdx
cmpq %rcx, %rax
jne .L39
cvtsd2ss %xmm1, %xmm1
movss %xmm1, (%r8,%rdi,4)
addq $1, %rdi
addq $4, %r13
cmpq %r10, %rdi
jne .L41
addl $1, %r14d
addq %rbp, %r8
addq %rbx, %r9
addq %rbx, %rcx
cmpl %r14d, %r12d
jne .L38
.L36:
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z7matmultPKfiS0_iPfii, .-_Z7matmultPKfiS0_iPfii
# Format string used by compare_mat's final report.
.section .rodata.str1.8
.align 8
.LC6:
.string "Max error: %g Average error: %g\n"
.text
#-----------------------------------------------------------------------
# _Z11compare_matPKfiS0_ii
#   C++: void compare_mat(const float *a, int lda, const float *b,
#                         int ldb, int n)
# ABI:  System V AMD64
# In:   rdi = a, esi = lda (row stride of a, in elements),
#       rdx = b, ecx = ldb (row stride of b, in elements), r8d = n
# Does: over the n x n leading elements of both matrices, accumulates
#       sum and max of |(a[i][j] - b[i][j]) / b[i][j]| (elements where
#       b[i][j] == 0.0 are skipped; NaN compares take the error path),
#       then prints "Max error: %g Average error: %g\n" with the max
#       and sum/(n*n) via __printf_chk (eax = 2 -> two xmm varargs).
# NOTE(review): stride esi is presumably == n in all callers here
#       (main passes 1000 for both) — confirm before reuse.
#-----------------------------------------------------------------------
.globl _Z11compare_matPKfiS0_ii
.type _Z11compare_matPKfiS0_ii, @function
_Z11compare_matPKfiS0_ii:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
testl %r8d, %r8d # n <= 0: nothing to scan, print zeros
jle .L51
movq %rdi, %r9 # r9  = a (base of first matrix)
movl %esi, %r10d # r10d = lda, added per row
movl %r8d, %esi # esi  = linear end index of current row of a
movslq %ecx, %rcx
leaq 0(,%rcx,4), %rbx # rbx = ldb * 4 = byte stride of b rows
movl $0, %ebp # ebp = linear start index of current row of a
movl $0, %r11d # r11d = row counter i
pxor %xmm1, %xmm1 # xmm1 = running error sum
movaps %xmm1, %xmm4 # xmm4 = running max error
movaps %xmm1, %xmm3 # xmm3 = 0.0f (skip-divisor sentinel)
movss .LC5(%rip), %xmm5 # xmm5 = 0x7fffffff abs() mask
jmp .L46
.L53:
# Error path: xmm2 = b element (nonzero); compute |(a - b) / b|.
movslq %eax, %rdi
movss (%r9,%rdi,4), %xmm0 # a[i][j]
subss %xmm2, %xmm0 # a - b
divss %xmm2, %xmm0 # (a - b) / b
andps %xmm5, %xmm0 # fabsf via sign-bit mask
movaps %xmm0, %xmm6
maxss %xmm4, %xmm6 # max = max(max, err)
movaps %xmm6, %xmm4
addss %xmm0, %xmm1 # sum += err
.L47:
addq $4, %rcx # next element of b row
addl $1, %eax # next linear index into a
cmpl %eax, %esi
je .L56 # end of row
.L50:
movss (%rcx), %xmm2 # xmm2 = b[i][j]
ucomiss %xmm3, %xmm2 # compare against 0.0f
jp .L53 # NaN: treat as nonzero, take error path
je .L47 # exactly zero: skip (avoid div-by-zero)
jmp .L53
.L56:
# Advance to next row of both matrices.
addl $1, %r11d # ++i
addl %r10d, %esi # row-end index += lda
addq %rbx, %rdx # b row pointer += ldb*4
addl %r10d, %ebp # row-start index += lda
cmpl %r11d, %r8d
je .L45
.L46:
movl %ebp, %eax # eax = linear index into a for this row
movq %rdx, %rcx # rcx = pointer into current b row
jmp .L50
.L51:
pxor %xmm1, %xmm1 # n <= 0: sum = 0, max = 0
movaps %xmm1, %xmm4
.L45:
# Print summary: average = sum / (n*n), via variadic __printf_chk.
imull %r8d, %r8d # n*n (int)
pxor %xmm0, %xmm0
cvtsi2ssl %r8d, %xmm0
divss %xmm0, %xmm1 # average error
pxor %xmm0, %xmm0
cvtss2sd %xmm4, %xmm0 # arg1 (double) = max error
cvtss2sd %xmm1, %xmm1 # arg2 (double) = average error
leaq .LC6(%rip), %rsi
movl $2, %edi # __printf_chk flag
movl $2, %eax # SysV varargs: two xmm registers used
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z11compare_matPKfiS0_ii, .-_Z11compare_matPKfiS0_ii
#-----------------------------------------------------------------------
# _Z11matmultCUDAPKfiS0_iPfii
#   C++: clock_t matmultCUDA(const float *a, int lda, const float *b,
#                            int ldb, float *c, int ldc, int n)
# ABI:  System V AMD64; 7th arg (n) arrives on the stack at 176(%rsp)
#       after the prologue.
# Does: times (clock() delta, returned in rax) a full GPU round trip:
#       three cudaMallocPitch allocations of n rows x n*4 bytes, two
#       host->device cudaMemcpy2D copies (kind 1), a kernel launch via
#       __cudaPushCallConfiguration + the generated device stub with a
#       256x1x1 block and an n x 1 x 1 grid, one device->host
#       cudaMemcpy2D of the result (kind 2), and three cudaFree calls.
#       Device pitches are byte counts; they are shifted right by 2
#       before the stub call to convert to float-element pitches.
#       Stack-protector canary checked on exit.
#-----------------------------------------------------------------------
.globl _Z11matmultCUDAPKfiS0_iPfii
.type _Z11matmultCUDAPKfiS0_iPfii, @function
_Z11matmultCUDAPKfiS0_iPfii:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
# Spill incoming args: (%rsp)=a, r13d=lda, 8(%rsp)=b, r12d=ldb,
# 16(%rsp)=c, 28(%rsp)=ldc, r15d=n (7th arg, from caller's stack).
movq %rdi, (%rsp)
movl %esi, %r13d
movq %rdx, 8(%rsp)
movl %ecx, %r12d
movq %r8, 16(%rsp)
movl %r9d, 28(%rsp)
movl 176(%rsp), %r15d
movq %fs:40, %rax # install stack-protector canary
movq %rax, 104(%rsp)
xorl %eax, %eax
call clock@PLT # start timestamp
movq %rax, %r14
movslq %r15d, %rbp # rbp = (size_t)n  (rows / elems per row)
leaq 0(,%rbp,4), %rbx # rbx = n*4 = row width in bytes
# cudaMallocPitch(&devA @32(%rsp), &pitchA @56(%rsp), n*4, n)
leaq 56(%rsp), %rsi
leaq 32(%rsp), %rdi
movq %rbp, %rcx
movq %rbx, %rdx
call cudaMallocPitch@PLT
# cudaMallocPitch(&devB @40(%rsp), &pitchB @64(%rsp), n*4, n)
leaq 64(%rsp), %rsi
leaq 40(%rsp), %rdi
movq %rbp, %rcx
movq %rbx, %rdx
call cudaMallocPitch@PLT
# cudaMallocPitch(&devC @48(%rsp), &pitchC @72(%rsp), n*4, n)
leaq 72(%rsp), %rsi
leaq 48(%rsp), %rdi
movq %rbp, %rcx
movq %rbx, %rdx
call cudaMallocPitch@PLT
# cudaMemcpy2D(devA, pitchA, a, lda*4, n*4, n, 1 /*HostToDevice*/)
movslq %r13d, %rcx
salq $2, %rcx # src pitch = lda*4 bytes
subq $8, %rsp
.cfi_def_cfa_offset 184
pushq $1 # 7th arg: memcpy kind = 1
.cfi_def_cfa_offset 192
movq %rbp, %r9
movq %rbx, %r8
movq 16(%rsp), %rdx
movq 72(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy2D@PLT
# cudaMemcpy2D(devB, pitchB, b, ldb*4, n*4, n, 1 /*HostToDevice*/)
movslq %r12d, %rcx
salq $2, %rcx # src pitch = ldb*4 bytes
movl $1, (%rsp) # reuse pushed slot for kind = 1
movq %rbp, %r9
movq %rbx, %r8
movq 24(%rsp), %rdx
movq 80(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy2D@PLT
# Build launch configuration: blockDim = dim3(256,1,1) packed at
# 108(%rsp), gridDim = dim3(n,1,1) packed at 96(%rsp).
movl $256, 108(%rsp)
movl $1, 112(%rsp)
movl %r15d, 96(%rsp)
movl $1, 100(%rsp)
addq $16, %rsp
.cfi_def_cfa_offset 176
movl $0, %r9d # stream = 0
movq %rbx, %r8 # shared-mem size = n*4 bytes
movq 92(%rsp), %rdx # blockDim (packed x,y)
movl $1, %ecx # blockDim.z
movq 80(%rsp), %rdi # gridDim (packed x,y)
movl $1, %esi # gridDim.z
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L61 # 0 => config accepted, launch kernel
.L58:
# cudaMemcpy2D(c, ldc*4, devC, pitchC, n*4, n, 2 /*DeviceToHost*/)
movslq 28(%rsp), %rsi
salq $2, %rsi # dst pitch = ldc*4 bytes
subq $8, %rsp
.cfi_def_cfa_offset 184
pushq $2 # 7th arg: memcpy kind = 2
.cfi_def_cfa_offset 192
movq %rbp, %r9
movq %rbx, %r8
movq 88(%rsp), %rcx
movq 64(%rsp), %rdx
movq 32(%rsp), %rdi
call cudaMemcpy2D@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
movq 32(%rsp), %rdi # cudaFree(devA)
call cudaFree@PLT
movq 40(%rsp), %rdi # cudaFree(devB)
call cudaFree@PLT
movq 48(%rsp), %rdi # cudaFree(devC)
call cudaFree@PLT
call clock@PLT
subq %r14, %rax # return elapsed clock() ticks
movq 104(%rsp), %rdx # verify canary before returning
subq %fs:40, %rdx
jne .L62
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L61:
.cfi_restore_state
# Kernel launch: stub(devA, pitchA/4, devB, pitchB/4, devC,
# pitchC/4, n) — byte pitches converted to element pitches.
movq 64(%rsp), %rcx
shrq $2, %rcx # pitchB in floats
movq 56(%rsp), %rsi
shrq $2, %rsi # pitchA in floats
subq $8, %rsp
.cfi_def_cfa_offset 184
pushq %r15 # 7th stub arg: n
.cfi_def_cfa_offset 192
movq 88(%rsp), %r9
shrq $2, %r9 # pitchC in floats
movq 64(%rsp), %r8
movq 56(%rsp), %rdx
movq 48(%rsp), %rdi
call _ZL41__device_stub__Z11matMultCUDAPKfmS0_mPfmiPKfmS0_mPfmi
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L58
.L62:
call __stack_chk_fail@PLT # canary mismatch: abort
.cfi_endproc
.LFE2061:
.size _Z11matmultCUDAPKfiS0_iPfii, .-_Z11matmultCUDAPKfiS0_iPfii
# Timing report format strings for main.
.section .rodata.str1.1
.LC8:
.string "GPU time used: %f \n"
.LC9:
.string "CPU time used: %f \n"
.text
#-----------------------------------------------------------------------
# main
# Does: if InitCUDA() returns true:
#       - malloc four 4,000,000-byte buffers (1000 x 1000 floats):
#         rbx = a, rbp = b, r12 = GPU result, r13 = CPU result
#       - srand(0); matgen(a, 1000, 1000); matgen(b, 1000, 1000)
#       - r14 = matmultCUDA(a, 1000, b, 1000, r12, 1000, 1000)
#       - rbx = clock() delta around matmult(a,...,r13,...) on the CPU
#       - compare_mat(r12, 1000, r13, 1000, 1000)
#       - print both tick counts divided by .LC7 (double constant
#         0x412E8480_00000000 = 1.0e6, i.e. CLOCKS_PER_SEC) as seconds
#       Always returns 0. The malloc'd buffers are never freed
#       (process exit reclaims them); malloc results are not
#       NULL-checked — acceptable for a benchmark driver.
#-----------------------------------------------------------------------
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
call _Z8InitCUDAv
testb %al, %al # no usable device => return 0 silently
jne .L66
.L64:
movl $0, %eax # return 0
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L66:
.cfi_restore_state
movl $4000000, %edi # 1000*1000 floats per matrix
call malloc@PLT
movq %rax, %rbx # rbx = a
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbp # rbp = b
movl $4000000, %edi
call malloc@PLT
movq %rax, %r12 # r12 = c (GPU result)
movl $4000000, %edi
call malloc@PLT
movq %rax, %r13 # r13 = d (CPU reference result)
movl $0, %edi # srand(0): deterministic inputs
call srand@PLT
movl $1000, %edx # matgen(a, 1000, 1000)
movl $1000, %esi
movq %rbx, %rdi
call _Z6matgenPfii
movl $1000, %edx # matgen(b, 1000, 1000)
movl $1000, %esi
movq %rbp, %rdi
call _Z6matgenPfii
# matmultCUDA(a, 1000, b, 1000, c, 1000, 1000) -> GPU ticks in r14
subq $8, %rsp
.cfi_def_cfa_offset 72
pushq $1000 # 7th arg: n
.cfi_def_cfa_offset 80
movl $1000, %r9d
movq %r12, %r8
movl $1000, %ecx
movq %rbp, %rdx
movl $1000, %esi
movq %rbx, %rdi
call _Z11matmultCUDAPKfiS0_iPfii
movq %rax, %r14 # r14 = GPU clock ticks
call clock@PLT # start CPU timing
movq %rax, %r15
# matmult(a, 1000, b, 1000, d, 1000, 1000) — CPU reference
movl $1000, (%rsp) # 7th arg slot reused from the push above
movl $1000, %r9d
movq %r13, %r8
movl $1000, %ecx
movq %rbp, %rdx
movl $1000, %esi
movq %rbx, %rdi
call _Z7matmultPKfiS0_iPfii
call clock@PLT
subq %r15, %rax
movq %rax, %rbx # rbx = CPU clock ticks
addq $16, %rsp
.cfi_def_cfa_offset 64
movl $1000, %r8d # compare_mat(c, 1000, d, 1000, 1000)
movl $1000, %ecx
movq %r13, %rdx
movl $1000, %esi
movq %r12, %rdi
call _Z11compare_matPKfiS0_ii
pxor %xmm0, %xmm0 # printf(.LC8, gpu_ticks / 1e6)
cvtsi2sdq %r14, %xmm0
divsd .LC7(%rip), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax # one xmm vararg
call __printf_chk@PLT
pxor %xmm0, %xmm0 # printf(.LC9, cpu_ticks / 1e6)
cvtsi2sdq %rbx, %xmm0
divsd .LC7(%rip), %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $1, %eax # one xmm vararg
call __printf_chk@PLT
jmp .L64
.cfi_endproc
.LFE2062:
.size main, .-main
# Mangled device-kernel name used as the registration key below.
.section .rodata.str1.1
.LC10:
.string "_Z11matMultCUDAPKfmS0_mPfmi"
.text
#-----------------------------------------------------------------------
# _ZL24__sti____cudaRegisterAllv
# nvcc-generated static initializer (run via .init_array below).
# Does: registers the embedded fat binary (_ZL15__fatDeviceText)
#       with the CUDA runtime, registers the single device kernel
#       "_Z11matMultCUDAPKfmS0_mPfmi" against its host-side stub
#       address, finalizes registration, and queues the matching
#       unregister routine with atexit().
#-----------------------------------------------------------------------
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # stash handle for unregister
# __cudaRegisterFunction(handle, hostStub, name, name, -1, 0,
#                        0, 0, 0, 0) — last four args pushed as NULLs.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d # thread_limit = -1 (none)
leaq .LC10(%rip), %rdx
movq %rdx, %rcx # device name == host name string
leaq _ZL11matMultCUDAPKfmS0_mPfmi(%rip), %rsi # host stub address
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT # unregister at process exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Run the CUDA registration initializer before main().
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
# Fat-binary wrapper descriptor consumed by __cudaRegisterFatBinary:
# {magic, version, pointer to embedded cubin data, reserved}.
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977 # magic 0x466243b1
.long 1 # version
.quad fatbinData # device code blob (defined elsewhere)
.quad 0
# Handle returned by __cudaRegisterFatBinary, shared with the
# atexit unregister routine.
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
# Constant pool.
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 805306368 # 0x30000000 (float constant; user is outside this view)
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC5:
.long 2147483647 # 0x7fffffff: fabsf mask used by compare_mat
.long 0
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC7:
.long 0 # double 1.0e6 (0x412E848000000000):
.long 1093567616 # clock-ticks-to-seconds divisor in main
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
# Non-executable stack marker and CET/IBT+SHSTK property note.
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002 # GNU_PROPERTY_X86_FEATURE_1_AND
.long 3f - 2f
2:
.long 0x3 # IBT | SHSTK
3:
.align 8
4:
.file "2_m_sha.hip"
.globl _Z8InitCUDAv # -- Begin function _Z8InitCUDAv
.p2align 4, 0x90
.type _Z8InitCUDAv,@function
_Z8InitCUDAv: # @_Z8InitCUDAv
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 4(%rsp)
je .LBB0_3
# %bb.1: # %.preheader
cmpl $0, 4(%rsp)
jle .LBB0_2
# %bb.5: # %.lr.ph
xorl %ebx, %ebx
leaq 8(%rsp), %r14
jmp .LBB0_6
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_6 Depth=1
incl %ebx
cmpl 4(%rsp), %ebx
jge .LBB0_9
.LBB0_6: # =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movl %ebx, %esi
callq hipGetDevicePropertiesR0600
testl %eax, %eax
jne .LBB0_8
# %bb.7: # in Loop: Header=BB0_6 Depth=1
cmpl $0, 368(%rsp)
jle .LBB0_8
jmp .LBB0_9
.LBB0_2:
xorl %ebx, %ebx
.LBB0_9: # %._crit_edge
cmpl 4(%rsp), %ebx
je .LBB0_10
# %bb.11:
movl %ebx, %edi
callq hipSetDevice
movb $1, %al
.LBB0_12:
# kill: def $al killed $al killed $eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB0_3:
.cfi_def_cfa_offset 1504
movq stderr(%rip), %rcx
movl $.L.str, %edi
movl $20, %esi
jmp .LBB0_4
.LBB0_10:
movq stderr(%rip), %rcx
movl $.L.str.1, %edi
movl $40, %esi
.LBB0_4:
movl $1, %edx
callq fwrite@PLT
xorl %eax, %eax
jmp .LBB0_12
.Lfunc_end0:
.size _Z8InitCUDAv, .Lfunc_end0-_Z8InitCUDAv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z6matgenPfii
.LCPI1_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl _Z6matgenPfii
.p2align 4, 0x90
.type _Z6matgenPfii,@function
_Z6matgenPfii: # @_Z6matgenPfii
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB1_6
# %bb.1: # %.preheader.lr.ph
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movslq %esi, %r14
movl %edx, %r15d
shlq $2, %r14
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_3: # Parent Loop BB1_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%rbx,%r13,4)
incq %r13
cmpq %r13, %r15
jne .LBB1_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB1_2 Depth=1
incq %r12
addq %r14, %rbx
cmpq %r15, %r12
jne .LBB1_2
# %bb.5:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.LBB1_6: # %._crit_edge13
retq
.Lfunc_end1:
.size _Z6matgenPfii, .Lfunc_end1-_Z6matgenPfii
.cfi_endproc
# -- End function
.globl _Z7matmultPKfiS0_iPfii # -- Begin function _Z7matmultPKfiS0_iPfii
.p2align 4, 0x90
.type _Z7matmultPKfiS0_iPfii,@function
_Z7matmultPKfiS0_iPfii: # @_Z7matmultPKfiS0_iPfii
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl 40(%rsp), %r10d
testl %r10d, %r10d
jle .LBB2_7
# %bb.1: # %.preheader26.lr.ph
movslq %ecx, %rax
movslq %esi, %rcx
movslq %r9d, %rsi
movl %r10d, %r9d
shlq $2, %rcx
shlq $2, %rax
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB2_2: # %.preheader26
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
# Child Loop BB2_4 Depth 3
movq %r10, %r11
imulq %rsi, %r11
leaq (%r8,%r11,4), %r11
movq %rdx, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_3: # %.preheader
# Parent Loop BB2_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_4 Depth 3
xorpd %xmm0, %xmm0
movq %rbx, %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_4: # Parent Loop BB2_2 Depth=1
# Parent Loop BB2_3 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rdi,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r15), %xmm1
cvtss2sd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %r12
addq %rax, %r15
cmpq %r12, %r9
jne .LBB2_4
# %bb.5: # %._crit_edge
# in Loop: Header=BB2_3 Depth=2
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r11,%r14,4)
incq %r14
addq $4, %rbx
cmpq %r9, %r14
jne .LBB2_3
# %bb.6: # %._crit_edge30
# in Loop: Header=BB2_2 Depth=1
incq %r10
addq %rcx, %rdi
cmpq %r9, %r10
jne .LBB2_2
.LBB2_7: # %._crit_edge32
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z7matmultPKfiS0_iPfii, .Lfunc_end2-_Z7matmultPKfiS0_iPfii
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z11compare_matPKfiS0_ii
.LCPI3_0:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl _Z11compare_matPKfiS0_ii
.p2align 4, 0x90
.type _Z11compare_matPKfiS0_ii,@function
_Z11compare_matPKfiS0_ii: # @_Z11compare_matPKfiS0_ii
.cfi_startproc
# %bb.0:
testl %r8d, %r8d
jle .LBB3_1
# %bb.2: # %.preheader.lr.ph
movslq %ecx, %rax
movslq %esi, %rcx
movl %r8d, %esi
shlq $2, %rcx
shlq $2, %rax
xorps %xmm0, %xmm0
xorl %r9d, %r9d
movaps .LCPI3_0(%rip), %xmm2 # xmm2 = [NaN,NaN,NaN,NaN]
xorps %xmm3, %xmm3
xorps %xmm1, %xmm1
jmp .LBB3_3
.p2align 4, 0x90
.LBB3_7: # %._crit_edge
# in Loop: Header=BB3_3 Depth=1
incq %r9
addq %rcx, %rdi
addq %rax, %rdx
cmpq %rsi, %r9
je .LBB3_8
.LBB3_3: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
xorl %r10d, %r10d
jmp .LBB3_4
.p2align 4, 0x90
.LBB3_6: # in Loop: Header=BB3_4 Depth=2
incq %r10
cmpq %r10, %rsi
je .LBB3_7
.LBB3_4: # Parent Loop BB3_3 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdx,%r10,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm4
jne .LBB3_5
jnp .LBB3_6
.LBB3_5: # in Loop: Header=BB3_4 Depth=2
movss (%rdi,%r10,4), %xmm5 # xmm5 = mem[0],zero,zero,zero
subss %xmm4, %xmm5
divss %xmm4, %xmm5
andps %xmm2, %xmm5
addss %xmm5, %xmm1
maxss %xmm3, %xmm5
movaps %xmm5, %xmm3
jmp .LBB3_6
.LBB3_8: # %._crit_edge46.loopexit
xorps %xmm0, %xmm0
cvtss2sd %xmm3, %xmm0
jmp .LBB3_9
.LBB3_1:
xorps %xmm0, %xmm0
xorps %xmm1, %xmm1
.LBB3_9: # %._crit_edge46
imull %r8d, %r8d
xorps %xmm2, %xmm2
cvtsi2ss %r8d, %xmm2
divss %xmm2, %xmm1
cvtss2sd %xmm1, %xmm1
movl $.L.str.2, %edi
movb $2, %al
jmp printf # TAILCALL
.Lfunc_end3:
.size _Z11compare_matPKfiS0_ii, .Lfunc_end3-_Z11compare_matPKfiS0_ii
.cfi_endproc
# -- End function
.globl _Z11matmultCUDAPKfiS0_iPfii # -- Begin function _Z11matmultCUDAPKfiS0_iPfii
.p2align 4, 0x90
.type _Z11matmultCUDAPKfiS0_iPfii,@function
_Z11matmultCUDAPKfiS0_iPfii: # @_Z11matmultCUDAPKfiS0_iPfii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $248, %rsp
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, 48(%rsp) # 4-byte Spill
movq %r8, 80(%rsp) # 8-byte Spill
movl %ecx, %ebx
movq %rdx, %r13
movl %esi, %ebp
movq %rdi, %r14
movl 304(%rsp), %r15d
callq clock
movq %rax, 88(%rsp) # 8-byte Spill
movslq %r15d, %r15
leaq (,%r15,4), %r12
leaq 40(%rsp), %rdi
leaq 72(%rsp), %rsi
movq %r12, %rdx
movq %r15, %rcx
callq hipMallocPitch
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
movq %r12, %rdx
movq %r15, %rcx
callq hipMallocPitch
leaq 24(%rsp), %rdi
leaq 56(%rsp), %rsi
movq %r12, %rdx
movq %r15, %rcx
callq hipMallocPitch
movq 40(%rsp), %rdi
movq 72(%rsp), %rsi
movslq %ebp, %rcx
shlq $2, %rcx
movl $1, (%rsp)
movq %r14, %rdx
movq %r12, %r8
movq %r15, %r9
callq hipMemcpy2D
movq 32(%rsp), %rdi
movq 64(%rsp), %rsi
movslq %ebx, %rcx
movl 304(%rsp), %ebx
shlq $2, %rcx
movl $1, (%rsp)
movq %r13, %rdx
movq %r12, %r8
movq %r15, %r9
callq hipMemcpy2D
movabsq $4294967296, %rdx # imm = 0x100000000
movq %rbx, %rdi
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
movq %r12, %r8
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq 40(%rsp), %rax
movq 72(%rsp), %rcx
shrq $2, %rcx
movq 32(%rsp), %rdx
movq 64(%rsp), %rsi
shrq $2, %rsi
movq 24(%rsp), %rdi
movq 56(%rsp), %r8
shrq $2, %r8
movq %rax, 184(%rsp)
movq %rcx, 176(%rsp)
movq %rdx, 168(%rsp)
movq %rsi, 160(%rsp)
movq %rdi, 152(%rsp)
movq %r8, 144(%rsp)
movl %ebx, 52(%rsp)
leaq 184(%rsp), %rax
movq %rax, 192(%rsp)
leaq 176(%rsp), %rax
movq %rax, 200(%rsp)
leaq 168(%rsp), %rax
movq %rax, 208(%rsp)
leaq 160(%rsp), %rax
movq %rax, 216(%rsp)
leaq 152(%rsp), %rax
movq %rax, 224(%rsp)
leaq 144(%rsp), %rax
movq %rax, 232(%rsp)
leaq 52(%rsp), %rax
movq %rax, 240(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rax
movq 96(%rsp), %rdi
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
movq %rdi, 8(%rsp)
movq %rax, (%rsp)
leaq 192(%rsp), %r9
movl $_ZL11matMultCUDAPKfmS0_mPfmi, %edi
callq hipLaunchKernel
.LBB4_2:
movslq 48(%rsp), %rsi # 4-byte Folded Reload
shlq $2, %rsi
movq 24(%rsp), %rdx
movq 56(%rsp), %rcx
movl $2, (%rsp)
movq 80(%rsp), %rdi # 8-byte Reload
movq %r12, %r8
movq %r15, %r9
callq hipMemcpy2D
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
callq clock
subq 88(%rsp), %rax # 8-byte Folded Reload
addq $248, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z11matmultCUDAPKfiS0_iPfii, .Lfunc_end4-_Z11matmultCUDAPKfiS0_iPfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function _ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
.type _ZL26__device_stub__matMultCUDAPKfmS0_mPfmi,@function
_ZL26__device_stub__matMultCUDAPKfmS0_mPfmi: # @_ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_ZL11matMultCUDAPKfmS0_mPfmi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end5:
.size _ZL26__device_stub__matMultCUDAPKfmS0_mPfmi, .Lfunc_end5-_ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI6_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI6_2:
.long 0x49742400 # float 1.0E+6
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI6_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI6_3:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
callq _Z8InitCUDAv
testb %al, %al
je .LBB6_22
# %bb.1:
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r15
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, 8(%rsp) # 8-byte Spill
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %rbx
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r14
xorl %r13d, %r13d
xorl %edi, %edi
callq srand
movq %r15, %rbp
.p2align 4, 0x90
.LBB6_2: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB6_3 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB6_3: # Parent Loop BB6_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI6_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%rbp,%r12,4)
incq %r12
cmpq $1000, %r12 # imm = 0x3E8
jne .LBB6_3
# %bb.4: # %._crit_edge.i
# in Loop: Header=BB6_2 Depth=1
incq %r13
addq $4000, %rbp # imm = 0xFA0
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB6_2
# %bb.5: # %.preheader.i36.preheader
xorl %r13d, %r13d
movq 8(%rsp), %rbp # 8-byte Reload
.p2align 4, 0x90
.LBB6_6: # %.preheader.i36
# =>This Loop Header: Depth=1
# Child Loop BB6_7 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB6_7: # Parent Loop BB6_6 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI6_0(%rip), %xmm0
movss %xmm0, (%rbp,%r12,4)
incq %r12
cmpq $1000, %r12 # imm = 0x3E8
jne .LBB6_7
# %bb.8: # %._crit_edge.i41
# in Loop: Header=BB6_6 Depth=1
incq %r13
addq $4000, %rbp # imm = 0xFA0
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB6_6
# %bb.9: # %_Z6matgenPfii.exit44
movl $1000, (%rsp) # imm = 0x3E8
movq %r15, %rdi
movl $1000, %esi # imm = 0x3E8
movq 8(%rsp), %r12 # 8-byte Reload
movq %r12, %rdx
movl $1000, %ecx # imm = 0x3E8
movq %rbx, %r8
movl $1000, %r9d # imm = 0x3E8
callq _Z11matmultCUDAPKfiS0_iPfii
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r13d, %r13d
callq clock
movq %rax, %rbp
.p2align 4, 0x90
.LBB6_10: # %.preheader26.i
# =>This Loop Header: Depth=1
# Child Loop BB6_11 Depth 2
# Child Loop BB6_12 Depth 3
imulq $4000, %r13, %rax # imm = 0xFA0
addq %r14, %rax
movq %r12, %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB6_11: # %.preheader.i45
# Parent Loop BB6_10 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB6_12 Depth 3
xorps %xmm0, %xmm0
movq %rcx, %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB6_12: # Parent Loop BB6_10 Depth=1
# Parent Loop BB6_11 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r15,%rdi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rsi), %xmm1
cvtss2sd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rdi
addq $4000, %rsi # imm = 0xFA0
cmpq $1000, %rdi # imm = 0x3E8
jne .LBB6_12
# %bb.13: # %._crit_edge.i49
# in Loop: Header=BB6_11 Depth=2
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%rax,%rdx,4)
incq %rdx
addq $4, %rcx
cmpq $1000, %rdx # imm = 0x3E8
jne .LBB6_11
# %bb.14: # %._crit_edge30.i
# in Loop: Header=BB6_10 Depth=1
incq %r13
addq $4000, %r15 # imm = 0xFA0
cmpq $1000, %r13 # imm = 0x3E8
jne .LBB6_10
# %bb.15: # %_Z7matmultPKfiS0_iPfii.exit
xorl %r12d, %r12d
callq clock
xorps %xmm5, %xmm5
movq %rax, %r15
movaps .LCPI6_1(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
xorps %xmm2, %xmm2
xorps %xmm1, %xmm1
jmp .LBB6_16
.p2align 4, 0x90
.LBB6_20: # %._crit_edge.i54
# in Loop: Header=BB6_16 Depth=1
incq %r12
addq $4000, %rbx # imm = 0xFA0
addq $4000, %r14 # imm = 0xFA0
cmpq $1000, %r12 # imm = 0x3E8
je .LBB6_21
.LBB6_16: # %.preheader.i50
# =>This Loop Header: Depth=1
# Child Loop BB6_17 Depth 2
xorl %eax, %eax
jmp .LBB6_17
.p2align 4, 0x90
.LBB6_19: # in Loop: Header=BB6_17 Depth=2
incq %rax
cmpq $1000, %rax # imm = 0x3E8
je .LBB6_20
.LBB6_17: # Parent Loop BB6_16 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%rax,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
ucomiss %xmm5, %xmm3
jne .LBB6_18
jnp .LBB6_19
.LBB6_18: # in Loop: Header=BB6_17 Depth=2
movss (%rbx,%rax,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
subss %xmm3, %xmm4
divss %xmm3, %xmm4
andps %xmm0, %xmm4
addss %xmm4, %xmm1
maxss %xmm2, %xmm4
movaps %xmm4, %xmm2
jmp .LBB6_19
.LBB6_21: # %_Z11compare_matPKfiS0_ii.exit
subq %rbp, %r15
xorps %xmm0, %xmm0
cvtss2sd %xmm2, %xmm0
divss .LCPI6_2(%rip), %xmm1
cvtss2sd %xmm1, %xmm1
movl $.L.str.2, %edi
movb $2, %al
callq printf
xorps %xmm0, %xmm0
cvtsi2sdq 16(%rsp), %xmm0 # 8-byte Folded Reload
divsd .LCPI6_3(%rip), %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
xorps %xmm0, %xmm0
cvtsi2sd %r15, %xmm0
divsd .LCPI6_3(%rip), %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
.LBB6_22:
xorl %eax, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_ZL11matMultCUDAPKfmS0_mPfmi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "There is no device.\n"
.size .L.str, 21
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "There is no device supporting CUDA 1.x.\n"
.size .L.str.1, 41
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Max error: %g Average error: %g\n"
.size .L.str.2, 33
.type _ZL11matMultCUDAPKfmS0_mPfmi,@object # @_ZL11matMultCUDAPKfmS0_mPfmi
.section .rodata,"a",@progbits
.p2align 3, 0x0
_ZL11matMultCUDAPKfmS0_mPfmi:
.quad _ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
.size _ZL11matMultCUDAPKfmS0_mPfmi, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz "GPU time used: %f \n"
.size .L.str.3, 20
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "CPU time used: %f \n"
.size .L.str.4, 20
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_ZL11matMultCUDAPKfmS0_mPfmi"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZL26__device_stub__matMultCUDAPKfmS0_mPfmi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZL11matMultCUDAPKfmS0_mPfmi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define TILE_SIZE 16
// CUDA Kernel
__global__ void matrixMul( float* C, float* A, float* B, int TM)
{
__shared__ float As [TILE_SIZE][TILE_SIZE];
__shared__ float Bs [TILE_SIZE][TILE_SIZE];
// chaque thread calcule C[i][j]
// Coordonnees absolues du thread : indices i j
int j = blockIdx.x * blockDim.x+ threadIdx.x;
int i = blockIdx.y * blockDim.y+ threadIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float value = 0;
for(int ke=0; ke<TM; ke += TILE_SIZE) {
// Remplissage de As et de Bs
As[ty][tx] = A[i * TM + ke + tx];
Bs[ty][tx] = B[(ke + ty) * TM + j];
__syncthreads();
// Calcul
for (int k = 0; k < TILE_SIZE; k++) {
value += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
C[i * TM + j] = value;
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
int main(int argc, char** argv) {
int i, j, TM, BLOCK_SIZE_X, BLOCK_SIZE_Y;
unsigned int M_size;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
cudaError_t cerror;
float elapsedTime ;
cudaEvent_t start , stop ;
// Valeurs par defaut
TM=2048;
BLOCK_SIZE_X = TILE_SIZE;
BLOCK_SIZE_Y = TILE_SIZE;
// Possibilite de lire TM dans arg1, BLOCK_SIZE_X dans arg2 et BLOCK_SIZE_Y ans arg3
if (argc>1) {
TM=atoi(argv[1]);
}
// if (argc>3) {
// BLOCK_SIZE_X =atoi(argv[2]);
// BLOCK_SIZE_Y =atoi(argv[3]);
// }
// Verification de la bonne taille TM par rapport aux dimensions des blocs
if ((TM % BLOCK_SIZE_X) !=0) {
printf("Taille matrice non multiple de taille bloc X %d \n", BLOCK_SIZE_X);
exit(1);
}
if ((TM % BLOCK_SIZE_Y) !=0) {
printf("Taille matrice non multiple de taille bloc Y %d \n", BLOCK_SIZE_Y);
exit(1);
}
// Allocation memoire sur CPU
M_size = TM*TM*sizeof(float);
h_A = (float*) malloc(M_size);
h_B = (float*) malloc(M_size);
h_C = (float*) malloc(M_size);
// initialisation des matrices avec des valeurs permettant de verifier le resultat
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
h_A[i*TM+j] = 1.0;
h_B[i*TM+j] = 1.0;
h_C[i*TM+j] = 0.0;
if (i==j) {
h_A[i*TM+j]=(float) (i+1);
h_B[i*TM+j]=(float) (i+1);
}
}
}
// Allocation memoire sur GPU
cudaMalloc((void**) &d_A, M_size);
cudaMalloc((void**) &d_B, M_size);
cudaMalloc((void**) &d_C, M_size);
// Calcul du temps : top depart
cudaEventCreate (&start ) ;
cudaEventCreate (&stop ) ;
cudaEventRecord ( start , 0 ) ;
// copie des donnes CPU vers GPU
cudaMemcpy(d_A, h_A, M_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, M_size, cudaMemcpyHostToDevice);
// choix de la structure : grille et blocs
dim3 threads(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grid(TM / threads.x, TM / threads.y);
printf("bloc %d %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
printf("grille %d %d \n", TM / threads.x, TM / threads.y);
// Lancement des threads
matrixMul<<< grid, threads >>>(d_C, d_A, d_B, TM);
// En cas d'erreur
cerror=cudaGetLastError();
if ((int)cerror !=0) {
printf("Erreur appel kernel %d \n", (int) cerror);
exit(cerror);
}
// copie des resultats depuis le GPU
cudaMemcpy(h_C, d_C, M_size, cudaMemcpyDeviceToHost);
// Calcul du temps d'execution
cudaEventRecord ( stop , 0 ) ;
cudaEventSynchronize ( stop ) ;
cudaEventElapsedTime ( &elapsedTime , start , stop ) ;
cudaEventDestroy ( start ) ;
cudaEventDestroy ( stop ) ;
printf ( "Temps consomme: %f secondes\n" , elapsedTime / 1000.0 ) ;
// Verification des resultats
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1);
}
else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))){
printf("Erreur i: %d j: %d\n", i, j);
exit(1);
}
}
}
// liberation de la memoire
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
} | .file "tmpxft_0005c917_00000000-6_mul_matG2B2_et_2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
.type _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, @function
_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9matrixMulPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, .-_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
.globl _Z9matrixMulPfS_S_i
.type _Z9matrixMulPfS_S_i, @function
_Z9matrixMulPfS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9matrixMulPfS_S_i, .-_Z9matrixMulPfS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Taille matrice non multiple de taille bloc X %d \n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "bloc %d %d \n"
.LC4:
.string "grille %d %d \n"
.LC5:
.string "Erreur appel kernel %d \n"
.LC7:
.string "Temps consomme: %f secondes\n"
.LC8:
.string "Erreur i: %d j: %d %f\n"
.LC9:
.string "Erreur i: %d j: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rdx
movq %rdx, 88(%rsp)
xorl %edx, %edx
cmpl $1, %edi
jg .L37
movl $16777216, %edi
call malloc@PLT
movq %rax, %r15
movl $16777216, %edi
call malloc@PLT
movq %rax, 8(%rsp)
movl $16777216, %edi
call malloc@PLT
movq %rax, %rbx
movl $2048, %r12d
movl $16777216, %r14d
.L27:
movslq %r12d, %r8
leaq 0(,%r8,4), %r9
movq %r15, %rsi
movq 8(%rsp), %rcx
movq %rbx, %rdi
movl $0, %edx
movss .LC1(%rip), %xmm0
jmp .L15
.L37:
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movl %eax, %r12d
testb $15, %al
jne .L38
imull %eax, %eax
leal 0(,%rax,4), %r14d
movq %r14, %rdi
call malloc@PLT
movq %rax, %r15
movq %r14, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbx
testl %ebp, %ebp
jg .L27
.L14:
leaq 24(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r15, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r14, %rdx
movq 8(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 72(%rsp)
movl %r12d, %r13d
movl %r12d, %ebp
shrl $4, %ebp
movl %ebp, 76(%rsp)
movl %ebp, 80(%rsp)
movl $1, 84(%rsp)
movl $16, %ecx
movl $16, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %ecx
movl %ebp, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $16, 64(%rsp)
movl $16, 68(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L18:
call cudaGetLastError@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L40
movl $2, %ecx
movq %r14, %rdx
movq 40(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movq 56(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
pxor %xmm0, %xmm0
cvtss2sd 20(%rsp), %xmm0
divsd .LC6(%rip), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl %r13d, %esi
movl $0, %edx
leal -1(%r13), %r9d
movl $0, %r8d
testl %r12d, %r12d
jg .L20
.L21:
movq %r15, %rdi
call free@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L41
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movl $16, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L16:
addq $1, %rax
cmpq %r8, %rax
je .L42
.L17:
movss %xmm0, (%rsi,%rax,4)
movss %xmm0, (%rcx,%rax,4)
movl $0x00000000, (%rdi,%rax,4)
cmpl %eax, %edx
jne .L16
pxor %xmm1, %xmm1
cvtsi2ssl %r10d, %xmm1
movss %xmm1, (%rsi,%rax,4)
movss %xmm1, (%rcx,%rax,4)
jmp .L16
.L42:
addl $1, %edx
addq %r9, %rsi
addq %r9, %rcx
addq %r9, %rdi
cmpl %edx, %r12d
je .L14
.L15:
movl $0, %eax
leal 1(%rdx), %r10d
jmp .L17
.L39:
movl %r12d, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
jmp .L18
.L40:
movl %eax, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L44:
leal (%rcx,%rbp), %eax
cltq
movss (%rbx,%rax,4), %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl %edi, %xmm1
ucomiss %xmm1, %xmm0
jp .L31
jne .L31
.L23:
leal 1(%rcx), %eax
cmpl %eax, %r12d
je .L43
movl %eax, %ecx
.L26:
cmpl %ecx, %edx
je .L44
leal (%rcx,%rbp), %eax
cltq
leal (%rcx,%rsi), %r10d
pxor %xmm0, %xmm0
cvtsi2ssl %r10d, %xmm0
ucomiss (%rbx,%rax,4), %xmm0
jp .L32
je .L23
.L32:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L31:
cvtss2sd %xmm0, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L43:
leal 1(%rdx), %eax
addl %r13d, %ebp
addl $1, %esi
cmpl %ecx, %edx
je .L21
movl %eax, %edx
.L20:
leal 1(%rdx), %edi
imull %edi, %edi
addl %r9d, %edi
movl %r8d, %ecx
jmp .L26
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z9matrixMulPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z9matrixMulPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1065353216
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define TILE_SIZE 16
// CUDA Kernel
__global__ void matrixMul( float* C, float* A, float* B, int TM)
{
__shared__ float As [TILE_SIZE][TILE_SIZE];
__shared__ float Bs [TILE_SIZE][TILE_SIZE];
// chaque thread calcule C[i][j]
// Coordonnees absolues du thread : indices i j
int j = blockIdx.x * blockDim.x+ threadIdx.x;
int i = blockIdx.y * blockDim.y+ threadIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float value = 0;
for(int ke=0; ke<TM; ke += TILE_SIZE) {
// Remplissage de As et de Bs
As[ty][tx] = A[i * TM + ke + tx];
Bs[ty][tx] = B[(ke + ty) * TM + j];
__syncthreads();
// Calcul
for (int k = 0; k < TILE_SIZE; k++) {
value += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
C[i * TM + j] = value;
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
int main(int argc, char** argv) {
int i, j, TM, BLOCK_SIZE_X, BLOCK_SIZE_Y;
unsigned int M_size;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
cudaError_t cerror;
float elapsedTime ;
cudaEvent_t start , stop ;
// Valeurs par defaut
TM=2048;
BLOCK_SIZE_X = TILE_SIZE;
BLOCK_SIZE_Y = TILE_SIZE;
// Possibilite de lire TM dans arg1, BLOCK_SIZE_X dans arg2 et BLOCK_SIZE_Y ans arg3
if (argc>1) {
TM=atoi(argv[1]);
}
// if (argc>3) {
// BLOCK_SIZE_X =atoi(argv[2]);
// BLOCK_SIZE_Y =atoi(argv[3]);
// }
// Verification de la bonne taille TM par rapport aux dimensions des blocs
if ((TM % BLOCK_SIZE_X) !=0) {
printf("Taille matrice non multiple de taille bloc X %d \n", BLOCK_SIZE_X);
exit(1);
}
if ((TM % BLOCK_SIZE_Y) !=0) {
printf("Taille matrice non multiple de taille bloc Y %d \n", BLOCK_SIZE_Y);
exit(1);
}
// Allocation memoire sur CPU
M_size = TM*TM*sizeof(float);
h_A = (float*) malloc(M_size);
h_B = (float*) malloc(M_size);
h_C = (float*) malloc(M_size);
// initialisation des matrices avec des valeurs permettant de verifier le resultat
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
h_A[i*TM+j] = 1.0;
h_B[i*TM+j] = 1.0;
h_C[i*TM+j] = 0.0;
if (i==j) {
h_A[i*TM+j]=(float) (i+1);
h_B[i*TM+j]=(float) (i+1);
}
}
}
// Allocation memoire sur GPU
cudaMalloc((void**) &d_A, M_size);
cudaMalloc((void**) &d_B, M_size);
cudaMalloc((void**) &d_C, M_size);
// Calcul du temps : top depart
cudaEventCreate (&start ) ;
cudaEventCreate (&stop ) ;
cudaEventRecord ( start , 0 ) ;
// copie des donnes CPU vers GPU
cudaMemcpy(d_A, h_A, M_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, M_size, cudaMemcpyHostToDevice);
// choix de la structure : grille et blocs
dim3 threads(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grid(TM / threads.x, TM / threads.y);
printf("bloc %d %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
printf("grille %d %d \n", TM / threads.x, TM / threads.y);
// Lancement des threads
matrixMul<<< grid, threads >>>(d_C, d_A, d_B, TM);
// En cas d'erreur
cerror=cudaGetLastError();
if ((int)cerror !=0) {
printf("Erreur appel kernel %d \n", (int) cerror);
exit(cerror);
}
// copie des resultats depuis le GPU
cudaMemcpy(h_C, d_C, M_size, cudaMemcpyDeviceToHost);
// Calcul du temps d'execution
cudaEventRecord ( stop , 0 ) ;
cudaEventSynchronize ( stop ) ;
cudaEventElapsedTime ( &elapsedTime , start , stop ) ;
cudaEventDestroy ( start ) ;
cudaEventDestroy ( stop ) ;
printf ( "Temps consomme: %f secondes\n" , elapsedTime / 1000.0 ) ;
// Verification des resultats
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1);
}
else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))){
printf("Erreur i: %d j: %d\n", i, j);
exit(1);
}
}
}
// liberation de la memoire
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define TILE_SIZE 16
// CUDA Kernel
__global__ void matrixMul( float* C, float* A, float* B, int TM)
{
__shared__ float As [TILE_SIZE][TILE_SIZE];
__shared__ float Bs [TILE_SIZE][TILE_SIZE];
// chaque thread calcule C[i][j]
// Coordonnees absolues du thread : indices i j
int j = blockIdx.x * blockDim.x+ threadIdx.x;
int i = blockIdx.y * blockDim.y+ threadIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float value = 0;
for(int ke=0; ke<TM; ke += TILE_SIZE) {
// Remplissage de As et de Bs
As[ty][tx] = A[i * TM + ke + tx];
Bs[ty][tx] = B[(ke + ty) * TM + j];
__syncthreads();
// Calcul
for (int k = 0; k < TILE_SIZE; k++) {
value += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
C[i * TM + j] = value;
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
int main(int argc, char** argv) {
int i, j, TM, BLOCK_SIZE_X, BLOCK_SIZE_Y;
unsigned int M_size;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
hipError_t cerror;
float elapsedTime ;
hipEvent_t start , stop ;
// Valeurs par defaut
TM=2048;
BLOCK_SIZE_X = TILE_SIZE;
BLOCK_SIZE_Y = TILE_SIZE;
// Possibilite de lire TM dans arg1, BLOCK_SIZE_X dans arg2 et BLOCK_SIZE_Y ans arg3
if (argc>1) {
TM=atoi(argv[1]);
}
// if (argc>3) {
// BLOCK_SIZE_X =atoi(argv[2]);
// BLOCK_SIZE_Y =atoi(argv[3]);
// }
// Verification de la bonne taille TM par rapport aux dimensions des blocs
if ((TM % BLOCK_SIZE_X) !=0) {
printf("Taille matrice non multiple de taille bloc X %d \n", BLOCK_SIZE_X);
exit(1);
}
if ((TM % BLOCK_SIZE_Y) !=0) {
printf("Taille matrice non multiple de taille bloc Y %d \n", BLOCK_SIZE_Y);
exit(1);
}
// Allocation memoire sur CPU
M_size = TM*TM*sizeof(float);
h_A = (float*) malloc(M_size);
h_B = (float*) malloc(M_size);
h_C = (float*) malloc(M_size);
// initialisation des matrices avec des valeurs permettant de verifier le resultat
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
h_A[i*TM+j] = 1.0;
h_B[i*TM+j] = 1.0;
h_C[i*TM+j] = 0.0;
if (i==j) {
h_A[i*TM+j]=(float) (i+1);
h_B[i*TM+j]=(float) (i+1);
}
}
}
// Allocation memoire sur GPU
hipMalloc((void**) &d_A, M_size);
hipMalloc((void**) &d_B, M_size);
hipMalloc((void**) &d_C, M_size);
// Calcul du temps : top depart
hipEventCreate (&start ) ;
hipEventCreate (&stop ) ;
hipEventRecord ( start , 0 ) ;
// copie des donnes CPU vers GPU
hipMemcpy(d_A, h_A, M_size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, M_size, hipMemcpyHostToDevice);
// choix de la structure : grille et blocs
dim3 threads(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grid(TM / threads.x, TM / threads.y);
printf("bloc %d %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
printf("grille %d %d \n", TM / threads.x, TM / threads.y);
// Lancement des threads
matrixMul<<< grid, threads >>>(d_C, d_A, d_B, TM);
// En cas d'erreur
cerror=hipGetLastError();
if ((int)cerror !=0) {
printf("Erreur appel kernel %d \n", (int) cerror);
exit(cerror);
}
// copie des resultats depuis le GPU
hipMemcpy(h_C, d_C, M_size, hipMemcpyDeviceToHost);
// Calcul du temps d'execution
hipEventRecord ( stop , 0 ) ;
hipEventSynchronize ( stop ) ;
hipEventElapsedTime ( &elapsedTime , start , stop ) ;
hipEventDestroy ( start ) ;
hipEventDestroy ( stop ) ;
printf ( "Temps consomme: %f secondes\n" , elapsedTime / 1000.0 ) ;
// Verification des resultats
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1);
}
else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))){
printf("Erreur i: %d j: %d\n", i, j);
exit(1);
}
}
}
// liberation de la memoire
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define TILE_SIZE 16
// CUDA Kernel
__global__ void matrixMul( float* C, float* A, float* B, int TM)
{
__shared__ float As [TILE_SIZE][TILE_SIZE];
__shared__ float Bs [TILE_SIZE][TILE_SIZE];
// chaque thread calcule C[i][j]
// Coordonnees absolues du thread : indices i j
int j = blockIdx.x * blockDim.x+ threadIdx.x;
int i = blockIdx.y * blockDim.y+ threadIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float value = 0;
for(int ke=0; ke<TM; ke += TILE_SIZE) {
// Remplissage de As et de Bs
As[ty][tx] = A[i * TM + ke + tx];
Bs[ty][tx] = B[(ke + ty) * TM + j];
__syncthreads();
// Calcul
for (int k = 0; k < TILE_SIZE; k++) {
value += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
C[i * TM + j] = value;
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
int main(int argc, char** argv) {
int i, j, TM, BLOCK_SIZE_X, BLOCK_SIZE_Y;
unsigned int M_size;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
hipError_t cerror;
float elapsedTime ;
hipEvent_t start , stop ;
// Valeurs par defaut
TM=2048;
BLOCK_SIZE_X = TILE_SIZE;
BLOCK_SIZE_Y = TILE_SIZE;
// Possibilite de lire TM dans arg1, BLOCK_SIZE_X dans arg2 et BLOCK_SIZE_Y ans arg3
if (argc>1) {
TM=atoi(argv[1]);
}
// if (argc>3) {
// BLOCK_SIZE_X =atoi(argv[2]);
// BLOCK_SIZE_Y =atoi(argv[3]);
// }
// Verification de la bonne taille TM par rapport aux dimensions des blocs
if ((TM % BLOCK_SIZE_X) !=0) {
printf("Taille matrice non multiple de taille bloc X %d \n", BLOCK_SIZE_X);
exit(1);
}
if ((TM % BLOCK_SIZE_Y) !=0) {
printf("Taille matrice non multiple de taille bloc Y %d \n", BLOCK_SIZE_Y);
exit(1);
}
// Allocation memoire sur CPU
M_size = TM*TM*sizeof(float);
h_A = (float*) malloc(M_size);
h_B = (float*) malloc(M_size);
h_C = (float*) malloc(M_size);
// initialisation des matrices avec des valeurs permettant de verifier le resultat
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
h_A[i*TM+j] = 1.0;
h_B[i*TM+j] = 1.0;
h_C[i*TM+j] = 0.0;
if (i==j) {
h_A[i*TM+j]=(float) (i+1);
h_B[i*TM+j]=(float) (i+1);
}
}
}
// Allocation memoire sur GPU
hipMalloc((void**) &d_A, M_size);
hipMalloc((void**) &d_B, M_size);
hipMalloc((void**) &d_C, M_size);
// Calcul du temps : top depart
hipEventCreate (&start ) ;
hipEventCreate (&stop ) ;
hipEventRecord ( start , 0 ) ;
// copie des donnes CPU vers GPU
hipMemcpy(d_A, h_A, M_size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, M_size, hipMemcpyHostToDevice);
// choix de la structure : grille et blocs
dim3 threads(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grid(TM / threads.x, TM / threads.y);
printf("bloc %d %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
printf("grille %d %d \n", TM / threads.x, TM / threads.y);
// Lancement des threads
matrixMul<<< grid, threads >>>(d_C, d_A, d_B, TM);
// En cas d'erreur
cerror=hipGetLastError();
if ((int)cerror !=0) {
printf("Erreur appel kernel %d \n", (int) cerror);
exit(cerror);
}
// copie des resultats depuis le GPU
hipMemcpy(h_C, d_C, M_size, hipMemcpyDeviceToHost);
// Calcul du temps d'execution
hipEventRecord ( stop , 0 ) ;
hipEventSynchronize ( stop ) ;
hipEventElapsedTime ( &elapsedTime , start , stop ) ;
hipEventDestroy ( start ) ;
hipEventDestroy ( stop ) ;
printf ( "Temps consomme: %f secondes\n" , elapsedTime / 1000.0 ) ;
// Verification des resultats
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1);
}
else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))){
printf("Erreur i: %d j: %d\n", i, j);
exit(1);
}
}
}
// liberation de la memoire
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9matrixMulPfS_S_i
.globl _Z9matrixMulPfS_S_i
.p2align 8
.type _Z9matrixMulPfS_S_i,@function
_Z9matrixMulPfS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v6, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
v_mad_u64_u32 v[0:1], null, s14, s4, v[6:7]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_5
s_load_b128 s[4:7], s[0:1], 0x8
v_lshlrev_b32_e32 v2, 2, v6
v_lshlrev_b32_e32 v7, 6, v3
s_mov_b32 s3, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, 0x400, v2
v_mad_u64_u32 v[4:5], null, v1, s2, v[6:7]
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v5, v7, v2
s_delay_alu instid0(VALU_DEP_3)
v_add_nc_u32_e32 v6, v8, v7
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
v_add_nc_u32_e32 v10, s3, v3
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v9, s3, v4
s_mov_b32 s8, 0
v_mad_u64_u32 v[11:12], null, v10, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v10, 31, v9
v_lshlrev_b64 v[9:10], 2, v[9:10]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v12, 31, v11
s_waitcnt lgkmcnt(0)
v_add_co_u32 v9, vcc_lo, s4, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[11:12], 2, v[11:12]
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v11, vcc_lo, s6, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s7, v12, vcc_lo
global_load_b32 v10, v[9:10], off
global_load_b32 v11, v[11:12], off
v_mov_b32_e32 v9, v8
s_waitcnt vmcnt(1)
ds_store_b32 v5, v10
s_waitcnt vmcnt(0)
ds_store_b32 v6, v11
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_3:
v_add_nc_u32_e32 v10, s8, v7
s_add_i32 s8, s8, 4
ds_load_b32 v11, v9
ds_load_b32 v10, v10
v_add_nc_u32_e32 v9, 64, v9
s_cmp_eq_u32 s8, 64
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v2, v10, v11
s_cbranch_scc0 .LBB0_3
s_add_i32 s3, s3, 16
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s3, s2
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_6
.LBB0_5:
v_mov_b32_e32 v2, 0
.LBB0_6:
s_set_inst_prefetch_distance 0x2
s_load_b64 s[0:1], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, v1, s2, v[0:1]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9matrixMulPfS_S_i
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9matrixMulPfS_S_i, .Lfunc_end0-_Z9matrixMulPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9matrixMulPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9matrixMulPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define TILE_SIZE 16
// CUDA Kernel
__global__ void matrixMul( float* C, float* A, float* B, int TM)
{
__shared__ float As [TILE_SIZE][TILE_SIZE];
__shared__ float Bs [TILE_SIZE][TILE_SIZE];
// chaque thread calcule C[i][j]
// Coordonnees absolues du thread : indices i j
int j = blockIdx.x * blockDim.x+ threadIdx.x;
int i = blockIdx.y * blockDim.y+ threadIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float value = 0;
for(int ke=0; ke<TM; ke += TILE_SIZE) {
// Remplissage de As et de Bs
As[ty][tx] = A[i * TM + ke + tx];
Bs[ty][tx] = B[(ke + ty) * TM + j];
__syncthreads();
// Calcul
for (int k = 0; k < TILE_SIZE; k++) {
value += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
C[i * TM + j] = value;
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
int main(int argc, char** argv) {
int i, j, TM, BLOCK_SIZE_X, BLOCK_SIZE_Y;
unsigned int M_size;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
hipError_t cerror;
float elapsedTime ;
hipEvent_t start , stop ;
// Valeurs par defaut
TM=2048;
BLOCK_SIZE_X = TILE_SIZE;
BLOCK_SIZE_Y = TILE_SIZE;
// Possibilite de lire TM dans arg1, BLOCK_SIZE_X dans arg2 et BLOCK_SIZE_Y ans arg3
if (argc>1) {
TM=atoi(argv[1]);
}
// if (argc>3) {
// BLOCK_SIZE_X =atoi(argv[2]);
// BLOCK_SIZE_Y =atoi(argv[3]);
// }
// Verification de la bonne taille TM par rapport aux dimensions des blocs
if ((TM % BLOCK_SIZE_X) !=0) {
printf("Taille matrice non multiple de taille bloc X %d \n", BLOCK_SIZE_X);
exit(1);
}
if ((TM % BLOCK_SIZE_Y) !=0) {
printf("Taille matrice non multiple de taille bloc Y %d \n", BLOCK_SIZE_Y);
exit(1);
}
// Allocation memoire sur CPU
M_size = TM*TM*sizeof(float);
h_A = (float*) malloc(M_size);
h_B = (float*) malloc(M_size);
h_C = (float*) malloc(M_size);
// initialisation des matrices avec des valeurs permettant de verifier le resultat
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
h_A[i*TM+j] = 1.0;
h_B[i*TM+j] = 1.0;
h_C[i*TM+j] = 0.0;
if (i==j) {
h_A[i*TM+j]=(float) (i+1);
h_B[i*TM+j]=(float) (i+1);
}
}
}
// Allocation memoire sur GPU
hipMalloc((void**) &d_A, M_size);
hipMalloc((void**) &d_B, M_size);
hipMalloc((void**) &d_C, M_size);
// Calcul du temps : top depart
hipEventCreate (&start ) ;
hipEventCreate (&stop ) ;
hipEventRecord ( start , 0 ) ;
// copie des donnes CPU vers GPU
hipMemcpy(d_A, h_A, M_size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, M_size, hipMemcpyHostToDevice);
// choix de la structure : grille et blocs
dim3 threads(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grid(TM / threads.x, TM / threads.y);
printf("bloc %d %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
printf("grille %d %d \n", TM / threads.x, TM / threads.y);
// Lancement des threads
matrixMul<<< grid, threads >>>(d_C, d_A, d_B, TM);
// En cas d'erreur
cerror=hipGetLastError();
if ((int)cerror !=0) {
printf("Erreur appel kernel %d \n", (int) cerror);
exit(cerror);
}
// copie des resultats depuis le GPU
hipMemcpy(h_C, d_C, M_size, hipMemcpyDeviceToHost);
// Calcul du temps d'execution
hipEventRecord ( stop , 0 ) ;
hipEventSynchronize ( stop ) ;
hipEventElapsedTime ( &elapsedTime , start , stop ) ;
hipEventDestroy ( start ) ;
hipEventDestroy ( stop ) ;
printf ( "Temps consomme: %f secondes\n" , elapsedTime / 1000.0 ) ;
// Verification des resultats
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1);
}
else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))){
printf("Erreur i: %d j: %d\n", i, j);
exit(1);
}
}
}
// liberation de la memoire
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
} | .text
.file "mul_matG2B2_et_2.hip"
.globl _Z24__device_stub__matrixMulPfS_S_i # -- Begin function _Z24__device_stub__matrixMulPfS_S_i
.p2align 4, 0x90
.type _Z24__device_stub__matrixMulPfS_S_i,@function
_Z24__device_stub__matrixMulPfS_S_i: # @_Z24__device_stub__matrixMulPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9matrixMulPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__matrixMulPfS_S_i, .Lfunc_end0-_Z24__device_stub__matrixMulPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $2048, %eax # imm = 0x800
cmpl $2, %edi
jl .LBB1_2
# %bb.1:
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
.LBB1_2:
testb $15, %al
jne .LBB1_23
# %bb.3:
movl %eax, %ebx
imull %ebx, %ebx
shll $2, %ebx
movq %rbx, %rdi
movq %rax, (%rsp) # 8-byte Spill
callq malloc
movq %rax, 24(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq malloc
movq %rax, 16(%rsp) # 8-byte Spill
movq %rbx, 80(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq malloc
movq (%rsp), %rcx # 8-byte Reload
movq %rax, 72(%rsp) # 8-byte Spill
movl %ecx, %r14d
testl %ecx, %ecx
jle .LBB1_10
# %bb.4: # %.preheader120.lr.ph
leaq (,%r14,4), %rbp
xorl %r13d, %r13d
xorl %r15d, %r15d
jmp .LBB1_5
.p2align 4, 0x90
.LBB1_9: # %._crit_edge
# in Loop: Header=BB1_5 Depth=1
incq %r15
movq (%rsp), %rcx # 8-byte Reload
addl %ecx, %r13d
cmpq %r14, %r15
je .LBB1_10
.LBB1_5: # %.preheader120
# =>This Loop Header: Depth=1
# Child Loop BB1_6 Depth 2
movl %r13d, %eax
movq 16(%rsp), %rdx # 8-byte Reload
leaq (%rdx,%rax,4), %r12
movq 24(%rsp), %rdx # 8-byte Reload
leaq (%rdx,%rax,4), %rbx
movl %ecx, %eax
imull %r15d, %eax
movq 72(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rdi
leal 1(%r15), %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 64(%rsp) # 4-byte Spill
xorl %esi, %esi
movq %rbp, %rdx
callq memset@PLT
xorl %eax, %eax
jmp .LBB1_6
.p2align 4, 0x90
.LBB1_8: # in Loop: Header=BB1_6 Depth=2
incq %rax
cmpq %rax, %r14
je .LBB1_9
.LBB1_6: # Parent Loop BB1_5 Depth=1
# => This Inner Loop Header: Depth=2
movl $1065353216, (%rbx,%rax,4) # imm = 0x3F800000
movl $1065353216, (%r12,%rax,4) # imm = 0x3F800000
cmpq %rax, %r15
jne .LBB1_8
# %bb.7: # in Loop: Header=BB1_6 Depth=2
movss 64(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rbx,%rax,4)
movss %xmm0, (%r12,%rax,4)
jmp .LBB1_8
.LBB1_10: # %._crit_edge127
leaq 56(%rsp), %rdi
movq 80(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movq %rcx, %rbp
callq hipMalloc
leaq 48(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 40(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 32(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 56(%rsp), %rdi
movq 24(%rsp), %rsi # 8-byte Reload
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
movq 16(%rsp), %rsi # 8-byte Reload
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
# kill: def $ebp killed $ebp killed $rbp def $rbp
shrl $4, %ebp
movq %rbp, %r13
shlq $32, %r13
orq %rbp, %r13
movl $.L.str.2, %edi
movl $16, %esi
movl $16, %edx
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl %ebp, %esi
movl %ebp, %edx
xorl %eax, %eax
callq printf
movabsq $68719476752, %rdx # imm = 0x1000000010
movq %r13, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_12
# %bb.11:
movq 40(%rsp), %rax
movq 56(%rsp), %rcx
movq 48(%rsp), %rdx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movq (%rsp), %rax # 8-byte Reload
movl %eax, 68(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
leaq 68(%rsp), %rax
movq %rax, 184(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 160(%rsp), %r9
movl $_Z9matrixMulPfS_S_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_12:
callq hipGetLastError
testl %eax, %eax
movq 72(%rsp), %rbx # 8-byte Reload
jne .LBB1_24
# %bb.13:
movq 40(%rsp), %rsi
movq %rbx, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %r13d, %r13d
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 160(%rsp), %rdi
callq hipEventElapsedTime
movq 32(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movss 160(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq (%rsp), %r10 # 8-byte Reload
testl %r10d, %r10d
jle .LBB1_22
# %bb.14: # %.preheader.lr.ph
leal -1(%r10), %eax
movq %r14, %rcx
xorl %edi, %edi
xorl %esi, %esi
jmp .LBB1_15
.p2align 4, 0x90
.LBB1_21: # %._crit_edge130
# in Loop: Header=BB1_15 Depth=1
incq %rsi
addl %r10d, %r13d
decq %rdi
incq %rcx
cmpq %r14, %rsi
je .LBB1_22
.LBB1_15: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_16 Depth 2
movl %r13d, %edx
leaq (%rbx,%rdx,4), %r8
leal 1(%rsi), %edx
imull %edx, %edx
addl %eax, %edx
xorps %xmm0, %xmm0
cvtsi2ss %edx, %xmm0
xorl %edx, %edx
jmp .LBB1_16
.p2align 4, 0x90
.LBB1_19: # %.critedge
# in Loop: Header=BB1_16 Depth=2
leal (%rcx,%rdx), %r9d
xorps %xmm1, %xmm1
cvtsi2ss %r9d, %xmm1
movss (%r8,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm2
jne .LBB1_26
jp .LBB1_26
.LBB1_20: # in Loop: Header=BB1_16 Depth=2
incq %rdx
cmpq %rdx, %r14
je .LBB1_21
.LBB1_16: # Parent Loop BB1_15 Depth=1
# => This Inner Loop Header: Depth=2
movq %rdi, %r9
addq %rdx, %r9
jne .LBB1_19
# %bb.17: # in Loop: Header=BB1_16 Depth=2
movss (%r8,%rdx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
jne .LBB1_25
jp .LBB1_25
# %bb.18: # in Loop: Header=BB1_16 Depth=2
testq %r9, %r9
jne .LBB1_19
jmp .LBB1_20
.LBB1_22: # %._crit_edge132
movq 24(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
movq %rbx, %rdi
callq free
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_26:
.cfi_def_cfa_offset 256
movl $.L.str.7, %edi
# kill: def $esi killed $esi killed $rsi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.LBB1_25:
xorps %xmm0, %xmm0
cvtss2sd %xmm1, %xmm0
movl $.L.str.6, %edi
# kill: def $esi killed $esi killed $rsi
# kill: def $edx killed $edx killed $rdx
movb $1, %al
callq printf
movl $1, %edi
callq exit
.LBB1_23:
movl $.L.str, %edi
movl $16, %esi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.LBB1_24:
movl $.L.str.4, %edi
movl %eax, %esi
movl %eax, %ebx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9matrixMulPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9matrixMulPfS_S_i,@object # @_Z9matrixMulPfS_S_i
.section .rodata,"a",@progbits
.globl _Z9matrixMulPfS_S_i
.p2align 3, 0x0
_Z9matrixMulPfS_S_i:
.quad _Z24__device_stub__matrixMulPfS_S_i
.size _Z9matrixMulPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Taille matrice non multiple de taille bloc X %d \n"
.size .L.str, 50
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "bloc %d %d \n"
.size .L.str.2, 13
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "grille %d %d \n"
.size .L.str.3, 15
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Erreur appel kernel %d \n"
.size .L.str.4, 25
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Temps consomme: %f secondes\n"
.size .L.str.5, 29
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Erreur i: %d j: %d %f\n"
.size .L.str.6, 23
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Erreur i: %d j: %d\n"
.size .L.str.7, 20
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9matrixMulPfS_S_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__matrixMulPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9matrixMulPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0005c917_00000000-6_mul_matG2B2_et_2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
.type _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, @function
_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9matrixMulPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i, .-_Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
.globl _Z9matrixMulPfS_S_i
.type _Z9matrixMulPfS_S_i, @function
_Z9matrixMulPfS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9matrixMulPfS_S_i, .-_Z9matrixMulPfS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Taille matrice non multiple de taille bloc X %d \n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "bloc %d %d \n"
.LC4:
.string "grille %d %d \n"
.LC5:
.string "Erreur appel kernel %d \n"
.LC7:
.string "Temps consomme: %f secondes\n"
.LC8:
.string "Erreur i: %d j: %d %f\n"
.LC9:
.string "Erreur i: %d j: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rdx
movq %rdx, 88(%rsp)
xorl %edx, %edx
cmpl $1, %edi
jg .L37
movl $16777216, %edi
call malloc@PLT
movq %rax, %r15
movl $16777216, %edi
call malloc@PLT
movq %rax, 8(%rsp)
movl $16777216, %edi
call malloc@PLT
movq %rax, %rbx
movl $2048, %r12d
movl $16777216, %r14d
.L27:
movslq %r12d, %r8
leaq 0(,%r8,4), %r9
movq %r15, %rsi
movq 8(%rsp), %rcx
movq %rbx, %rdi
movl $0, %edx
movss .LC1(%rip), %xmm0
jmp .L15
.L37:
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movl %eax, %r12d
testb $15, %al
jne .L38
imull %eax, %eax
leal 0(,%rax,4), %r14d
movq %r14, %rdi
call malloc@PLT
movq %rax, %r15
movq %r14, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
movq %r14, %rdi
call malloc@PLT
movq %rax, %rbx
testl %ebp, %ebp
jg .L27
.L14:
leaq 24(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r15, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r14, %rdx
movq 8(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 72(%rsp)
movl %r12d, %r13d
movl %r12d, %ebp
shrl $4, %ebp
movl %ebp, 76(%rsp)
movl %ebp, 80(%rsp)
movl $1, 84(%rsp)
movl $16, %ecx
movl $16, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %ecx
movl %ebp, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $16, 64(%rsp)
movl $16, 68(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl 84(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L18:
call cudaGetLastError@PLT
movl %eax, %ebp
testl %eax, %eax
jne .L40
movl $2, %ecx
movq %r14, %rdx
movq 40(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movq 56(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 20(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
pxor %xmm0, %xmm0
cvtss2sd 20(%rsp), %xmm0
divsd .LC6(%rip), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl %r13d, %esi
movl $0, %edx
leal -1(%r13), %r9d
movl $0, %r8d
testl %r12d, %r12d
jg .L20
.L21:
movq %r15, %rdi
call free@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L41
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movl $16, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L16:
addq $1, %rax
cmpq %r8, %rax
je .L42
.L17:
movss %xmm0, (%rsi,%rax,4)
movss %xmm0, (%rcx,%rax,4)
movl $0x00000000, (%rdi,%rax,4)
cmpl %eax, %edx
jne .L16
pxor %xmm1, %xmm1
cvtsi2ssl %r10d, %xmm1
movss %xmm1, (%rsi,%rax,4)
movss %xmm1, (%rcx,%rax,4)
jmp .L16
.L42:
addl $1, %edx
addq %r9, %rsi
addq %r9, %rcx
addq %r9, %rdi
cmpl %edx, %r12d
je .L14
.L15:
movl $0, %eax
leal 1(%rdx), %r10d
jmp .L17
.L39:
movl %r12d, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z33__device_stub__Z9matrixMulPfS_S_iPfS_S_i
jmp .L18
.L40:
movl %eax, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call exit@PLT
.L44:
leal (%rcx,%rbp), %eax
cltq
movss (%rbx,%rax,4), %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl %edi, %xmm1
ucomiss %xmm1, %xmm0
jp .L31
jne .L31
.L23:
leal 1(%rcx), %eax
cmpl %eax, %r12d
je .L43
movl %eax, %ecx
.L26:
cmpl %ecx, %edx
je .L44
leal (%rcx,%rbp), %eax
cltq
leal (%rcx,%rsi), %r10d
pxor %xmm0, %xmm0
cvtsi2ssl %r10d, %xmm0
ucomiss (%rbx,%rax,4), %xmm0
jp .L32
je .L23
.L32:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L31:
cvtss2sd %xmm0, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L43:
leal 1(%rdx), %eax
addl %r13d, %ebp
addl $1, %esi
cmpl %ecx, %edx
je .L21
movl %eax, %edx
.L20:
leal 1(%rdx), %edi
imull %edi, %edi
addl %r9d, %edi
movl %r8d, %ecx
jmp .L26
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z9matrixMulPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z9matrixMulPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1065353216
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "mul_matG2B2_et_2.hip"
.globl _Z24__device_stub__matrixMulPfS_S_i # -- Begin function _Z24__device_stub__matrixMulPfS_S_i
.p2align 4, 0x90
.type _Z24__device_stub__matrixMulPfS_S_i,@function
_Z24__device_stub__matrixMulPfS_S_i: # @_Z24__device_stub__matrixMulPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9matrixMulPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__matrixMulPfS_S_i, .Lfunc_end0-_Z24__device_stub__matrixMulPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $2048, %eax # imm = 0x800
cmpl $2, %edi
jl .LBB1_2
# %bb.1:
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
.LBB1_2:
testb $15, %al
jne .LBB1_23
# %bb.3:
movl %eax, %ebx
imull %ebx, %ebx
shll $2, %ebx
movq %rbx, %rdi
movq %rax, (%rsp) # 8-byte Spill
callq malloc
movq %rax, 24(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq malloc
movq %rax, 16(%rsp) # 8-byte Spill
movq %rbx, 80(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq malloc
movq (%rsp), %rcx # 8-byte Reload
movq %rax, 72(%rsp) # 8-byte Spill
movl %ecx, %r14d
testl %ecx, %ecx
jle .LBB1_10
# %bb.4: # %.preheader120.lr.ph
leaq (,%r14,4), %rbp
xorl %r13d, %r13d
xorl %r15d, %r15d
jmp .LBB1_5
.p2align 4, 0x90
.LBB1_9: # %._crit_edge
# in Loop: Header=BB1_5 Depth=1
incq %r15
movq (%rsp), %rcx # 8-byte Reload
addl %ecx, %r13d
cmpq %r14, %r15
je .LBB1_10
.LBB1_5: # %.preheader120
# =>This Loop Header: Depth=1
# Child Loop BB1_6 Depth 2
movl %r13d, %eax
movq 16(%rsp), %rdx # 8-byte Reload
leaq (%rdx,%rax,4), %r12
movq 24(%rsp), %rdx # 8-byte Reload
leaq (%rdx,%rax,4), %rbx
movl %ecx, %eax
imull %r15d, %eax
movq 72(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rdi
leal 1(%r15), %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 64(%rsp) # 4-byte Spill
xorl %esi, %esi
movq %rbp, %rdx
callq memset@PLT
xorl %eax, %eax
jmp .LBB1_6
.p2align 4, 0x90
.LBB1_8: # in Loop: Header=BB1_6 Depth=2
incq %rax
cmpq %rax, %r14
je .LBB1_9
.LBB1_6: # Parent Loop BB1_5 Depth=1
# => This Inner Loop Header: Depth=2
movl $1065353216, (%rbx,%rax,4) # imm = 0x3F800000
movl $1065353216, (%r12,%rax,4) # imm = 0x3F800000
cmpq %rax, %r15
jne .LBB1_8
# %bb.7: # in Loop: Header=BB1_6 Depth=2
movss 64(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, (%rbx,%rax,4)
movss %xmm0, (%r12,%rax,4)
jmp .LBB1_8
.LBB1_10: # %._crit_edge127
leaq 56(%rsp), %rdi
movq 80(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movq %rcx, %rbp
callq hipMalloc
leaq 48(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 40(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 32(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 32(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 56(%rsp), %rdi
movq 24(%rsp), %rsi # 8-byte Reload
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
movq 16(%rsp), %rsi # 8-byte Reload
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
# kill: def $ebp killed $ebp killed $rbp def $rbp
shrl $4, %ebp
movq %rbp, %r13
shlq $32, %r13
orq %rbp, %r13
movl $.L.str.2, %edi
movl $16, %esi
movl $16, %edx
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl %ebp, %esi
movl %ebp, %edx
xorl %eax, %eax
callq printf
movabsq $68719476752, %rdx # imm = 0x1000000010
movq %r13, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_12
# %bb.11:
movq 40(%rsp), %rax
movq 56(%rsp), %rcx
movq 48(%rsp), %rdx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movq (%rsp), %rax # 8-byte Reload
movl %eax, 68(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
leaq 68(%rsp), %rax
movq %rax, 184(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 160(%rsp), %r9
movl $_Z9matrixMulPfS_S_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_12:
callq hipGetLastError
testl %eax, %eax
movq 72(%rsp), %rbx # 8-byte Reload
jne .LBB1_24
# %bb.13:
movq 40(%rsp), %rsi
movq %rbx, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %r13d, %r13d
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 160(%rsp), %rdi
callq hipEventElapsedTime
movq 32(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movss 160(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
divsd .LCPI1_0(%rip), %xmm0
movl $.L.str.5, %edi
movb $1, %al
callq printf
movq (%rsp), %r10 # 8-byte Reload
testl %r10d, %r10d
jle .LBB1_22
# %bb.14: # %.preheader.lr.ph
leal -1(%r10), %eax
movq %r14, %rcx
xorl %edi, %edi
xorl %esi, %esi
jmp .LBB1_15
.p2align 4, 0x90
.LBB1_21: # %._crit_edge130
# in Loop: Header=BB1_15 Depth=1
incq %rsi
addl %r10d, %r13d
decq %rdi
incq %rcx
cmpq %r14, %rsi
je .LBB1_22
.LBB1_15: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_16 Depth 2
movl %r13d, %edx
leaq (%rbx,%rdx,4), %r8
leal 1(%rsi), %edx
imull %edx, %edx
addl %eax, %edx
xorps %xmm0, %xmm0
cvtsi2ss %edx, %xmm0
xorl %edx, %edx
jmp .LBB1_16
.p2align 4, 0x90
.LBB1_19: # %.critedge
# in Loop: Header=BB1_16 Depth=2
leal (%rcx,%rdx), %r9d
xorps %xmm1, %xmm1
cvtsi2ss %r9d, %xmm1
movss (%r8,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm2
jne .LBB1_26
jp .LBB1_26
.LBB1_20: # in Loop: Header=BB1_16 Depth=2
incq %rdx
cmpq %rdx, %r14
je .LBB1_21
.LBB1_16: # Parent Loop BB1_15 Depth=1
# => This Inner Loop Header: Depth=2
movq %rdi, %r9
addq %rdx, %r9
jne .LBB1_19
# %bb.17: # in Loop: Header=BB1_16 Depth=2
movss (%r8,%rdx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
jne .LBB1_25
jp .LBB1_25
# %bb.18: # in Loop: Header=BB1_16 Depth=2
testq %r9, %r9
jne .LBB1_19
jmp .LBB1_20
.LBB1_22: # %._crit_edge132
movq 24(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
movq %rbx, %rdi
callq free
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_26:
.cfi_def_cfa_offset 256
movl $.L.str.7, %edi
# kill: def $esi killed $esi killed $rsi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.LBB1_25:
xorps %xmm0, %xmm0
cvtss2sd %xmm1, %xmm0
movl $.L.str.6, %edi
# kill: def $esi killed $esi killed $rsi
# kill: def $edx killed $edx killed $rdx
movb $1, %al
callq printf
movl $1, %edi
callq exit
.LBB1_23:
movl $.L.str, %edi
movl $16, %esi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.LBB1_24:
movl $.L.str.4, %edi
movl %eax, %esi
movl %eax, %ebx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9matrixMulPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9matrixMulPfS_S_i,@object # @_Z9matrixMulPfS_S_i
.section .rodata,"a",@progbits
.globl _Z9matrixMulPfS_S_i
.p2align 3, 0x0
_Z9matrixMulPfS_S_i:
.quad _Z24__device_stub__matrixMulPfS_S_i
.size _Z9matrixMulPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Taille matrice non multiple de taille bloc X %d \n"
.size .L.str, 50
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "bloc %d %d \n"
.size .L.str.2, 13
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "grille %d %d \n"
.size .L.str.3, 15
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Erreur appel kernel %d \n"
.size .L.str.4, 25
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Temps consomme: %f secondes\n"
.size .L.str.5, 29
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Erreur i: %d j: %d %f\n"
.size .L.str.6, 23
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Erreur i: %d j: %d\n"
.size .L.str.7, 20
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9matrixMulPfS_S_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__matrixMulPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9matrixMulPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.