system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
cudaError_t _m_cudaStat = value; \
if ( _m_cudaStat != cudaSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
cudaGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( cudaGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( cudaSetDevice( i ) );
cudaDeviceProp deviceProp;
CUDA_CHECK_RETURN( cudaGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
cudaDeviceCanAccessPeer( &accessible, i, j );
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
hipError_t _m_cudaStat = value; \
if ( _m_cudaStat != hipSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
hipGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( hipGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( hipSetDevice( i ) );
hipDeviceProp_t deviceProp;
CUDA_CHECK_RETURN( hipGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
hipDeviceCanAccessPeer( &accessible, i, j );
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
hipError_t _m_cudaStat = value; \
if ( _m_cudaStat != hipSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
hipGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( hipGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( hipSetDevice( i ) );
hipDeviceProp_t deviceProp;
CUDA_CHECK_RETURN( hipGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
hipDeviceCanAccessPeer( &accessible, i, j );
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
hipError_t _m_cudaStat = value; \
if ( _m_cudaStat != hipSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
hipGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( hipGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( hipSetDevice( i ) );
hipDeviceProp_t deviceProp;
CUDA_CHECK_RETURN( hipGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
hipDeviceCanAccessPeer( &accessible, i, j );
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
} | .text
.file "access.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 1536
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $0, 8(%rsp)
leaq 8(%rsp), %rdi
callq hipGetDeviceCount
testl %eax, %eax
jne .LBB0_1
# %bb.3:
movl 8(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
cmpl $0, 8(%rsp)
jle .LBB0_15
# %bb.4: # %.lr.ph31.preheader
leaq 16(%rsp), %rbx
leaq 12(%rsp), %r14
movl $.L.str.6, %r12d
xorl %ebp, %ebp
jmp .LBB0_5
.p2align 4, 0x90
.LBB0_14: # %._crit_edge
# in Loop: Header=BB0_5 Depth=1
incl %ebp
cmpl 8(%rsp), %ebp
jge .LBB0_15
.LBB0_5: # %.lr.ph31
# =>This Loop Header: Depth=1
# Child Loop BB0_11 Depth 2
movl %ebp, %edi
callq hipSetDevice
testl %eax, %eax
jne .LBB0_6
# %bb.7: # in Loop: Header=BB0_5 Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
testl %eax, %eax
jne .LBB0_8
# %bb.9: # in Loop: Header=BB0_5 Depth=1
movl $.L.str.3, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
cmpl $0, 8(%rsp)
jle .LBB0_14
# %bb.10: # %.lr.ph.preheader
# in Loop: Header=BB0_5 Depth=1
xorl %r15d, %r15d
jmp .LBB0_11
.p2align 4, 0x90
.LBB0_13: # in Loop: Header=BB0_11 Depth=2
incl %r15d
cmpl 8(%rsp), %r15d
jge .LBB0_14
.LBB0_11: # %.lr.ph
# Parent Loop BB0_5 Depth=1
# => This Inner Loop Header: Depth=2
cmpl %r15d, %ebp
je .LBB0_13
# %bb.12: # in Loop: Header=BB0_11 Depth=2
movq %r14, %rdi
movl %ebp, %esi
movl %r15d, %edx
callq hipDeviceCanAccessPeer
cmpl $0, 12(%rsp)
movl $.L.str.5, %edx
cmoveq %r12, %rdx
movl $.L.str.4, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
jmp .LBB0_13
.LBB0_15: # %._crit_edge32
xorl %eax, %eax
addq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_8:
.cfi_def_cfa_offset 1536
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $25, %ecx
jmp .LBB0_2
.LBB0_6:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $23, %ecx
.LBB0_2:
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.LBB0_1:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $17, %ecx
jmp .LBB0_2
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error '%s' at line %d in file %s\n"
.size .L.str, 34
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/RuslanKutdusov/convexHull/master/access.hip"
.size .L.str.1, 101
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Device count: %d\n"
.size .L.str.2, 18
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "GPU%d is capable of directly accessing memory from \n"
.size .L.str.3, 53
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\tGPU%d: %s\n"
.size .L.str.4, 12
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "yes"
.size .L.str.5, 4
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "no"
.size .L.str.6, 3
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0008d8ae_00000000-6_access.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "yes"
.LC1:
.string "no"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "/home/ubuntu/Datasets/stackv2/train-structured/RuslanKutdusov/convexHull/master/access.cu"
.align 8
.LC3:
.string "Error '%s' at line %d in file %s\n"
.section .rodata.str1.1
.LC4:
.string "Device count: %d\n"
.section .rodata.str1.8
.align 8
.LC5:
.string "GPU%d is capable of directly accessing memory from \n"
.section .rodata.str1.1
.LC6:
.string "\tGPU%d: %s\n"
.text
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1064, %rsp
.cfi_def_cfa_offset 1120
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
movl $0, 8(%rsp)
leaq 8(%rsp), %rdi
call cudaGetDeviceCount@PLT
testl %eax, %eax
jne .L18
movl 8(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 8(%rsp)
jle .L5
movl $0, %ebp
leaq .LC1(%rip), %r15
leaq .LC0(%rip), %r14
jmp .L12
.L18:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC2(%rip), %r9
movl $15, %r8d
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L20:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC2(%rip), %r9
movl $21, %r8d
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L21:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC2(%rip), %r9
movl $23, %r8d
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L19:
movl %ebx, %edx
movl %ebp, %esi
movq %r13, %rdi
call cudaDeviceCanAccessPeer@PLT
cmpl $0, 12(%rsp)
movq %r14, %rcx
cmove %r15, %rcx
movl %ebx, %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L9:
addl $1, %ebx
cmpl %ebx, 8(%rsp)
jle .L8
.L11:
cmpl %ebx, %ebp
jne .L19
jmp .L9
.L8:
addl $1, %ebp
cmpl %ebp, 8(%rsp)
jle .L5
.L12:
movl %ebp, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L20
leaq 16(%rsp), %rdi
movl %ebp, %esi
call cudaGetDeviceProperties_v2@PLT
testl %eax, %eax
jne .L21
movl %ebp, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 8(%rsp)
jle .L8
movl $0, %ebx
leaq 12(%rsp), %r13
leaq .LC6(%rip), %r12
jmp .L11
.L5:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L22
movl $0, %eax
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "access.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 1536
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $0, 8(%rsp)
leaq 8(%rsp), %rdi
callq hipGetDeviceCount
testl %eax, %eax
jne .LBB0_1
# %bb.3:
movl 8(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
cmpl $0, 8(%rsp)
jle .LBB0_15
# %bb.4: # %.lr.ph31.preheader
leaq 16(%rsp), %rbx
leaq 12(%rsp), %r14
movl $.L.str.6, %r12d
xorl %ebp, %ebp
jmp .LBB0_5
.p2align 4, 0x90
.LBB0_14: # %._crit_edge
# in Loop: Header=BB0_5 Depth=1
incl %ebp
cmpl 8(%rsp), %ebp
jge .LBB0_15
.LBB0_5: # %.lr.ph31
# =>This Loop Header: Depth=1
# Child Loop BB0_11 Depth 2
movl %ebp, %edi
callq hipSetDevice
testl %eax, %eax
jne .LBB0_6
# %bb.7: # in Loop: Header=BB0_5 Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
testl %eax, %eax
jne .LBB0_8
# %bb.9: # in Loop: Header=BB0_5 Depth=1
movl $.L.str.3, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
cmpl $0, 8(%rsp)
jle .LBB0_14
# %bb.10: # %.lr.ph.preheader
# in Loop: Header=BB0_5 Depth=1
xorl %r15d, %r15d
jmp .LBB0_11
.p2align 4, 0x90
.LBB0_13: # in Loop: Header=BB0_11 Depth=2
incl %r15d
cmpl 8(%rsp), %r15d
jge .LBB0_14
.LBB0_11: # %.lr.ph
# Parent Loop BB0_5 Depth=1
# => This Inner Loop Header: Depth=2
cmpl %r15d, %ebp
je .LBB0_13
# %bb.12: # in Loop: Header=BB0_11 Depth=2
movq %r14, %rdi
movl %ebp, %esi
movl %r15d, %edx
callq hipDeviceCanAccessPeer
cmpl $0, 12(%rsp)
movl $.L.str.5, %edx
cmoveq %r12, %rdx
movl $.L.str.4, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
jmp .LBB0_13
.LBB0_15: # %._crit_edge32
xorl %eax, %eax
addq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_8:
.cfi_def_cfa_offset 1536
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $25, %ecx
jmp .LBB0_2
.LBB0_6:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $23, %ecx
.LBB0_2:
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.LBB0_1:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $17, %ecx
jmp .LBB0_2
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error '%s' at line %d in file %s\n"
.size .L.str, 34
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/RuslanKutdusov/convexHull/master/access.hip"
.size .L.str.1, 101
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Device count: %d\n"
.size .L.str.2, 18
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "GPU%d is capable of directly accessing memory from \n"
.size .L.str.3, 53
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\tGPU%d: %s\n"
.size .L.str.4, 12
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "yes"
.size .L.str.5, 4
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "no"
.size .L.str.6, 3
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <cuda.h>
#include <assert.h>
/* To save you time, we are including all 6 variants of the loop ordering
as separate functions and then calling them using function pointers.
The reason for having separate functions that are nearly identical is
to avoid counting any extraneous processing towards the computation
time. This includes I/O accesses (printf) and conditionals (if/switch).
I/O accesses are slow and conditional/branching statements could
unfairly bias results (lower cases in switches must run through more
case statements on each iteration).
*/
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
const int TILE_WIDTH_GEMM = 16;
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
void multMat1( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is ijk loop order. */
for( i = 0; i < n; i++ )
for( j = 0; j < n; j++ )
for( k = 0; k < n; k++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat2( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is ikj loop order. */
for( i = 0; i < n; i++ )
for( k = 0; k < n; k++ )
for( j = 0; j < n; j++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat3( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is jik loop order. */
for( j = 0; j < n; j++ )
for( i = 0; i < n; i++ )
for( k = 0; k < n; k++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat4( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is jki loop order. */
for( j = 0; j < n; j++ )
for( k = 0; k < n; k++ )
for( i = 0; i < n; i++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat5( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is kij loop order. */
for( k = 0; k < n; k++ )
for( i = 0; i < n; i++ )
for( j = 0; j < n; j++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat6( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is kji loop order. */
for( k = 0; k < n; k++ )
for( j = 0; j < n; j++ )
for( i = 0; i < n; i++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
/* Question 1 */
// GPU based GEMM with SM-specific shared memory
__global__ void MatrixMultiplyKernel(const float* devM, const float* devN,float* devP, const int width){
__shared__ float sM[TILE_WIDTH_GEMM][TILE_WIDTH_GEMM];
__shared__ float sN[TILE_WIDTH_GEMM][TILE_WIDTH_GEMM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = bx * TILE_WIDTH_GEMM + bx;
int row = by * TILE_WIDTH_GEMM + ty;
// Initialize accumulator to 0. Then multiply/add
float pValue = 0;
for (int m = 0; m < width / TILE_WIDTH_GEMM; m++) {
sM[ty][tx] = devM[row *width+(m*TILE_WIDTH_GEMM + tx)];
sN[ty][tx] = devN[col+(m *TILE_WIDTH_GEMM+ty)*width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH_GEMM; ++k)
pValue += sM[ty][k] * sN[k][tx];
__syncthreads();
}
devP[row * width + col] = pValue;
}
/* Question 2 */
// Simple matrix copying
__global__ void copy(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[(y+j)*width + x] = idata[(y+j)*width + x];
}
// Matrix copy with shared memory
__global__ void copySharedMem(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x];
}
// Native transpose
__global__ void transposeNaive(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
// Coalesced transpose with block shared memory
__global__ void transposeCoalesced(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
// Coalesced transpose with shared memory and matrix padding
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
/* uses timing features from sys/time.h that you haven't seen before */
int main( int argc, char **argv ) {
// CPU implementation
int nmax = 1000, i;
void (*orderings[])(int,float *,float *,float *) =
{&multMat1,&multMat2,&multMat3,&multMat4,&multMat5,&multMat6};
char *names[] = {"ijk","ikj","jik","jki","kij","kji"};
float *A = (float *)malloc( nmax*nmax * sizeof(float));
float *B = (float *)malloc( nmax*nmax * sizeof(float));
float *C = (float *)malloc( nmax*nmax * sizeof(float));
struct timeval start, end;
/* fill matrices with random numbers */
for( i = 0; i < nmax*nmax; i++ ) A[i] = drand48()*2-1;
for( i = 0; i < nmax*nmax; i++ ) B[i] = drand48()*2-1;
for( i = 0; i < nmax*nmax; i++ ) C[i] = drand48()*2-1;
for( i = 0; i < 6; i++) {
/* multiply matrices and measure the time */
gettimeofday( &start, NULL );
(*orderings[i])( nmax, A, B, C );
gettimeofday( &end, NULL );
/* convert time to Gflop/s */
double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
double Gflops = 2e-9*nmax*nmax*nmax/seconds;
printf( "%s:\tn = %d, %.3f Gflop/s\n", names[i], nmax, Gflops );
}
free( A );
free( B );
free( C );
printf("\n\n");
// HW2: Question 1
int m_size1 = 1600, n_size1 = 1600;
int width1 = 1600;
int iterations1 = 100;
float ms1 = 0;
float GFLOPs = 0;
float *A_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *B_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *C_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *A_d1, *B_d1, *C_d1;
cudaMalloc((void**)&A_d1, m_size1*n_size1*sizeof(float));
cudaMalloc((void**)&B_d1, m_size1*n_size1*sizeof(float));
cudaMalloc((void**)&C_d1, m_size1*n_size1*sizeof(float));
// events for timing
cudaEvent_t startEvent1, stopEvent1;
checkCuda(cudaEventCreate(&startEvent1));
checkCuda(cudaEventCreate(&stopEvent1));
dim3 dimGrid1(width1/TILE_WIDTH_GEMM, width1/TILE_WIDTH_GEMM, 1);
dim3 dimBlock1(TILE_WIDTH_GEMM, TILE_WIDTH_GEMM, 1);
cudaMemcpy(A_d1, A_h1, m_size1*n_size1*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_d1, B_h1, m_size1*n_size1*sizeof(float), cudaMemcpyHostToDevice);
// GPU based GEMM
checkCuda( cudaEventRecord(startEvent1, 0));
for (int i = 0; i < iterations1; i++) {
//gettimeofday( &start, NULL );
MatrixMultiplyKernel<<<dimGrid1, dimBlock1>>>(A_d1, B_d1, C_d1, width1);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//GFLOPs += 2e-9*width1*width1*width1/seconds;
}
checkCuda( cudaEventRecord(stopEvent1, 0) );
checkCuda( cudaEventSynchronize(stopEvent1) );
checkCuda( cudaEventElapsedTime(&ms1, startEvent1, stopEvent1) );
printf( "GPU based GEMM time: %.3f ms\n", ms1 );
cudaMemcpy(C_h1, C_d1, m_size1*n_size1*sizeof(float), cudaMemcpyDeviceToHost);
GFLOPs = iterations1*2e-9*width1*width1*width1/(ms1*1e-3);
printf( "GPU based GEMM: %.3f GFLOPs/s\n", GFLOPs );
cudaFree( A_d1 );
cudaFree( B_d1 );
cudaFree( C_d1 );
free( A_h1 );
free( B_h1 );
free( C_h1 );
printf("\n\n");
// HW2: Question 2
int m_size2 = 1024, n_size2 = 1024;
int width2 = 1024;
int iterations2 = 100;
float Mem_Acc_Rate[5] = {0};
float ms2 = 0;
float *A_h2 = (float *)malloc( m_size2*n_size2*sizeof(float));
float *B_h2 = (float *)malloc( m_size2*n_size2*sizeof(float));
float *A_d2, *B_d2;
cudaMalloc((void**)&A_d2, m_size2*n_size2*sizeof(float));
cudaMalloc((void**)&B_d2, m_size2*n_size2*sizeof(float));
// events for timing
cudaEvent_t startEvent2, stopEvent2;
checkCuda(cudaEventCreate(&startEvent2));
checkCuda(cudaEventCreate(&stopEvent2));
dim3 dimGrid2(width2/TILE_DIM, width2/TILE_DIM, 1);
dim3 dimBlock2(TILE_DIM, BLOCK_ROWS, 1);
cudaMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), cudaMemcpyHostToDevice);
// Simple matrix copying
checkCuda( cudaEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
copy<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[0] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( cudaEventRecord(stopEvent2, 0) );
checkCuda( cudaEventSynchronize(stopEvent2) );
checkCuda( cudaEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Simple matrix copying time: %.3f ms\n", ms2 );
cudaMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), cudaMemcpyHostToDevice);
Mem_Acc_Rate[0] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Matrix copy with shared memory
checkCuda( cudaEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
copySharedMem<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[1] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( cudaEventRecord(stopEvent2, 0) );
checkCuda( cudaEventSynchronize(stopEvent2) );
checkCuda( cudaEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Matrix copy with shared memory time: %.3f ms\n", ms2 );
cudaMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), cudaMemcpyHostToDevice);
Mem_Acc_Rate[1] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Native transpose
checkCuda( cudaEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeNaive<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[2] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( cudaEventRecord(stopEvent2, 0) );
checkCuda( cudaEventSynchronize(stopEvent2) );
checkCuda( cudaEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Native transpose time: %.3f ms\n", ms2 );
cudaMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), cudaMemcpyHostToDevice);
Mem_Acc_Rate[2] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Coalesced transpose with block shared memory
checkCuda( cudaEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeCoalesced<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[3] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( cudaEventRecord(stopEvent2, 0) );
checkCuda( cudaEventSynchronize(stopEvent2) );
checkCuda( cudaEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Coalesced transpose with block shared memory time: %.3f ms\n", ms2 );
cudaMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), cudaMemcpyHostToDevice);
Mem_Acc_Rate[3] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Coalesced transpose with shared memory and matrix padding
checkCuda( cudaEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeNoBankConflicts<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[4] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( cudaEventRecord(stopEvent2, 0) );
checkCuda( cudaEventSynchronize(stopEvent2) );
checkCuda( cudaEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Coalesced transpose with shared memory and matrix padding time: %.3f ms\n", ms2 );
cudaMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), cudaMemcpyHostToDevice);
Mem_Acc_Rate[4] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
printf("\n");
printf( "Simple matrix copying: %.3f GB/s\n", Mem_Acc_Rate[0] );
printf( "Matrix copy with shared memory: %.3f GB/s\n", Mem_Acc_Rate[1] );
printf( "Native transpose: %.3f GB/s\n", Mem_Acc_Rate[2] );
printf( "Coalesced transpose with block shared memory: %.3f GB/s\n", Mem_Acc_Rate[3] );
printf( "Coalesced transpose with shared memory and matrix padding: %.3f GB/s\n", Mem_Acc_Rate[4] );
cudaFree( A_d2 );
cudaFree( B_d2 );
free( A_h2 );
free( B_h2 );
return 0;
} | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <assert.h>
/* To save you time, we are including all 6 variants of the loop ordering
as separate functions and then calling them using function pointers.
The reason for having separate functions that are nearly identical is
to avoid counting any extraneous processing towards the computation
time. This includes I/O accesses (printf) and conditionals (if/switch).
I/O accesses are slow and conditional/branching statements could
unfairly bias results (lower cases in switches must run through more
case statements on each iteration).
*/
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
const int TILE_WIDTH_GEMM = 16;
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
void multMat1( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is ijk loop order. */
for( i = 0; i < n; i++ )
for( j = 0; j < n; j++ )
for( k = 0; k < n; k++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat2( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is ikj loop order. */
for( i = 0; i < n; i++ )
for( k = 0; k < n; k++ )
for( j = 0; j < n; j++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat3( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is jik loop order. */
for( j = 0; j < n; j++ )
for( i = 0; i < n; i++ )
for( k = 0; k < n; k++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat4( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is jki loop order. */
for( j = 0; j < n; j++ )
for( k = 0; k < n; k++ )
for( i = 0; i < n; i++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat5( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is kij loop order. */
for( k = 0; k < n; k++ )
for( i = 0; i < n; i++ )
for( j = 0; j < n; j++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat6( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is kji loop order. */
for( k = 0; k < n; k++ )
for( j = 0; j < n; j++ )
for( i = 0; i < n; i++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
/* Question 1 */
// GPU based GEMM with SM-specific shared memory
__global__ void MatrixMultiplyKernel(const float* devM, const float* devN,float* devP, const int width){
__shared__ float sM[TILE_WIDTH_GEMM][TILE_WIDTH_GEMM];
__shared__ float sN[TILE_WIDTH_GEMM][TILE_WIDTH_GEMM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = bx * TILE_WIDTH_GEMM + bx;
int row = by * TILE_WIDTH_GEMM + ty;
// Initialize accumulator to 0. Then multiply/add
float pValue = 0;
for (int m = 0; m < width / TILE_WIDTH_GEMM; m++) {
sM[ty][tx] = devM[row *width+(m*TILE_WIDTH_GEMM + tx)];
sN[ty][tx] = devN[col+(m *TILE_WIDTH_GEMM+ty)*width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH_GEMM; ++k)
pValue += sM[ty][k] * sN[k][tx];
__syncthreads();
}
devP[row * width + col] = pValue;
}
/* Question 2 */
// Simple matrix copying
__global__ void copy(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[(y+j)*width + x] = idata[(y+j)*width + x];
}
// Matrix copy with shared memory
__global__ void copySharedMem(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x];
}
// Native transpose
__global__ void transposeNaive(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
// Coalesced transpose with block shared memory
__global__ void transposeCoalesced(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
// Coalesced transpose with shared memory and matrix padding
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
/* uses timing features from sys/time.h that you haven't seen before */
int main( int argc, char **argv ) {
// CPU implementation
int nmax = 1000, i;
void (*orderings[])(int,float *,float *,float *) =
{&multMat1,&multMat2,&multMat3,&multMat4,&multMat5,&multMat6};
char *names[] = {"ijk","ikj","jik","jki","kij","kji"};
float *A = (float *)malloc( nmax*nmax * sizeof(float));
float *B = (float *)malloc( nmax*nmax * sizeof(float));
float *C = (float *)malloc( nmax*nmax * sizeof(float));
struct timeval start, end;
/* fill matrices with random numbers */
for( i = 0; i < nmax*nmax; i++ ) A[i] = drand48()*2-1;
for( i = 0; i < nmax*nmax; i++ ) B[i] = drand48()*2-1;
for( i = 0; i < nmax*nmax; i++ ) C[i] = drand48()*2-1;
for( i = 0; i < 6; i++) {
/* multiply matrices and measure the time */
gettimeofday( &start, NULL );
(*orderings[i])( nmax, A, B, C );
gettimeofday( &end, NULL );
/* convert time to Gflop/s */
double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
double Gflops = 2e-9*nmax*nmax*nmax/seconds;
printf( "%s:\tn = %d, %.3f Gflop/s\n", names[i], nmax, Gflops );
}
free( A );
free( B );
free( C );
printf("\n\n");
// HW2: Question 1
int m_size1 = 1600, n_size1 = 1600;
int width1 = 1600;
int iterations1 = 100;
float ms1 = 0;
float GFLOPs = 0;
float *A_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *B_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *C_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *A_d1, *B_d1, *C_d1;
hipMalloc((void**)&A_d1, m_size1*n_size1*sizeof(float));
hipMalloc((void**)&B_d1, m_size1*n_size1*sizeof(float));
hipMalloc((void**)&C_d1, m_size1*n_size1*sizeof(float));
// events for timing
hipEvent_t startEvent1, stopEvent1;
checkCuda(hipEventCreate(&startEvent1));
checkCuda(hipEventCreate(&stopEvent1));
dim3 dimGrid1(width1/TILE_WIDTH_GEMM, width1/TILE_WIDTH_GEMM, 1);
dim3 dimBlock1(TILE_WIDTH_GEMM, TILE_WIDTH_GEMM, 1);
hipMemcpy(A_d1, A_h1, m_size1*n_size1*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_d1, B_h1, m_size1*n_size1*sizeof(float), hipMemcpyHostToDevice);
// GPU based GEMM
checkCuda( hipEventRecord(startEvent1, 0));
for (int i = 0; i < iterations1; i++) {
//gettimeofday( &start, NULL );
MatrixMultiplyKernel<<<dimGrid1, dimBlock1>>>(A_d1, B_d1, C_d1, width1);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//GFLOPs += 2e-9*width1*width1*width1/seconds;
}
checkCuda( hipEventRecord(stopEvent1, 0) );
checkCuda( hipEventSynchronize(stopEvent1) );
checkCuda( hipEventElapsedTime(&ms1, startEvent1, stopEvent1) );
printf( "GPU based GEMM time: %.3f ms\n", ms1 );
hipMemcpy(C_h1, C_d1, m_size1*n_size1*sizeof(float), hipMemcpyDeviceToHost);
GFLOPs = iterations1*2e-9*width1*width1*width1/(ms1*1e-3);
printf( "GPU based GEMM: %.3f GFLOPs/s\n", GFLOPs );
hipFree( A_d1 );
hipFree( B_d1 );
hipFree( C_d1 );
free( A_h1 );
free( B_h1 );
free( C_h1 );
printf("\n\n");
// HW2: Question 2
int m_size2 = 1024, n_size2 = 1024;
int width2 = 1024;
int iterations2 = 100;
float Mem_Acc_Rate[5] = {0};
float ms2 = 0;
float *A_h2 = (float *)malloc( m_size2*n_size2*sizeof(float));
float *B_h2 = (float *)malloc( m_size2*n_size2*sizeof(float));
float *A_d2, *B_d2;
hipMalloc((void**)&A_d2, m_size2*n_size2*sizeof(float));
hipMalloc((void**)&B_d2, m_size2*n_size2*sizeof(float));
// events for timing
hipEvent_t startEvent2, stopEvent2;
checkCuda(hipEventCreate(&startEvent2));
checkCuda(hipEventCreate(&stopEvent2));
dim3 dimGrid2(width2/TILE_DIM, width2/TILE_DIM, 1);
dim3 dimBlock2(TILE_DIM, BLOCK_ROWS, 1);
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
// Simple matrix copying
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
copy<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[0] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Simple matrix copying time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[0] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Matrix copy with shared memory
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
copySharedMem<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[1] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Matrix copy with shared memory time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[1] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Native transpose
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeNaive<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[2] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Native transpose time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[2] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Coalesced transpose with block shared memory
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeCoalesced<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[3] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Coalesced transpose with block shared memory time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[3] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Coalesced transpose with shared memory and matrix padding
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeNoBankConflicts<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[4] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Coalesced transpose with shared memory and matrix padding time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[4] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
printf("\n");
printf( "Simple matrix copying: %.3f GB/s\n", Mem_Acc_Rate[0] );
printf( "Matrix copy with shared memory: %.3f GB/s\n", Mem_Acc_Rate[1] );
printf( "Native transpose: %.3f GB/s\n", Mem_Acc_Rate[2] );
printf( "Coalesced transpose with block shared memory: %.3f GB/s\n", Mem_Acc_Rate[3] );
printf( "Coalesced transpose with shared memory and matrix padding: %.3f GB/s\n", Mem_Acc_Rate[4] );
hipFree( A_d2 );
hipFree( B_d2 );
free( A_h2 );
free( B_h2 );
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <assert.h>
/* To save you time, we are including all 6 variants of the loop ordering
as separate functions and then calling them using function pointers.
The reason for having separate functions that are nearly identical is
to avoid counting any extraneous processing towards the computation
time. This includes I/O accesses (printf) and conditionals (if/switch).
I/O accesses are slow and conditional/branching statements could
unfairly bias results (lower cases in switches must run through more
case statements on each iteration).
*/
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
const int TILE_WIDTH_GEMM = 16;
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
void multMat1( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is ijk loop order. */
for( i = 0; i < n; i++ )
for( j = 0; j < n; j++ )
for( k = 0; k < n; k++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat2( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is ikj loop order. */
for( i = 0; i < n; i++ )
for( k = 0; k < n; k++ )
for( j = 0; j < n; j++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat3( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is jik loop order. */
for( j = 0; j < n; j++ )
for( i = 0; i < n; i++ )
for( k = 0; k < n; k++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat4( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is jki loop order. */
for( j = 0; j < n; j++ )
for( k = 0; k < n; k++ )
for( i = 0; i < n; i++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat5( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is kij loop order. */
for( k = 0; k < n; k++ )
for( i = 0; i < n; i++ )
for( j = 0; j < n; j++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
void multMat6( int n, float *A, float *B, float *C ) {
int i,j,k;
/* This is kji loop order. */
for( k = 0; k < n; k++ )
for( j = 0; j < n; j++ )
for( i = 0; i < n; i++ )
C[i+j*n] += A[i+k*n]*B[k+j*n];
}
/* Question 1 */
// GPU based GEMM with SM-specific shared memory
__global__ void MatrixMultiplyKernel(const float* devM, const float* devN,float* devP, const int width){
__shared__ float sM[TILE_WIDTH_GEMM][TILE_WIDTH_GEMM];
__shared__ float sN[TILE_WIDTH_GEMM][TILE_WIDTH_GEMM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = bx * TILE_WIDTH_GEMM + bx;
int row = by * TILE_WIDTH_GEMM + ty;
// Initialize accumulator to 0. Then multiply/add
float pValue = 0;
for (int m = 0; m < width / TILE_WIDTH_GEMM; m++) {
sM[ty][tx] = devM[row *width+(m*TILE_WIDTH_GEMM + tx)];
sN[ty][tx] = devN[col+(m *TILE_WIDTH_GEMM+ty)*width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH_GEMM; ++k)
pValue += sM[ty][k] * sN[k][tx];
__syncthreads();
}
devP[row * width + col] = pValue;
}
/* Question 2 */
// Simple matrix copying
__global__ void copy(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[(y+j)*width + x] = idata[(y+j)*width + x];
}
// Matrix copy with shared memory
__global__ void copySharedMem(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x];
}
// Native transpose
__global__ void transposeNaive(float *odata, const float *idata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
// Coalesced transpose with block shared memory
__global__ void transposeCoalesced(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
// Coalesced transpose with shared memory and matrix padding
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
/* uses timing features from sys/time.h that you haven't seen before */
int main( int argc, char **argv ) {
// CPU implementation
int nmax = 1000, i;
void (*orderings[])(int,float *,float *,float *) =
{&multMat1,&multMat2,&multMat3,&multMat4,&multMat5,&multMat6};
char *names[] = {"ijk","ikj","jik","jki","kij","kji"};
float *A = (float *)malloc( nmax*nmax * sizeof(float));
float *B = (float *)malloc( nmax*nmax * sizeof(float));
float *C = (float *)malloc( nmax*nmax * sizeof(float));
struct timeval start, end;
/* fill matrices with random numbers */
for( i = 0; i < nmax*nmax; i++ ) A[i] = drand48()*2-1;
for( i = 0; i < nmax*nmax; i++ ) B[i] = drand48()*2-1;
for( i = 0; i < nmax*nmax; i++ ) C[i] = drand48()*2-1;
for( i = 0; i < 6; i++) {
/* multiply matrices and measure the time */
gettimeofday( &start, NULL );
(*orderings[i])( nmax, A, B, C );
gettimeofday( &end, NULL );
/* convert time to Gflop/s */
double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
double Gflops = 2e-9*nmax*nmax*nmax/seconds;
printf( "%s:\tn = %d, %.3f Gflop/s\n", names[i], nmax, Gflops );
}
free( A );
free( B );
free( C );
printf("\n\n");
// HW2: Question 1
int m_size1 = 1600, n_size1 = 1600;
int width1 = 1600;
int iterations1 = 100;
float ms1 = 0;
float GFLOPs = 0;
float *A_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *B_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *C_h1 = (float *)malloc( m_size1*n_size1*sizeof(float));
float *A_d1, *B_d1, *C_d1;
hipMalloc((void**)&A_d1, m_size1*n_size1*sizeof(float));
hipMalloc((void**)&B_d1, m_size1*n_size1*sizeof(float));
hipMalloc((void**)&C_d1, m_size1*n_size1*sizeof(float));
// events for timing
hipEvent_t startEvent1, stopEvent1;
checkCuda(hipEventCreate(&startEvent1));
checkCuda(hipEventCreate(&stopEvent1));
dim3 dimGrid1(width1/TILE_WIDTH_GEMM, width1/TILE_WIDTH_GEMM, 1);
dim3 dimBlock1(TILE_WIDTH_GEMM, TILE_WIDTH_GEMM, 1);
hipMemcpy(A_d1, A_h1, m_size1*n_size1*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_d1, B_h1, m_size1*n_size1*sizeof(float), hipMemcpyHostToDevice);
// GPU based GEMM
checkCuda( hipEventRecord(startEvent1, 0));
for (int i = 0; i < iterations1; i++) {
//gettimeofday( &start, NULL );
MatrixMultiplyKernel<<<dimGrid1, dimBlock1>>>(A_d1, B_d1, C_d1, width1);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//GFLOPs += 2e-9*width1*width1*width1/seconds;
}
checkCuda( hipEventRecord(stopEvent1, 0) );
checkCuda( hipEventSynchronize(stopEvent1) );
checkCuda( hipEventElapsedTime(&ms1, startEvent1, stopEvent1) );
printf( "GPU based GEMM time: %.3f ms\n", ms1 );
hipMemcpy(C_h1, C_d1, m_size1*n_size1*sizeof(float), hipMemcpyDeviceToHost);
GFLOPs = iterations1*2e-9*width1*width1*width1/(ms1*1e-3);
printf( "GPU based GEMM: %.3f GFLOPs/s\n", GFLOPs );
hipFree( A_d1 );
hipFree( B_d1 );
hipFree( C_d1 );
free( A_h1 );
free( B_h1 );
free( C_h1 );
printf("\n\n");
// HW2: Question 2
int m_size2 = 1024, n_size2 = 1024;
int width2 = 1024;
int iterations2 = 100;
float Mem_Acc_Rate[5] = {0};
float ms2 = 0;
float *A_h2 = (float *)malloc( m_size2*n_size2*sizeof(float));
float *B_h2 = (float *)malloc( m_size2*n_size2*sizeof(float));
float *A_d2, *B_d2;
hipMalloc((void**)&A_d2, m_size2*n_size2*sizeof(float));
hipMalloc((void**)&B_d2, m_size2*n_size2*sizeof(float));
// events for timing
hipEvent_t startEvent2, stopEvent2;
checkCuda(hipEventCreate(&startEvent2));
checkCuda(hipEventCreate(&stopEvent2));
dim3 dimGrid2(width2/TILE_DIM, width2/TILE_DIM, 1);
dim3 dimBlock2(TILE_DIM, BLOCK_ROWS, 1);
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
// Simple matrix copying
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
copy<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[0] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Simple matrix copying time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[0] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Matrix copy with shared memory
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
copySharedMem<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[1] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Matrix copy with shared memory time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[1] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Native transpose
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeNaive<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[2] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Native transpose time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[2] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Coalesced transpose with block shared memory
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeCoalesced<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[3] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Coalesced transpose with block shared memory time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[3] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
// Coalesced transpose with shared memory and matrix padding
checkCuda( hipEventRecord(startEvent2, 0));
for (int i = 0; i < iterations2; i++) {
//gettimeofday( &start, NULL );
transposeNoBankConflicts<<<dimGrid2, dimBlock2>>>(A_d2, B_d2);
//gettimeofday( &end, NULL );
//double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
//Mem_Acc_Rate[4] += 2*width2*width2*sizeof(float)/seconds/(float)(1e9);
}
checkCuda( hipEventRecord(stopEvent2, 0) );
checkCuda( hipEventSynchronize(stopEvent2) );
checkCuda( hipEventElapsedTime(&ms2, startEvent2, stopEvent2) );
printf( "Coalesced transpose with shared memory and matrix padding time: %.3f ms\n", ms2 );
hipMemcpy(A_d2, A_h2, m_size2*n_size2*sizeof(float), hipMemcpyHostToDevice);
Mem_Acc_Rate[4] = iterations2*2*width2*width2*sizeof(float)/(ms2*1e-3)/(float)(1e9);
printf("\n");
printf( "Simple matrix copying: %.3f GB/s\n", Mem_Acc_Rate[0] );
printf( "Matrix copy with shared memory: %.3f GB/s\n", Mem_Acc_Rate[1] );
printf( "Native transpose: %.3f GB/s\n", Mem_Acc_Rate[2] );
printf( "Coalesced transpose with block shared memory: %.3f GB/s\n", Mem_Acc_Rate[3] );
printf( "Coalesced transpose with shared memory and matrix padding: %.3f GB/s\n", Mem_Acc_Rate[4] );
hipFree( A_d2 );
hipFree( B_d2 );
free( A_h2 );
free( B_h2 );
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z20MatrixMultiplyKernelPKfS0_Pfi
.globl _Z20MatrixMultiplyKernelPKfS0_Pfi
.p2align 8
.type _Z20MatrixMultiplyKernelPKfS0_Pfi,@function
_Z20MatrixMultiplyKernelPKfS0_Pfi:
s_load_b32 s3, s[0:1], 0x18
v_bfe_u32 v3, v0, 10, 10
v_mov_b32_e32 v1, 0
s_mul_i32 s2, s14, 17
s_delay_alu instid0(VALU_DEP_2)
v_lshl_add_u32 v2, s15, 4, v3
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s3, 16
s_cbranch_scc1 .LBB0_5
v_and_b32_e32 v6, 0x3ff, v0
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v4, 6, v3
s_ashr_i32 s8, s3, 31
s_mov_b32 s9, 0
v_lshlrev_b32_e32 v7, 2, v6
s_lshr_b32 s8, s8, 28
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s8, s3, s8
s_ashr_i32 s8, s8, 4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v5, 0x400, v7
v_mad_u64_u32 v[0:1], null, v2, s3, v[6:7]
v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v6, v4, v7
v_add_nc_u32_e32 v7, v5, v4
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
s_lshl_b32 s10, s9, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_nc_u32_e32 v9, s10, v3
v_add_nc_u32_e32 v8, s10, v0
s_mov_b32 s10, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[10:11], null, v9, s3, s[2:3]
v_ashrrev_i32_e32 v9, 31, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[8:9], 2, v[8:9]
v_ashrrev_i32_e32 v11, 31, v10
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s4, v8
v_lshlrev_b64 v[10:11], 2, v[10:11]
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v9, vcc_lo
v_add_co_u32 v10, vcc_lo, s6, v10
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v11, vcc_lo
global_load_b32 v9, v[8:9], off
global_load_b32 v10, v[10:11], off
v_mov_b32_e32 v8, v5
s_waitcnt vmcnt(1)
ds_store_b32 v6, v9
s_waitcnt vmcnt(0)
ds_store_b32 v7, v10
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_3:
v_add_nc_u32_e32 v9, s10, v4
s_add_i32 s10, s10, 4
ds_load_b32 v10, v8
ds_load_b32 v9, v9
v_add_nc_u32_e32 v8, 64, v8
s_cmp_eq_u32 s10, 64
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v1, v9, v10
s_cbranch_scc0 .LBB0_3
s_add_i32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s9, s8
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_2
.LBB0_5:
s_set_inst_prefetch_distance 0x2
s_load_b64 s[0:1], s[0:1], 0x10
v_mad_u64_u32 v[3:4], null, v2, s3, s[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[2:3], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_store_b32 v[2:3], v1, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z20MatrixMultiplyKernelPKfS0_Pfi
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z20MatrixMultiplyKernelPKfS0_Pfi, .Lfunc_end0-_Z20MatrixMultiplyKernelPKfS0_Pfi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4copyPfPKf
.globl _Z4copyPfPKf
.p2align 8
.type _Z4copyPfPKf,@function
_Z4copyPfPKf:
s_load_b32 s4, s[0:1], 0x10
v_bfe_u32 v1, v0, 10, 10
s_load_b128 s[0:3], s[0:1], 0x0
v_and_b32_e32 v0, 0x3ff, v0
s_lshl_b32 s5, s14, 5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshl_add_u32 v1, s15, 5, v1
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v1, s4, v1
s_lshl_b32 s4, s4, 8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v1, 5, v1
v_add3_u32 v0, v0, v1, s5
s_mov_b32 s5, -8
.p2align 6
.LBB1_1:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s5, s5, 8
s_cmp_gt_u32 s5, 23
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[1:2], 2, v[0:1]
v_add_nc_u32_e32 v0, s4, v0
v_add_co_u32 v3, vcc_lo, s2, v1
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
global_store_b32 v[1:2], v3, off
s_cbranch_scc0 .LBB1_1
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4copyPfPKf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4copyPfPKf, .Lfunc_end1-_Z4copyPfPKf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13copySharedMemPfPKf
.globl _Z13copySharedMemPfPKf
.p2align 8
.type _Z13copySharedMemPfPKf,@function
_Z13copySharedMemPfPKf:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x10
s_load_b64 s[2:3], s[0:1], 0x8
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_lshl_b32 s5, s14, 5
s_mov_b32 s7, -8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v1, s15, 5, v2
v_lshlrev_b32_e32 v0, 2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_lshl_add_u32 v5, v2, 7, v0
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v4, s4, v1
s_lshl_b32 s6, s4, 8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v1, 5, v4
v_add3_u32 v0, v3, v1, s5
.LBB2_1:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s7, s7, 8
s_cmp_gt_u32 s7, 23
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 2, v[0:1]
v_add_nc_u32_e32 v0, s6, v0
v_add_co_u32 v6, vcc_lo, s2, v6
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b32 v1, v[6:7], off
s_waitcnt vmcnt(0)
ds_store_b32 v5, v1
v_add_nc_u32_e32 v5, 0x400, v5
s_cbranch_scc0 .LBB2_1
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 5, v4
v_lshlrev_b32_e32 v1, 2, v3
s_lshl_b32 s2, s4, 8
s_mov_b32 s3, -8
s_waitcnt lgkmcnt(0)
v_add3_u32 v0, v3, v0, s5
v_lshl_add_u32 v2, v2, 7, v1
s_barrier
buffer_gl0_inv
.LBB2_3:
ds_load_b32 v5, v2
v_ashrrev_i32_e32 v1, 31, v0
v_add_nc_u32_e32 v2, 0x400, v2
s_add_i32 s3, s3, 8
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_cmp_gt_u32 s3, 23
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_nc_u32_e32 v0, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[3:4], v5, off
s_cbranch_scc0 .LBB2_3
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13copySharedMemPfPKf
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z13copySharedMemPfPKf, .Lfunc_end2-_Z13copySharedMemPfPKf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z14transposeNaivePfPKf
.globl _Z14transposeNaivePfPKf
.p2align 8
.type _Z14transposeNaivePfPKf,@function
_Z14transposeNaivePfPKf:
s_load_b32 s4, s[0:1], 0x10
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_lshl_b32 s5, s14, 5
s_lshl_b32 s6, s15, 5
s_load_b128 s[0:3], s[0:1], 0x0
v_add_nc_u32_e32 v2, s5, v1
v_add_nc_u32_e32 v3, s6, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v2, s4, v2
v_mul_lo_u32 v3, s4, v3
s_lshl_b32 s4, s4, 8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b32_e32 v2, 5, v2
v_lshlrev_b32_e32 v3, 5, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add3_u32 v2, v0, v2, s6
v_add3_u32 v0, v1, v3, s5
s_mov_b32 s5, -8
.p2align 6
.LBB3_1:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s5, s5, 8
s_cmp_gt_u32 s5, 23
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_nc_u32_e32 v0, s4, v0
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
global_load_b32 v1, v[3:4], off
v_add_nc_u32_e32 v3, s5, v2
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_add_co_u32 v3, vcc_lo, s0, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[3:4], v1, off
s_cbranch_scc0 .LBB3_1
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14transposeNaivePfPKf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z14transposeNaivePfPKf, .Lfunc_end3-_Z14transposeNaivePfPKf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z18transposeCoalescedPfPKf
.globl _Z18transposeCoalescedPfPKf
.p2align 8
.type _Z18transposeCoalescedPfPKf,@function
_Z18transposeCoalescedPfPKf:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x10
s_load_b64 s[2:3], s[0:1], 0x8
v_bfe_u32 v2, v0, 10, 10
s_lshl_b32 s5, s15, 5
v_and_b32_e32 v3, 0x3ff, v0
s_lshl_b32 s6, s14, 5
s_mov_b32 s8, -8
v_add_nc_u32_e32 v1, s5, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v0, 2, v3
v_lshl_add_u32 v4, v2, 7, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, s4, v1
s_lshl_b32 s7, s4, 8
v_lshlrev_b32_e32 v1, 5, v1
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v0, v3, v1, s6
.LBB4_1:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s8, s8, 8
s_cmp_gt_u32 s8, 23
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[5:6], 2, v[0:1]
v_add_nc_u32_e32 v0, s7, v0
v_add_co_u32 v5, vcc_lo, s2, v5
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v6, vcc_lo
global_load_b32 v1, v[5:6], off
s_waitcnt vmcnt(0)
ds_store_b32 v4, v1
v_add_nc_u32_e32 v4, 0x400, v4
s_cbranch_scc0 .LBB4_1
v_add_nc_u32_e32 v0, s6, v2
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 2, v2
s_lshl_b32 s2, s4, 8
s_mov_b32 s3, -8
v_mul_lo_u32 v0, s4, v0
s_waitcnt lgkmcnt(0)
v_lshl_add_u32 v2, v3, 7, v1
s_barrier
buffer_gl0_inv
v_lshlrev_b32_e32 v0, 5, v0
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v0, v3, v0, s5
.LBB4_3:
ds_load_b32 v5, v2
v_ashrrev_i32_e32 v1, 31, v0
v_add_nc_u32_e32 v2, 32, v2
s_add_i32 s3, s3, 8
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_cmp_gt_u32 s3, 23
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_nc_u32_e32 v0, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[3:4], v5, off
s_cbranch_scc0 .LBB4_3
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18transposeCoalescedPfPKf
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z18transposeCoalescedPfPKf, .Lfunc_end4-_Z18transposeCoalescedPfPKf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z24transposeNoBankConflictsPfPKf
.globl _Z24transposeNoBankConflictsPfPKf
.p2align 8
.type _Z24transposeNoBankConflictsPfPKf,@function
_Z24transposeNoBankConflictsPfPKf:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x10
s_load_b64 s[2:3], s[0:1], 0x8
v_bfe_u32 v2, v0, 10, 10
s_lshl_b32 s5, s15, 5
v_and_b32_e32 v3, 0x3ff, v0
s_lshl_b32 s6, s14, 5
s_mov_b32 s8, -8
v_add_nc_u32_e32 v1, s5, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v0, 2, v3
v_mad_u32_u24 v4, v2, 0x84, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, s4, v1
s_lshl_b32 s7, s4, 8
v_lshlrev_b32_e32 v1, 5, v1
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v0, v3, v1, s6
.LBB5_1:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v1, 31, v0
s_add_i32 s8, s8, 8
s_cmp_gt_u32 s8, 23
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[5:6], 2, v[0:1]
v_add_nc_u32_e32 v0, s7, v0
v_add_co_u32 v5, vcc_lo, s2, v5
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v6, vcc_lo
global_load_b32 v1, v[5:6], off
s_waitcnt vmcnt(0)
ds_store_b32 v4, v1
v_add_nc_u32_e32 v4, 0x420, v4
s_cbranch_scc0 .LBB5_1
v_add_nc_u32_e32 v0, s6, v2
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 2, v2
s_lshl_b32 s2, s4, 8
s_mov_b32 s3, -8
v_mul_lo_u32 v0, s4, v0
s_waitcnt lgkmcnt(0)
v_mad_u32_u24 v2, v3, 0x84, v1
s_barrier
buffer_gl0_inv
v_lshlrev_b32_e32 v0, 5, v0
s_delay_alu instid0(VALU_DEP_1)
v_add3_u32 v0, v3, v0, s5
.LBB5_3:
ds_load_b32 v5, v2
v_ashrrev_i32_e32 v1, 31, v0
v_add_nc_u32_e32 v2, 32, v2
s_add_i32 s3, s3, 8
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_cmp_gt_u32 s3, 23
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_nc_u32_e32 v0, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[3:4], v5, off
s_cbranch_scc0 .LBB5_3
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z24transposeNoBankConflictsPfPKf
.amdhsa_group_segment_fixed_size 4224
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end5:
.size _Z24transposeNoBankConflictsPfPKf, .Lfunc_end5-_Z24transposeNoBankConflictsPfPKf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z20MatrixMultiplyKernelPKfS0_Pfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z20MatrixMultiplyKernelPKfS0_Pfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4copyPfPKf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4copyPfPKf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13copySharedMemPfPKf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13copySharedMemPfPKf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14transposeNaivePfPKf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14transposeNaivePfPKf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18transposeCoalescedPfPKf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18transposeCoalescedPfPKf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4224
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z24transposeNoBankConflictsPfPKf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z24transposeNoBankConflictsPfPKf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "BasicTool.cuh"
// Convert an angle from degrees to radians.
double AngleToRad(double angle)
{
	// Same evaluation order as (angle * PI) / 180.
	double scaled = angle * PI;
	return scaled / 180;
}
// Compute the Euclidean distance between two 3D points a and b.
float CalculateDistanceOfBipartite(Point a,Point b)
{
	float dx = a.x - b.x;
	float dy = a.y - b.y;
	float dz = a.z - b.z;
	// sqrtf keeps the computation in single precision; the original used the
	// double-precision sqrt and truncated the result back to float on return.
	return sqrtf(dx*dx + dy*dy + dz*dz);
}
// Normalize the 3-component vector (*a, *b, *c) to unit length in place.
// NOTE(review): a zero-length input divides by zero (Inf/NaN components),
// matching the original behavior -- callers must pass a non-zero vector.
void UitizeVector(float *a,float *b,float *c)
{
	// sqrtf keeps the magnitude in single precision (original used double sqrt).
	float length = sqrtf((*a)*(*a) + (*b)*(*b) + (*c)*(*c));
	*a = *a / length;
	*b = *b / length;
	*c = *c / length;
}
// Reflect the incident direction d about the surface normal n:
//   r = d - 2 (d . n) n
// n is expected to be a unit normal (callers normalize it first).
Point GetReflectedVector(Point d,Point n)
{
	float twoDotDN = 2 * (d.x*n.x + d.y*n.y + d.z*n.z);
	Point reflected;
	reflected.x = d.x - twoDotDN * n.x;
	reflected.y = d.y - twoDotDN * n.y;
	reflected.z = d.z - twoDotDN * n.z;
	return reflected;
}
// Intersect one ray with every triangle in face[0..faceCount) and return the
// reflected ray at the nearest accepted hit (ray/triangle intersection solved
// by Cramer's rule on  origin + t*dir = A + u*(B-A) + v*(C-A)).
// On a hit, *flag is set to 1 and *reflectedFace receives the winning triangle
// index; if nothing is hit, both are left untouched and a zero ray is returned.
// NOTE(review): triangles with i % 12 >= 8 are never accepted as reflectors --
// presumably the non-reflective faces of each 12-face unit; confirm against
// the geometry builder.
Ray CalculateReflectedRayOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)
{
	Ray reflectedRay;
	Point defaultPoint;
	defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
	reflectedRay.originalPoint=defaultPoint;
	reflectedRay.direction=defaultPoint;
	float t=50000;  // parametric distance of the nearest accepted hit so far
	for (int i=0;i<faceCount;i++)
	{
		// Columns of the 3x3 Cramer system: -dir, B-A, C-A; rhs = origin-A.
		float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
		float b1=face[i].B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
		float c1=face[i].C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
		float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
		float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
		float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
		float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
		float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
		// BUG FIX: the original used abs(), which on standard C++ toolchains
		// resolves to the integer overload and truncates the float denominator
		// (e.g. |d| = 0.5 -> 0), wrongly rejecting valid intersections.
		// fabsf keeps the near-singular test in single precision.
		if (fabsf(denominator)>0.000001f)
		{
			float u=u_numerator/denominator;
			float v=v_numerator/denominator;
			float tCandidate=t_numerator/denominator;  // hoisted: was computed three times
			int remain=i%12;
			// Accept hits strictly in front of the origin (t > 1), inside the
			// triangle (u,v barycentric bounds), nearer than any previous hit,
			// and on a reflective face (remain < 8).
			if(tCandidate<t&&tCandidate>1&&u>=0&&u<=1&&v>0&&(u+v)<1&&v<=1&&remain<8)
			{
				*flag=1;
				*reflectedFace=i;
				t=tCandidate;
				reflectedRay.originalPoint.x=u*b1+v*c1+face[i].A.x;
				reflectedRay.originalPoint.y=u*b2+v*c2+face[i].A.y;
				reflectedRay.originalPoint.z=u*b3+v*c3+face[i].A.z;
				Point n;  // triangle normal = (B-A) x (C-A), normalized below
				n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
				UitizeVector(&n.x,&n.y,&n.z);
				reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
			}
		}
	}
	return reflectedRay;
}
// Compute the virtual reflected ray of one incident ray against a single
// triangle (same Cramer's-rule solve as CalculateReflectedRayOnCPU, but with
// no barycentric containment test, so the reflection plane is treated as
// unbounded).  Returns a zero ray when the ray is parallel to the plane or the
// intersection lies at t <= 1.
Ray CalculateVirtualReflectedRayOnCPU(Ray incidentRay,Face face)
{
	Ray reflectedRay;
	Point defaultPoint;
	defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
	reflectedRay.originalPoint=defaultPoint;
	reflectedRay.direction=defaultPoint;
	float t=50000;  // upper bound on the accepted parametric distance
	// Columns of the 3x3 Cramer system: -dir, B-A, C-A; rhs = origin-A.
	float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
	float b1=face.B.x-face.A.x,b2=face.B.y-face.A.y,b3=face.B.z-face.A.z;
	float c1=face.C.x-face.A.x,c2=face.C.y-face.A.y,c3=face.C.z-face.A.z;
	float x1=incidentRay.originalPoint.x-face.A.x,x2=incidentRay.originalPoint.y-face.A.y,x3=incidentRay.originalPoint.z-face.A.z;
	float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
	float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
	float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
	float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
	// BUG FIX: the original used abs(), which on standard C++ toolchains
	// resolves to the integer overload and truncates the float denominator
	// (e.g. |d| = 0.5 -> 0), wrongly rejecting valid intersections.
	if (fabsf(denominator)>0.000001f)
	{
		float u=u_numerator/denominator;
		float v=v_numerator/denominator;
		float tCandidate=t_numerator/denominator;  // hoisted: was computed three times
		if(tCandidate<t&&tCandidate>1)
		{
			t=tCandidate;
			reflectedRay.originalPoint.x=u*b1+v*c1+face.A.x;
			reflectedRay.originalPoint.y=u*b2+v*c2+face.A.y;
			reflectedRay.originalPoint.z=u*b3+v*c3+face.A.z;
			Point n;  // triangle normal = (B-A) x (C-A), normalized below
			n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
			UitizeVector(&n.x,&n.y,&n.z);
			reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
		}
	}
	return reflectedRay;
}
//Same nearest-hit reflection search as CalculateReflectedRayOnCPU, but WITHOUT
//the i%12 face filter: every triangle (e.g. terrain triangles) may reflect.
//On a hit: *flag is set to 1, *reflectedFace receives the face index, and the
//returned ray starts at the hit point with the mirror-reflected direction.
//When no face is hit the all-zero ray is returned and the out-params are left
//untouched.
Ray CalculateReflectedRayWithTerrainOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)//intersect one ray with all triangle faces (terrain included)
{
Ray reflectedRay;
Point defaultPoint;
defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
reflectedRay.originalPoint=defaultPoint;
reflectedRay.direction=defaultPoint;
float t=50000;//upper bound on the accepted ray parameter; shrinks to the nearest hit
for (int i=0;i<faceCount;i++)
{
//columns of the 3x3 linear system: -direction, the two triangle edges, plus the RHS
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face[i].B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
float c1=face[i].C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
//determinant expansions for Cramer's rule: t,u,v = *_numerator/denominator
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
//NOTE(review): relies on a float overload of abs() being in scope -- see
//CalculateReflectedRayOnCPU.
if (abs(denominator)>0.000001)//skip rays (near-)parallel to the triangle plane
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
//accept only the nearest hit so far that lies strictly inside the triangle
if(t_numerator/denominator<t&&t_numerator/denominator>1&&u>=0&&u<=1&&v>0&&(u+v)<1&&v<=1)
{
*flag=1;
*reflectedFace=i;
t=t_numerator/denominator;
//hit point = A + u*(B-A) + v*(C-A)
reflectedRay.originalPoint.x=u*b1+v*c1+face[i].A.x;
reflectedRay.originalPoint.y=u*b2+v*c2+face[i].A.y;
reflectedRay.originalPoint.z=u*b3+v*c3+face[i].A.z;
Point n;
//face normal = (B-A) x (C-A), normalized before reflecting
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
UitizeVector(&n.x,&n.y,&n.z);
reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
}
}
}
return reflectedRay;
}
//Compute the intersection point of two rays (used to find the virtual emission point for diffraction)
//Intersect two rays (treated as infinite lines). det is the scalar triple
//product of ray1.direction, -ray2.direction and (origin2 - origin1); |det| small
//means the two lines are coplanar, so an intersection can exist:
//  - if the u-numerator (c1*b2 - c2*b1) is also ~0, ray1's origin is returned;
//  - otherwise u is solved from the x/y 2x2 subsystem and the point
//    origin1 + u*direction1 is returned.
//Skew (non-coplanar) lines yield (0,0,0).
//NOTE(review): (0,0,0) doubles as the "no intersection" sentinel, so a genuine
//intersection at the origin is indistinguishable from failure -- confirm callers
//never place geometry there.
Point GetIntersectionOfRays(Ray ray1,Ray ray2)
{
float a1=ray1.direction.x,a2=ray1.direction.y,a3=ray1.direction.z;
float b1=-ray2.direction.x,b2=-ray2.direction.y,b3=-ray2.direction.z;
float c1=ray2.originalPoint.x-ray1.originalPoint.x,c2=ray2.originalPoint.y-ray1.originalPoint.y,c3=ray2.originalPoint.z-ray1.originalPoint.z;
float det=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
Point intersection;
intersection.x=0;intersection.y=0;intersection.z=0;
if(abs(det)<0.001)//lines are (near-)coplanar
{
if (abs(c1*b2-c2*b1)<0.001)//u ~ 0: intersection at ray1's origin
{
intersection.x=ray1.originalPoint.x;
intersection.y=ray1.originalPoint.y;
intersection.z=ray1.originalPoint.z;
}
else if ((abs(b2*a1-b1*a2)>0.00001))//solvable 2x2 subsystem in x/y
{
float u=(c1*b2-c2*b1)/(b2*a1-b1*a2);
//float v=(c2*a1-c1*a2)/(b2*a1-b1*a2);
intersection.x=ray1.originalPoint.x+u*a1;
intersection.y=ray1.originalPoint.y+u*a2;
intersection.z=ray1.originalPoint.z+u*a3;
}
}
else
{
intersection.x=0;intersection.y=0;intersection.z=0;//skew lines: no intersection
}
return intersection;
}
//Exact component-wise equality of two points (bitwise float comparison, no
//tolerance).
bool JudgePointEqual(Point a, Point b)//check whether two points are identical
{
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
//Swap the contents of the two rays in place (whole-struct copies).
void ExchangeTwoRay(Ray *ray1, Ray *ray2)//exchange two rays
{
    Ray held = *ray2;
    *ray2 = *ray1;
    *ray1 = held;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "BasicTool.cuh"
//Convert an angle from degrees to radians
//Degrees-to-radians conversion: angle * PI / 180.
double AngleToRad(double angle)
{
    return angle * PI / 180;
}
//Compute the distance between two points
//Euclidean distance between the 3-D points a and b.
float CalculateDistanceOfBipartite(Point a,Point b)
{
    float dx = a.x - b.x;
    float dy = a.y - b.y;
    float dz = a.z - b.z;
    return sqrt(dx * dx + dy * dy + dz * dz);
}
//Normalize the vector (*a,*b,*c) to unit length in place.
//NOTE(review): divides by zero for the null vector -- callers pass triangle
//normals, so this assumes non-degenerate faces.
void UitizeVector(float *a,float *b,float *c)//normalize a vector
{
    float norm = sqrt((*a) * (*a) + (*b) * (*b) + (*c) * (*c));
    *a /= norm;
    *b /= norm;
    *c /= norm;
}
//Mirror-reflect direction d about the unit normal n: r = d - 2*(d.n)*n.
//Assumes n is already normalized (see UitizeVector).
Point GetReflectedVector(Point d,Point n)//reflected direction from incident direction and unit face normal
{
    Point r;
    float twoDot = 2 * (d.x * n.x + d.y * n.y + d.z * n.z);
    r.x = d.x - twoDot * n.x;
    r.y = d.y - twoDot * n.y;
    r.z = d.z - twoDot * n.z;
    return r;
}
//Find the first (nearest) reflection of `incidentRay` on any of the `faceCount`
//triangles in `face`, by a Cramer's-rule solve of
//  origin + t*dir = A + u*(B-A) + v*(C-A)
//A hit is accepted when 1 < t < 50000, 0 <= u <= 1, 0 < v <= 1 and u+v < 1.
//On a hit: *flag is set to 1, *reflectedFace receives the face index, and the
//returned ray starts at the hit point with the mirror-reflected direction
//(via GetReflectedVector). If no face is hit the all-zero ray is returned and
//*flag / *reflectedFace are left untouched.
Ray CalculateReflectedRayOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)//intersect one ray with all triangle faces
{
Ray reflectedRay;
Point defaultPoint;
defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
reflectedRay.originalPoint=defaultPoint;
reflectedRay.direction=defaultPoint;
float t=50000;//upper bound on the accepted ray parameter; shrinks to the nearest hit
for (int i=0;i<faceCount;i++)
{
//columns of the 3x3 linear system: -direction, the two triangle edges, plus the RHS
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face[i]. B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
float c1=face[i]. C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
//determinant expansions for Cramer's rule: t,u,v = *_numerator/denominator
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
//NOTE(review): relies on a float overload of abs() being in scope (e.g. via
//<cmath>); the C int abs() would truncate the operand to 0 -- verify headers.
if (abs(denominator)>0.000001)//skip rays (near-)parallel to the triangle plane
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
int remain=i%12;
//accept only the nearest in-triangle hit so far; the remain<8 filter skips
//faces 8..11 of every 12-face group -- presumably specific faces of a
//12-triangle box mesh; TODO confirm against the geometry builder
if(t_numerator/denominator<t&&t_numerator/denominator>1&&u>=0&&u<=1&&v>0&&(u+v)<1&&v<=1&&remain<8)
{
*flag=1;
*reflectedFace=i;
t=t_numerator/denominator;
//hit point = A + u*(B-A) + v*(C-A)
reflectedRay.originalPoint.x=u*b1+v*c1+face[i].A.x;
reflectedRay.originalPoint.y=u*b2+v*c2+face[i].A.y;
reflectedRay.originalPoint.z=u*b3+v*c3+face[i].A.z;
Point n;
//face normal = (B-A) x (C-A), normalized before reflecting
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
UitizeVector(&n.x,&n.y,&n.z);
reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
}
}
}
return reflectedRay;
}
//Compute the "virtual" reflection of `incidentRay` on a single triangle's
//supporting plane. Unlike CalculateReflectedRayOnCPU, the plane coordinates u,v
//are NOT constrained to lie inside the triangle, so the reflection point may be
//anywhere on the plane (hence virtual); only 1 < t < 50000 is required.
//Returns the all-zero ray when the ray is (near-)parallel to the plane or t is
//out of range.
Ray CalculateVirtualReflectedRayOnCPU(Ray incidentRay,Face face)//virtual reflected ray of one ray off one face's plane
{
Ray reflectedRay;
Point defaultPoint;
defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
reflectedRay.originalPoint=defaultPoint;
reflectedRay.direction=defaultPoint;
float t=50000;//upper bound on the accepted ray parameter
//columns of the 3x3 linear system: -direction, the two triangle edges, plus the RHS
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face.B.x-face.A.x,b2=face.B.y-face.A.y,b3=face.B.z-face.A.z;
float c1=face.C.x-face.A.x,c2=face.C.y-face.A.y,c3=face.C.z-face.A.z;
float x1=incidentRay.originalPoint.x-face.A.x,x2=incidentRay.originalPoint.y-face.A.y,x3=incidentRay.originalPoint.z-face.A.z;
//determinant expansions for Cramer's rule: t,u,v = *_numerator/denominator
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
//NOTE(review): relies on a float overload of abs() being in scope -- see
//CalculateReflectedRayOnCPU.
if (abs(denominator)>0.000001)//skip rays (near-)parallel to the plane
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
if(t_numerator/denominator<t&&t_numerator/denominator>1)
{
t=t_numerator/denominator;
//plane point = A + u*(B-A) + v*(C-A)
reflectedRay.originalPoint.x=u*b1+v*c1+face.A.x;
reflectedRay.originalPoint.y=u*b2+v*c2+face.A.y;
reflectedRay.originalPoint.z=u*b3+v*c3+face.A.z;
Point n;
//face normal = (B-A) x (C-A), normalized before reflecting
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
UitizeVector(&n.x,&n.y,&n.z);
reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
}
}
return reflectedRay;
}
//Same nearest-hit reflection search as CalculateReflectedRayOnCPU, but WITHOUT
//the i%12 face filter: every triangle (e.g. terrain triangles) may reflect.
//On a hit: *flag is set to 1, *reflectedFace receives the face index, and the
//returned ray starts at the hit point with the mirror-reflected direction.
//When no face is hit the all-zero ray is returned and the out-params are left
//untouched.
Ray CalculateReflectedRayWithTerrainOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)//intersect one ray with all triangle faces (terrain included)
{
Ray reflectedRay;
Point defaultPoint;
defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
reflectedRay.originalPoint=defaultPoint;
reflectedRay.direction=defaultPoint;
float t=50000;//upper bound on the accepted ray parameter; shrinks to the nearest hit
for (int i=0;i<faceCount;i++)
{
//columns of the 3x3 linear system: -direction, the two triangle edges, plus the RHS
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face[i].B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
float c1=face[i].C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
//determinant expansions for Cramer's rule: t,u,v = *_numerator/denominator
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
//NOTE(review): relies on a float overload of abs() being in scope -- see
//CalculateReflectedRayOnCPU.
if (abs(denominator)>0.000001)//skip rays (near-)parallel to the triangle plane
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
//accept only the nearest hit so far that lies strictly inside the triangle
if(t_numerator/denominator<t&&t_numerator/denominator>1&&u>=0&&u<=1&&v>0&&(u+v)<1&&v<=1)
{
*flag=1;
*reflectedFace=i;
t=t_numerator/denominator;
//hit point = A + u*(B-A) + v*(C-A)
reflectedRay.originalPoint.x=u*b1+v*c1+face[i].A.x;
reflectedRay.originalPoint.y=u*b2+v*c2+face[i].A.y;
reflectedRay.originalPoint.z=u*b3+v*c3+face[i].A.z;
Point n;
//face normal = (B-A) x (C-A), normalized before reflecting
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
UitizeVector(&n.x,&n.y,&n.z);
reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
}
}
}
return reflectedRay;
}
//Compute the intersection point of two rays (used to find the virtual emission point for diffraction)
//Intersect two rays (treated as infinite lines). det is the scalar triple
//product of ray1.direction, -ray2.direction and (origin2 - origin1); |det| small
//means the two lines are coplanar, so an intersection can exist:
//  - if the u-numerator (c1*b2 - c2*b1) is also ~0, ray1's origin is returned;
//  - otherwise u is solved from the x/y 2x2 subsystem and the point
//    origin1 + u*direction1 is returned.
//Skew (non-coplanar) lines yield (0,0,0).
//NOTE(review): (0,0,0) doubles as the "no intersection" sentinel, so a genuine
//intersection at the origin is indistinguishable from failure -- confirm callers
//never place geometry there.
Point GetIntersectionOfRays(Ray ray1,Ray ray2)
{
float a1=ray1.direction.x,a2=ray1.direction.y,a3=ray1.direction.z;
float b1=-ray2.direction.x,b2=-ray2.direction.y,b3=-ray2.direction.z;
float c1=ray2.originalPoint.x-ray1.originalPoint.x,c2=ray2.originalPoint.y-ray1.originalPoint.y,c3=ray2.originalPoint.z-ray1.originalPoint.z;
float det=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
Point intersection;
intersection.x=0;intersection.y=0;intersection.z=0;
if(abs(det)<0.001)//lines are (near-)coplanar
{
if (abs(c1*b2-c2*b1)<0.001)//u ~ 0: intersection at ray1's origin
{
intersection.x=ray1.originalPoint.x;
intersection.y=ray1.originalPoint.y;
intersection.z=ray1.originalPoint.z;
}
else if ((abs(b2*a1-b1*a2)>0.00001))//solvable 2x2 subsystem in x/y
{
float u=(c1*b2-c2*b1)/(b2*a1-b1*a2);
//float v=(c2*a1-c1*a2)/(b2*a1-b1*a2);
intersection.x=ray1.originalPoint.x+u*a1;
intersection.y=ray1.originalPoint.y+u*a2;
intersection.z=ray1.originalPoint.z+u*a3;
}
}
else
{
intersection.x=0;intersection.y=0;intersection.z=0;//skew lines: no intersection
}
return intersection;
}
//Exact component-wise equality of two points (bitwise float comparison, no
//tolerance).
bool JudgePointEqual(Point a, Point b)//check whether two points are identical
{
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
void ExchangeTwoRay(Ray *ray1, Ray *ray2)//½»»»Á½ÌõÉäÏß
{
Ray tempRay;
tempRay = *ray1;
*ray1 = *ray2;
*ray2 = tempRay;
} | .file "tmpxft_001241a6_00000000-6_BasicTool.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3812:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3812:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10AngleToRadd
.type _Z10AngleToRadd, @function
_Z10AngleToRadd:
.LFB3800:
.cfi_startproc
endbr64
mulsd .LC0(%rip), %xmm0
divsd .LC1(%rip), %xmm0
ret
.cfi_endproc
.LFE3800:
.size _Z10AngleToRadd, .-_Z10AngleToRadd
.globl _Z28CalculateDistanceOfBipartite5PointS_
.type _Z28CalculateDistanceOfBipartite5PointS_, @function
_Z28CalculateDistanceOfBipartite5PointS_:
.LFB3801:
.cfi_startproc
endbr64
movq %xmm0, -16(%rsp)
movq %xmm2, -32(%rsp)
movss -16(%rsp), %xmm2
subss -32(%rsp), %xmm2
movss -12(%rsp), %xmm0
subss -28(%rsp), %xmm0
subss %xmm3, %xmm1
mulss %xmm2, %xmm2
mulss %xmm0, %xmm0
addss %xmm0, %xmm2
mulss %xmm1, %xmm1
addss %xmm1, %xmm2
movaps %xmm2, %xmm0
sqrtss %xmm0, %xmm0
ret
.cfi_endproc
.LFE3801:
.size _Z28CalculateDistanceOfBipartite5PointS_, .-_Z28CalculateDistanceOfBipartite5PointS_
.globl _Z12UitizeVectorPfS_S_
.type _Z12UitizeVectorPfS_S_, @function
_Z12UitizeVectorPfS_S_:
.LFB3802:
.cfi_startproc
endbr64
movss (%rdi), %xmm1
movss (%rsi), %xmm3
movss (%rdx), %xmm2
movaps %xmm1, %xmm0
mulss %xmm1, %xmm0
mulss %xmm3, %xmm3
addss %xmm3, %xmm0
mulss %xmm2, %xmm2
addss %xmm2, %xmm0
sqrtss %xmm0, %xmm0
divss %xmm0, %xmm1
movss %xmm1, (%rdi)
movss (%rsi), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rsi)
movss (%rdx), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rdx)
ret
.cfi_endproc
.LFE3802:
.size _Z12UitizeVectorPfS_S_, .-_Z12UitizeVectorPfS_S_
.globl _Z18GetReflectedVector5PointS_
.type _Z18GetReflectedVector5PointS_, @function
_Z18GetReflectedVector5PointS_:
.LFB3803:
.cfi_startproc
endbr64
movq %xmm0, -40(%rsp)
movq %xmm2, -56(%rsp)
movss -40(%rsp), %xmm5
movss -36(%rsp), %xmm2
movss -56(%rsp), %xmm6
movss -52(%rsp), %xmm4
movaps %xmm5, %xmm0
mulss %xmm6, %xmm0
movaps %xmm2, %xmm7
mulss %xmm4, %xmm7
addss %xmm7, %xmm0
movaps %xmm1, %xmm7
mulss %xmm3, %xmm7
addss %xmm7, %xmm0
addss %xmm0, %xmm0
mulss %xmm0, %xmm6
subss %xmm6, %xmm5
movss %xmm5, -20(%rsp)
mulss %xmm0, %xmm4
subss %xmm4, %xmm2
movss %xmm2, -16(%rsp)
mulss %xmm0, %xmm3
movq -20(%rsp), %xmm0
subss %xmm3, %xmm1
ret
.cfi_endproc
.LFE3803:
.size _Z18GetReflectedVector5PointS_, .-_Z18GetReflectedVector5PointS_
.globl _Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_
.type _Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_, @function
_Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_:
.LFB3804:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r13
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $0x00000000, (%rdi)
movl $0x00000000, 4(%rdi)
movl $0x00000000, 8(%rdi)
movl $0x00000000, 12(%rdi)
movl $0x00000000, 16(%rdi)
movl $0x00000000, 20(%rdi)
testl %edx, %edx
jle .L7
movl %edx, %r12d
movq %rcx, %r14
movq %r8, %r15
movq %rsi, %rbx
movl $0, %ebp
movl .LC2(%rip), %ecx
jmp .L17
.L9:
addl $1, %ebp
addq $36, %rbx
cmpl %ebp, %r12d
je .L7
.L17:
movss 140(%rsp), %xmm10
xorps .LC4(%rip), %xmm10
movaps %xmm10, %xmm13
movss 144(%rsp), %xmm6
xorps .LC4(%rip), %xmm6
movss 148(%rsp), %xmm5
xorps .LC4(%rip), %xmm5
movss (%rbx), %xmm11
movss 12(%rbx), %xmm8
subss %xmm11, %xmm8
movss 4(%rbx), %xmm14
movss 16(%rbx), %xmm2
subss %xmm14, %xmm2
movss 8(%rbx), %xmm15
movss 20(%rbx), %xmm1
subss %xmm15, %xmm1
movss 24(%rbx), %xmm7
subss %xmm11, %xmm7
movss 28(%rbx), %xmm3
subss %xmm14, %xmm3
movss 32(%rbx), %xmm4
subss %xmm15, %xmm4
movaps %xmm2, %xmm9
mulss %xmm4, %xmm9
movaps %xmm1, %xmm0
mulss %xmm3, %xmm0
subss %xmm0, %xmm9
movaps %xmm6, %xmm10
mulss %xmm4, %xmm10
movaps %xmm5, %xmm0
mulss %xmm3, %xmm0
subss %xmm0, %xmm10
movaps %xmm6, %xmm12
mulss %xmm1, %xmm12
movaps %xmm5, %xmm0
mulss %xmm2, %xmm0
subss %xmm0, %xmm12
movss %xmm13, 12(%rsp)
movaps %xmm13, %xmm0
mulss %xmm9, %xmm0
movss %xmm10, 8(%rsp)
movaps %xmm10, %xmm13
mulss %xmm8, %xmm13
subss %xmm13, %xmm0
movaps %xmm12, %xmm13
mulss %xmm7, %xmm13
addss %xmm13, %xmm0
movaps %xmm0, %xmm13
andps .LC5(%rip), %xmm13
cvtss2sd %xmm13, %xmm13
comisd .LC6(%rip), %xmm13
jbe .L9
movss 128(%rsp), %xmm13
subss %xmm11, %xmm13
movss %xmm13, 16(%rsp)
movss 132(%rsp), %xmm11
subss %xmm14, %xmm11
movaps %xmm11, %xmm10
movss 136(%rsp), %xmm13
subss %xmm15, %xmm13
movaps %xmm11, %xmm15
mulss %xmm4, %xmm15
movaps %xmm3, %xmm11
mulss %xmm13, %xmm11
movaps %xmm15, %xmm14
subss %xmm11, %xmm14
movss %xmm10, 28(%rsp)
movaps %xmm10, %xmm11
mulss %xmm1, %xmm11
movaps %xmm2, %xmm15
mulss %xmm13, %xmm15
movss %xmm15, 20(%rsp)
movss 16(%rsp), %xmm15
mulss %xmm9, %xmm15
movaps %xmm14, %xmm10
mulss %xmm8, %xmm10
subss %xmm10, %xmm15
movss %xmm15, 24(%rsp)
movaps %xmm11, %xmm10
subss 20(%rsp), %xmm10
movaps %xmm10, %xmm15
mulss %xmm7, %xmm15
addss 24(%rsp), %xmm15
divss %xmm0, %xmm15
movss %xmm15, 24(%rsp)
movd %ecx, %xmm10
comiss %xmm15, %xmm10
jbe .L9
comiss .LC7(%rip), %xmm15
jbe .L9
mulss %xmm13, %xmm6
mulss 28(%rsp), %xmm5
subss %xmm5, %xmm6
movaps %xmm14, %xmm5
movss 12(%rsp), %xmm13
mulss %xmm13, %xmm5
movss 16(%rsp), %xmm14
movss 8(%rsp), %xmm10
mulss %xmm14, %xmm10
subss %xmm10, %xmm5
movaps %xmm6, %xmm10
mulss %xmm7, %xmm10
addss %xmm10, %xmm5
divss %xmm0, %xmm5
pxor %xmm10, %xmm10
comiss %xmm10, %xmm5
jb .L9
movss .LC7(%rip), %xmm10
comiss %xmm5, %xmm10
jb .L9
movss 20(%rsp), %xmm10
subss %xmm11, %xmm10
mulss %xmm13, %xmm10
mulss %xmm8, %xmm6
subss %xmm6, %xmm10
mulss %xmm14, %xmm12
movaps %xmm10, %xmm6
addss %xmm12, %xmm6
divss %xmm0, %xmm6
pxor %xmm0, %xmm0
comiss %xmm0, %xmm6
jbe .L9
movss .LC7(%rip), %xmm10
movaps %xmm5, %xmm0
addss %xmm6, %xmm0
comiss %xmm0, %xmm10
jbe .L9
movslq %ebp, %rax
imulq $715827883, %rax, %rax
sarq $33, %rax
movl %ebp, %esi
sarl $31, %esi
subl %esi, %eax
leal (%rax,%rax,2), %eax
sall $2, %eax
movl %ebp, %esi
subl %eax, %esi
cmpl $7, %esi
jg .L9
movl $1, (%r15)
movl %ebp, (%r14)
movaps %xmm8, %xmm10
mulss %xmm5, %xmm10
movaps %xmm7, %xmm0
mulss %xmm6, %xmm0
addss %xmm10, %xmm0
addss (%rbx), %xmm0
movss %xmm0, 0(%r13)
movaps %xmm2, %xmm10
mulss %xmm5, %xmm10
movaps %xmm3, %xmm0
mulss %xmm6, %xmm0
addss %xmm10, %xmm0
addss 4(%rbx), %xmm0
movss %xmm0, 4(%r13)
mulss %xmm1, %xmm5
mulss %xmm4, %xmm6
addss %xmm6, %xmm5
addss 8(%rbx), %xmm5
movss %xmm5, 8(%r13)
movss %xmm9, 44(%rsp)
mulss %xmm7, %xmm1
mulss %xmm8, %xmm4
subss %xmm4, %xmm1
movss %xmm1, 48(%rsp)
mulss %xmm3, %xmm8
mulss %xmm7, %xmm2
subss %xmm2, %xmm8
movss %xmm8, 52(%rsp)
leaq 44(%rsp), %rdi
leaq 52(%rsp), %rdx
leaq 48(%rsp), %rsi
call _Z12UitizeVectorPfS_S_
movq 44(%rsp), %xmm2
movss 52(%rsp), %xmm3
movq 140(%rsp), %xmm0
movss 148(%rsp), %xmm1
call _Z18GetReflectedVector5PointS_
movq %xmm0, 12(%r13)
movss %xmm1, 20(%r13)
movl 24(%rsp), %ecx
jmp .L9
.L7:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L28
movq %r13, %rax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3804:
.size _Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_, .-_Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_
.globl _Z33CalculateVirtualReflectedRayOnCPU3Ray4Face
.type _Z33CalculateVirtualReflectedRayOnCPU3Ray4Face, @function
_Z33CalculateVirtualReflectedRayOnCPU3Ray4Face:
.LFB3805:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbx
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl 104(%rsp), %ecx
movss 108(%rsp), %xmm13
movss 112(%rsp), %xmm14
movl $0x00000000, (%rdi)
movl $0x00000000, 4(%rdi)
movl $0x00000000, 8(%rdi)
movl $0x00000000, 12(%rdi)
movl $0x00000000, 16(%rdi)
movl $0x00000000, 20(%rdi)
movss .LC4(%rip), %xmm0
movss 92(%rsp), %xmm1
xorps %xmm0, %xmm1
movaps %xmm1, %xmm15
movss 96(%rsp), %xmm4
xorps %xmm0, %xmm4
movss 100(%rsp), %xmm7
xorps %xmm0, %xmm7
movss 116(%rsp), %xmm6
movd %ecx, %xmm5
subss %xmm5, %xmm6
movss 120(%rsp), %xmm2
subss %xmm13, %xmm2
movss 124(%rsp), %xmm1
subss %xmm14, %xmm1
movss 128(%rsp), %xmm8
subss %xmm5, %xmm8
movss 132(%rsp), %xmm5
subss %xmm13, %xmm5
movss 136(%rsp), %xmm3
subss %xmm14, %xmm3
movaps %xmm2, %xmm9
mulss %xmm3, %xmm9
movaps %xmm1, %xmm0
mulss %xmm5, %xmm0
subss %xmm0, %xmm9
movaps %xmm4, %xmm11
mulss %xmm3, %xmm11
movaps %xmm7, %xmm0
mulss %xmm5, %xmm0
subss %xmm0, %xmm11
movaps %xmm4, %xmm10
mulss %xmm1, %xmm10
movaps %xmm7, %xmm0
mulss %xmm2, %xmm0
subss %xmm0, %xmm10
movss %xmm15, 20(%rsp)
movaps %xmm15, %xmm0
mulss %xmm9, %xmm0
movaps %xmm11, %xmm12
mulss %xmm6, %xmm12
subss %xmm12, %xmm0
movaps %xmm10, %xmm12
mulss %xmm8, %xmm12
addss %xmm12, %xmm0
movaps %xmm0, %xmm12
andps .LC5(%rip), %xmm12
cvtss2sd %xmm12, %xmm12
comisd .LC6(%rip), %xmm12
jbe .L29
movss 80(%rsp), %xmm12
movd %ecx, %xmm15
subss %xmm15, %xmm12
movss %xmm12, 24(%rsp)
movss 84(%rsp), %xmm12
subss %xmm13, %xmm12
movaps %xmm12, %xmm15
movss 88(%rsp), %xmm12
subss %xmm14, %xmm12
movss %xmm12, 4(%rsp)
movss %xmm15, 28(%rsp)
movaps %xmm15, %xmm12
mulss %xmm3, %xmm12
movaps %xmm12, %xmm15
movss 4(%rsp), %xmm12
mulss %xmm5, %xmm12
subss %xmm12, %xmm15
movss %xmm15, 8(%rsp)
movss 28(%rsp), %xmm12
mulss %xmm1, %xmm12
movss %xmm12, 12(%rsp)
movss 4(%rsp), %xmm15
mulss %xmm2, %xmm15
movss %xmm15, 16(%rsp)
movss 24(%rsp), %xmm12
mulss %xmm9, %xmm12
movss 8(%rsp), %xmm15
mulss %xmm6, %xmm15
subss %xmm15, %xmm12
movss 12(%rsp), %xmm15
subss 16(%rsp), %xmm15
mulss %xmm8, %xmm15
addss %xmm15, %xmm12
divss %xmm0, %xmm12
movss .LC2(%rip), %xmm15
comiss %xmm12, %xmm15
jbe .L29
comiss .LC7(%rip), %xmm12
ja .L39
.L29:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L40
movq %rbx, %rax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
mulss 4(%rsp), %xmm4
mulss 28(%rsp), %xmm7
subss %xmm7, %xmm4
movss 8(%rsp), %xmm7
movss 20(%rsp), %xmm15
mulss %xmm15, %xmm7
movss 24(%rsp), %xmm12
mulss %xmm12, %xmm11
subss %xmm11, %xmm7
movaps %xmm4, %xmm11
mulss %xmm8, %xmm11
addss %xmm11, %xmm7
divss %xmm0, %xmm7
movss 16(%rsp), %xmm11
subss 12(%rsp), %xmm11
mulss %xmm15, %xmm11
mulss %xmm6, %xmm4
subss %xmm4, %xmm11
mulss %xmm12, %xmm10
addss %xmm10, %xmm11
divss %xmm0, %xmm11
movaps %xmm6, %xmm0
mulss %xmm7, %xmm0
movaps %xmm8, %xmm4
mulss %xmm11, %xmm4
addss %xmm4, %xmm0
movd %ecx, %xmm4
addss %xmm4, %xmm0
movss %xmm0, (%rdi)
movaps %xmm2, %xmm0
mulss %xmm7, %xmm0
movaps %xmm5, %xmm4
mulss %xmm11, %xmm4
addss %xmm4, %xmm0
addss %xmm13, %xmm0
movss %xmm0, 4(%rdi)
mulss %xmm1, %xmm7
mulss %xmm3, %xmm11
addss %xmm11, %xmm7
addss %xmm14, %xmm7
movss %xmm7, 8(%rdi)
movss %xmm9, 44(%rsp)
mulss %xmm8, %xmm1
mulss %xmm6, %xmm3
subss %xmm3, %xmm1
movss %xmm1, 48(%rsp)
mulss %xmm5, %xmm6
mulss %xmm8, %xmm2
subss %xmm2, %xmm6
movss %xmm6, 52(%rsp)
leaq 44(%rsp), %rdi
leaq 52(%rsp), %rdx
leaq 48(%rsp), %rsi
call _Z12UitizeVectorPfS_S_
movq 44(%rsp), %xmm2
movss 52(%rsp), %xmm3
movq 92(%rsp), %xmm0
movss 100(%rsp), %xmm1
call _Z18GetReflectedVector5PointS_
movq %xmm0, 12(%rbx)
movss %xmm1, 20(%rbx)
jmp .L29
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3805:
.size _Z33CalculateVirtualReflectedRayOnCPU3Ray4Face, .-_Z33CalculateVirtualReflectedRayOnCPU3Ray4Face
.globl _Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_
.type _Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_, @function
_Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_:
.LFB3806:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r13
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $0x00000000, (%rdi)
movl $0x00000000, 4(%rdi)
movl $0x00000000, 8(%rdi)
movl $0x00000000, 12(%rdi)
movl $0x00000000, 16(%rdi)
movl $0x00000000, 20(%rdi)
testl %edx, %edx
jle .L41
movl %edx, %r12d
movq %rcx, %r14
movq %r8, %r15
movq %rsi, %rbx
movl $0, %ebp
movl .LC2(%rip), %ecx
jmp .L51
.L43:
addl $1, %ebp
addq $36, %rbx
cmpl %ebp, %r12d
je .L41
.L51:
movss 140(%rsp), %xmm10
xorps .LC4(%rip), %xmm10
movaps %xmm10, %xmm13
movss 144(%rsp), %xmm6
xorps .LC4(%rip), %xmm6
movss 148(%rsp), %xmm5
xorps .LC4(%rip), %xmm5
movss (%rbx), %xmm11
movss 12(%rbx), %xmm8
subss %xmm11, %xmm8
movss 4(%rbx), %xmm14
movss 16(%rbx), %xmm2
subss %xmm14, %xmm2
movss 8(%rbx), %xmm15
movss 20(%rbx), %xmm1
subss %xmm15, %xmm1
movss 24(%rbx), %xmm7
subss %xmm11, %xmm7
movss 28(%rbx), %xmm3
subss %xmm14, %xmm3
movss 32(%rbx), %xmm4
subss %xmm15, %xmm4
movaps %xmm2, %xmm9
mulss %xmm4, %xmm9
movaps %xmm1, %xmm0
mulss %xmm3, %xmm0
subss %xmm0, %xmm9
movaps %xmm6, %xmm10
mulss %xmm4, %xmm10
movaps %xmm5, %xmm0
mulss %xmm3, %xmm0
subss %xmm0, %xmm10
movaps %xmm6, %xmm12
mulss %xmm1, %xmm12
movaps %xmm5, %xmm0
mulss %xmm2, %xmm0
subss %xmm0, %xmm12
movss %xmm13, 12(%rsp)
movaps %xmm13, %xmm0
mulss %xmm9, %xmm0
movss %xmm10, 8(%rsp)
movaps %xmm10, %xmm13
mulss %xmm8, %xmm13
subss %xmm13, %xmm0
movaps %xmm12, %xmm13
mulss %xmm7, %xmm13
addss %xmm13, %xmm0
movaps %xmm0, %xmm13
andps .LC5(%rip), %xmm13
cvtss2sd %xmm13, %xmm13
comisd .LC6(%rip), %xmm13
jbe .L43
movss 128(%rsp), %xmm13
subss %xmm11, %xmm13
movss %xmm13, 16(%rsp)
movss 132(%rsp), %xmm11
subss %xmm14, %xmm11
movaps %xmm11, %xmm10
movss 136(%rsp), %xmm13
subss %xmm15, %xmm13
movaps %xmm11, %xmm15
mulss %xmm4, %xmm15
movaps %xmm3, %xmm11
mulss %xmm13, %xmm11
movaps %xmm15, %xmm14
subss %xmm11, %xmm14
movss %xmm10, 28(%rsp)
movaps %xmm10, %xmm11
mulss %xmm1, %xmm11
movaps %xmm2, %xmm15
mulss %xmm13, %xmm15
movss %xmm15, 20(%rsp)
movss 16(%rsp), %xmm15
mulss %xmm9, %xmm15
movaps %xmm14, %xmm10
mulss %xmm8, %xmm10
subss %xmm10, %xmm15
movss %xmm15, 24(%rsp)
movaps %xmm11, %xmm10
subss 20(%rsp), %xmm10
movaps %xmm10, %xmm15
mulss %xmm7, %xmm15
addss 24(%rsp), %xmm15
divss %xmm0, %xmm15
movd %ecx, %xmm10
comiss %xmm15, %xmm10
jbe .L43
movss %xmm15, 24(%rsp)
comiss .LC7(%rip), %xmm15
jbe .L43
mulss %xmm13, %xmm6
mulss 28(%rsp), %xmm5
subss %xmm5, %xmm6
movaps %xmm14, %xmm5
movss 12(%rsp), %xmm13
mulss %xmm13, %xmm5
movss 16(%rsp), %xmm14
movss 8(%rsp), %xmm10
mulss %xmm14, %xmm10
subss %xmm10, %xmm5
movaps %xmm6, %xmm10
mulss %xmm7, %xmm10
addss %xmm10, %xmm5
divss %xmm0, %xmm5
pxor %xmm10, %xmm10
comiss %xmm10, %xmm5
jb .L43
movss .LC7(%rip), %xmm10
comiss %xmm5, %xmm10
jb .L43
movss 20(%rsp), %xmm10
subss %xmm11, %xmm10
mulss %xmm13, %xmm10
mulss %xmm8, %xmm6
subss %xmm6, %xmm10
mulss %xmm14, %xmm12
movaps %xmm10, %xmm6
addss %xmm12, %xmm6
divss %xmm0, %xmm6
pxor %xmm0, %xmm0
comiss %xmm0, %xmm6
jbe .L43
movss .LC7(%rip), %xmm10
movaps %xmm5, %xmm0
addss %xmm6, %xmm0
comiss %xmm0, %xmm10
jbe .L43
movl $1, (%r15)
movl %ebp, (%r14)
movaps %xmm8, %xmm10
mulss %xmm5, %xmm10
movaps %xmm7, %xmm0
mulss %xmm6, %xmm0
addss %xmm10, %xmm0
addss (%rbx), %xmm0
movss %xmm0, 0(%r13)
movaps %xmm2, %xmm10
mulss %xmm5, %xmm10
movaps %xmm3, %xmm0
mulss %xmm6, %xmm0
addss %xmm10, %xmm0
addss 4(%rbx), %xmm0
movss %xmm0, 4(%r13)
mulss %xmm1, %xmm5
mulss %xmm4, %xmm6
addss %xmm6, %xmm5
addss 8(%rbx), %xmm5
movss %xmm5, 8(%r13)
movss %xmm9, 44(%rsp)
mulss %xmm7, %xmm1
mulss %xmm8, %xmm4
subss %xmm4, %xmm1
movss %xmm1, 48(%rsp)
mulss %xmm3, %xmm8
mulss %xmm7, %xmm2
subss %xmm2, %xmm8
movss %xmm8, 52(%rsp)
leaq 44(%rsp), %rdi
leaq 52(%rsp), %rdx
leaq 48(%rsp), %rsi
call _Z12UitizeVectorPfS_S_
movq 44(%rsp), %xmm2
movss 52(%rsp), %xmm3
movq 140(%rsp), %xmm0
movss 148(%rsp), %xmm1
call _Z18GetReflectedVector5PointS_
movq %xmm0, 12(%r13)
movss %xmm1, 20(%r13)
movl 24(%rsp), %ecx
jmp .L43
.L41:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L62
movq %r13, %rax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3806:
.size _Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_, .-_Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_
.globl _Z21GetIntersectionOfRays3RayS_
.type _Z21GetIntersectionOfRays3RayS_, @function
_Z21GetIntersectionOfRays3RayS_:
.LFB3807:
.cfi_startproc
endbr64
movss 8(%rsp), %xmm9
movss 12(%rsp), %xmm8
movss 16(%rsp), %xmm1
movss 20(%rsp), %xmm12
movss 24(%rsp), %xmm10
movss 28(%rsp), %xmm11
movss .LC4(%rip), %xmm2
movss 44(%rsp), %xmm6
xorps %xmm2, %xmm6
movss 48(%rsp), %xmm4
xorps %xmm2, %xmm4
movss 52(%rsp), %xmm0
movaps %xmm0, %xmm7
xorps %xmm2, %xmm7
movss 32(%rsp), %xmm3
subss %xmm9, %xmm3
movss 36(%rsp), %xmm5
subss %xmm8, %xmm5
movss 40(%rsp), %xmm0
subss %xmm1, %xmm0
movaps %xmm4, %xmm2
mulss %xmm0, %xmm2
movaps %xmm7, %xmm13
mulss %xmm5, %xmm13
subss %xmm13, %xmm2
mulss %xmm12, %xmm2
mulss %xmm10, %xmm0
movaps %xmm11, %xmm13
mulss %xmm5, %xmm13
subss %xmm13, %xmm0
mulss %xmm6, %xmm0
subss %xmm0, %xmm2
mulss %xmm10, %xmm7
movaps %xmm7, %xmm0
movaps %xmm11, %xmm7
mulss %xmm4, %xmm7
subss %xmm7, %xmm0
mulss %xmm3, %xmm0
addss %xmm2, %xmm0
andps .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movsd .LC8(%rip), %xmm2
comisd %xmm0, %xmm2
jbe .L71
mulss %xmm4, %xmm3
mulss %xmm6, %xmm5
subss %xmm5, %xmm3
movaps %xmm3, %xmm0
andps .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm0, %xmm2
ja .L64
mulss %xmm12, %xmm4
mulss %xmm10, %xmm6
subss %xmm6, %xmm4
movaps %xmm4, %xmm0
andps .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
comisd .LC9(%rip), %xmm0
jbe .L72
divss %xmm4, %xmm3
mulss %xmm3, %xmm12
addss %xmm12, %xmm9
mulss %xmm3, %xmm10
addss %xmm10, %xmm8
mulss %xmm3, %xmm11
addss %xmm11, %xmm1
jmp .L64
.L71:
pxor %xmm1, %xmm1
movaps %xmm1, %xmm8
movaps %xmm1, %xmm9
.L64:
movss %xmm9, -20(%rsp)
movss %xmm8, -16(%rsp)
movq -20(%rsp), %xmm0
ret
.L72:
pxor %xmm1, %xmm1
movaps %xmm1, %xmm8
movaps %xmm1, %xmm9
jmp .L64
.cfi_endproc
.LFE3807:
.size _Z21GetIntersectionOfRays3RayS_, .-_Z21GetIntersectionOfRays3RayS_
.globl _Z15JudgePointEqual5PointS_
.type _Z15JudgePointEqual5PointS_, @function
_Z15JudgePointEqual5PointS_:
.LFB3808:
.cfi_startproc
endbr64
movq %xmm0, -16(%rsp)
movss %xmm1, -8(%rsp)
movq %xmm2, -32(%rsp)
movss %xmm3, -24(%rsp)
movss -16(%rsp), %xmm0
ucomiss -32(%rsp), %xmm0
jp .L77
jne .L77
movss -12(%rsp), %xmm0
ucomiss -28(%rsp), %xmm0
jp .L78
jne .L78
ucomiss %xmm3, %xmm1
setnp %al
movl $0, %edx
cmovne %edx, %eax
ret
.L77:
movl $0, %eax
ret
.L78:
movl $0, %eax
ret
.cfi_endproc
.LFE3808:
.size _Z15JudgePointEqual5PointS_, .-_Z15JudgePointEqual5PointS_
.globl _Z14ExchangeTwoRayP3RayS0_
.type _Z14ExchangeTwoRayP3RayS0_, @function
_Z14ExchangeTwoRayP3RayS0_:
.LFB3809:
.cfi_startproc
endbr64
movss (%rdi), %xmm5
movss 4(%rdi), %xmm4
movss 8(%rdi), %xmm3
movss 12(%rdi), %xmm2
movss 16(%rdi), %xmm1
movss 20(%rdi), %xmm0
movdqu (%rsi), %xmm6
movups %xmm6, (%rdi)
movq 16(%rsi), %rax
movq %rax, 16(%rdi)
movss %xmm5, (%rsi)
movss %xmm4, 4(%rsi)
movss %xmm3, 8(%rsi)
movss %xmm2, 12(%rsi)
movss %xmm1, 16(%rsi)
movss %xmm0, 20(%rsi)
ret
.cfi_endproc
.LFE3809:
.size _Z14ExchangeTwoRayP3RayS0_, .-_Z14ExchangeTwoRayP3RayS0_
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3835:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3835:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 1293080650
.long 1074340347
.align 8
.LC1:
.long 0
.long 1080459264
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 1195593728
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC4:
.long -2147483648
.long 0
.long 0
.long 0
.align 16
.LC5:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC6:
.long -1598689907
.long 1051772663
.section .rodata.cst4
.align 4
.LC7:
.long 1065353216
.section .rodata.cst8
.align 8
.LC8:
.long -755914244
.long 1062232653
.align 8
.LC9:
.long -1998362383
.long 1055193269
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "BasicTool.cuh"
//½Ç¶Èת»¡¶È
double AngleToRad(double angle)
{
double temp;
temp=angle*PI;
return temp/180;
}
//¼ÆËãÁ½µã¼ä¾àÀë
float CalculateDistanceOfBipartite(Point a,Point b)
{
return sqrt((a.x-b.x)*(a.x-b.x)+(a.y-b.y)*(a.y-b.y)+(a.z-b.z)*(a.z-b.z));
}
void UitizeVector(float *a,float *b,float *c)//¹éÒ»»¯ÏòÁ¿
{
float length=sqrt((*a)*(*a)+(*b)*(*b)+(*c)*(*c));
*a=*a/length;
*b=*b/length;
*c=*c/length;
}
Point GetReflectedVector(Point d,Point n)//¸ù¾Ý·´ÉäÃæµÄµ¥Î»·¨ÏòÁ¿ºÍÈëÉäÏòÁ¿µÃµ½·´ÉäÏòÁ¿
{
Point reflectedPoint;
float temp=2*(d.x*n.x+d.y*n.y+d.z*n.z);
reflectedPoint.x=d.x-temp*n.x;
reflectedPoint.y=d.y-temp*n.y;
reflectedPoint.z=d.z-temp*n.z;
return reflectedPoint;
}
Ray CalculateReflectedRayOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)//µ¥ÌõÉäÏßÓëËùÓÐÈý½ÇÃæÅн»
{
Ray reflectedRay;
Point defaultPoint;
defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
reflectedRay.originalPoint=defaultPoint;
reflectedRay.direction=defaultPoint;
float t=50000;
for (int i=0;i<faceCount;i++)
{
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face[i]. B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
float c1=face[i]. C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
if (abs(denominator)>0.000001)
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
int remain=i%12;
if(t_numerator/denominator<t&&t_numerator/denominator>1&&u>=0&&u<=1&&v>0&&(u+v)<1&&v<=1&&remain<8)
{
*flag=1;
*reflectedFace=i;
t=t_numerator/denominator;
reflectedRay.originalPoint.x=u*b1+v*c1+face[i].A.x;
reflectedRay.originalPoint.y=u*b2+v*c2+face[i].A.y;
reflectedRay.originalPoint.z=u*b3+v*c3+face[i].A.z;
Point n;
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
UitizeVector(&n.x,&n.y,&n.z);
reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
}
}
}
return reflectedRay;
}
Ray CalculateVirtualReflectedRayOnCPU(Ray incidentRay,Face face)//µÃµ½µ¥ÌõÉäÏßÓëij¸öÈý½ÇÃæµÄÐéÄâ·´ÉäÉäÏß
{
Ray reflectedRay;
Point defaultPoint;
defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
reflectedRay.originalPoint=defaultPoint;
reflectedRay.direction=defaultPoint;
float t=50000;
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face.B.x-face.A.x,b2=face.B.y-face.A.y,b3=face.B.z-face.A.z;
float c1=face.C.x-face.A.x,c2=face.C.y-face.A.y,c3=face.C.z-face.A.z;
float x1=incidentRay.originalPoint.x-face.A.x,x2=incidentRay.originalPoint.y-face.A.y,x3=incidentRay.originalPoint.z-face.A.z;
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
if (abs(denominator)>0.000001)
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
if(t_numerator/denominator<t&&t_numerator/denominator>1)
{
t=t_numerator/denominator;
reflectedRay.originalPoint.x=u*b1+v*c1+face.A.x;
reflectedRay.originalPoint.y=u*b2+v*c2+face.A.y;
reflectedRay.originalPoint.z=u*b3+v*c3+face.A.z;
Point n;
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
UitizeVector(&n.x,&n.y,&n.z);
reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
}
}
return reflectedRay;
}
Ray CalculateReflectedRayWithTerrainOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)//µ¥ÌõÉäÏßÓëËùÓÐÈý½ÇÃæÅн»
{
Ray reflectedRay;
Point defaultPoint;
defaultPoint.x=0;defaultPoint.y=0;defaultPoint.z=0;
reflectedRay.originalPoint=defaultPoint;
reflectedRay.direction=defaultPoint;
float t=50000;
for (int i=0;i<faceCount;i++)
{
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face[i].B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
float c1=face[i].C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
if (abs(denominator)>0.000001)
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
if(t_numerator/denominator<t&&t_numerator/denominator>1&&u>=0&&u<=1&&v>0&&(u+v)<1&&v<=1)
{
*flag=1;
*reflectedFace=i;
t=t_numerator/denominator;
reflectedRay.originalPoint.x=u*b1+v*c1+face[i].A.x;
reflectedRay.originalPoint.y=u*b2+v*c2+face[i].A.y;
reflectedRay.originalPoint.z=u*b3+v*c3+face[i].A.z;
Point n;
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
UitizeVector(&n.x,&n.y,&n.z);
reflectedRay.direction=GetReflectedVector(incidentRay.direction,n);
}
}
}
return reflectedRay;
}
//¼ÆËãÁ½ÉäÏß½»µã £¨ÓÃÓÚÇóÈÆÉäÐéÄâ·¢Éäµã£©
Point GetIntersectionOfRays(Ray ray1,Ray ray2)
{
float a1=ray1.direction.x,a2=ray1.direction.y,a3=ray1.direction.z;
float b1=-ray2.direction.x,b2=-ray2.direction.y,b3=-ray2.direction.z;
float c1=ray2.originalPoint.x-ray1.originalPoint.x,c2=ray2.originalPoint.y-ray1.originalPoint.y,c3=ray2.originalPoint.z-ray1.originalPoint.z;
float det=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
Point intersection;
intersection.x=0;intersection.y=0;intersection.z=0;
if(abs(det)<0.001)
{
if (abs(c1*b2-c2*b1)<0.001)
{
intersection.x=ray1.originalPoint.x;
intersection.y=ray1.originalPoint.y;
intersection.z=ray1.originalPoint.z;
}
else if ((abs(b2*a1-b1*a2)>0.00001))
{
float u=(c1*b2-c2*b1)/(b2*a1-b1*a2);
//float v=(c2*a1-c1*a2)/(b2*a1-b1*a2);
intersection.x=ray1.originalPoint.x+u*a1;
intersection.y=ray1.originalPoint.y+u*a2;
intersection.z=ray1.originalPoint.z+u*a3;
}
}
else
{
intersection.x=0;intersection.y=0;intersection.z=0;
}
return intersection;
}
bool JudgePointEqual(Point a, Point b)//ÅжÏÁ½¸öµãÊÇ·ñÏàµÈ
{
if(a.x == b.x && a.y == b.y && a.z == b.z)
return true;
return false;
}
void ExchangeTwoRay(Ray *ray1, Ray *ray2)//½»»»Á½ÌõÉäÏß
{
Ray tempRay;
tempRay = *ray1;
*ray1 = *ray2;
*ray2 = tempRay;
} | #pragma once
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <fstream>
#include "hip/hip_runtime.h"
using namespace std;
#define PI 3.1415926
__device__ __host__ struct Point//¶¨Òåµã
{
float x;
float y;
float z;
};
__device__ __host__ struct Face//¶¨ÒåÈý½ÇÃæ
{
Point A;
Point B;
Point C;
};
__device__ __host__ struct Ray//¶¨ÒåÉäÏß
{
Point originalPoint;
Point direction;
};
__device__ __host__ struct PathNode//·¾¶½Úµã
{
Point point1;
Point point2;
Point point3;
};
__device__ __host__ struct Path
{
PathNode node1;
PathNode node2;
PathNode node3;
int nodeLevel;
};
__device__ __host__ struct TriangleRayTube//¶¨ÒåÈý½ÇÐÎÉäÏß¹Ü
{
Ray ray1;
Ray ray2;
Ray ray3;
Path path;
};
__device__ __host__ struct QuadrangleRayTube //¶¨ÒåËıßÐÎÉäÏß¹Ü
{
Ray ray1;
Ray ray2;
Ray ray3;
Ray ray4;
Path path;
};
__device__ __host__ struct Edge
{
Point p1;
Point p2;
};
double AngleToRad(double angle);//½Ç¶È±ä»¡¶È
float CalculateDistanceOfBipartite(Point a,Point b);//¼ÆËãÁ½µãÖ®¼äµÄ¾àÀë
void UitizeVector(float *a,float *b,float *c);//µ¥Î»»¯ÏòÁ¿
Point GetReflectedVector(Point d,Point n);//Çó·´ÉäÏòÁ¿
Ray CalculateReflectedRayOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag);//¼ÆËã·´ÉäÉäÏß
Ray CalculateVirtualReflectedRayOnCPU(Ray incidentRay,Face face);
Ray CalculateReflectedRayWithTerrainOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag);
Point GetIntersectionOfRays(Ray ray1,Ray ray2);//»ñµÃÁ½¸öÉäÏߵĽ»µã
bool JudgePointEqual(Point a, Point b);//ÅжÏÁ½¸öµãÊÇ·ñÏàµÈ
void ExchangeTwoRay(Ray *ray1, Ray *ray2);//½»»»Á½ÌõÉäÏß |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #pragma once
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <fstream>
#include "hip/hip_runtime.h"
using namespace std;
#define PI 3.1415926
__device__ __host__ struct Point//¶¨Òåµã
{
float x;
float y;
float z;
};
__device__ __host__ struct Face//¶¨ÒåÈý½ÇÃæ
{
Point A;
Point B;
Point C;
};
__device__ __host__ struct Ray//¶¨ÒåÉäÏß
{
Point originalPoint;
Point direction;
};
__device__ __host__ struct PathNode//·¾¶½Úµã
{
Point point1;
Point point2;
Point point3;
};
__device__ __host__ struct Path
{
PathNode node1;
PathNode node2;
PathNode node3;
int nodeLevel;
};
__device__ __host__ struct TriangleRayTube//¶¨ÒåÈý½ÇÐÎÉäÏß¹Ü
{
Ray ray1;
Ray ray2;
Ray ray3;
Path path;
};
__device__ __host__ struct QuadrangleRayTube //¶¨ÒåËıßÐÎÉäÏß¹Ü
{
Ray ray1;
Ray ray2;
Ray ray3;
Ray ray4;
Path path;
};
__device__ __host__ struct Edge
{
Point p1;
Point p2;
};
double AngleToRad(double angle);//½Ç¶È±ä»¡¶È
float CalculateDistanceOfBipartite(Point a,Point b);//¼ÆËãÁ½µãÖ®¼äµÄ¾àÀë
void UitizeVector(float *a,float *b,float *c);//µ¥Î»»¯ÏòÁ¿
Point GetReflectedVector(Point d,Point n);//Çó·´ÉäÏòÁ¿
Ray CalculateReflectedRayOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag);//¼ÆËã·´ÉäÉäÏß
Ray CalculateVirtualReflectedRayOnCPU(Ray incidentRay,Face face);
Ray CalculateReflectedRayWithTerrainOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag);
Point GetIntersectionOfRays(Ray ray1,Ray ray2);//»ñµÃÁ½¸öÉäÏߵĽ»µã
bool JudgePointEqual(Point a, Point b);//ÅжÏÁ½¸öµãÊÇ·ñÏàµÈ
void ExchangeTwoRay(Ray *ray1, Ray *ray2);//½»»»Á½ÌõÉäÏß | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #pragma once
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <fstream>
#include "hip/hip_runtime.h"
using namespace std;
#define PI 3.1415926
__device__ __host__ struct Point//¶¨Òåµã
{
float x;
float y;
float z;
};
__device__ __host__ struct Face//¶¨ÒåÈý½ÇÃæ
{
Point A;
Point B;
Point C;
};
__device__ __host__ struct Ray//¶¨ÒåÉäÏß
{
Point originalPoint;
Point direction;
};
__device__ __host__ struct PathNode//·¾¶½Úµã
{
Point point1;
Point point2;
Point point3;
};
__device__ __host__ struct Path
{
PathNode node1;
PathNode node2;
PathNode node3;
int nodeLevel;
};
__device__ __host__ struct TriangleRayTube//¶¨ÒåÈý½ÇÐÎÉäÏß¹Ü
{
Ray ray1;
Ray ray2;
Ray ray3;
Path path;
};
__device__ __host__ struct QuadrangleRayTube //¶¨ÒåËıßÐÎÉäÏß¹Ü
{
Ray ray1;
Ray ray2;
Ray ray3;
Ray ray4;
Path path;
};
__device__ __host__ struct Edge
{
Point p1;
Point p2;
};
double AngleToRad(double angle);//½Ç¶È±ä»¡¶È
float CalculateDistanceOfBipartite(Point a,Point b);//¼ÆËãÁ½µãÖ®¼äµÄ¾àÀë
void UitizeVector(float *a,float *b,float *c);//µ¥Î»»¯ÏòÁ¿
Point GetReflectedVector(Point d,Point n);//Çó·´ÉäÏòÁ¿
Ray CalculateReflectedRayOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag);//¼ÆËã·´ÉäÉäÏß
Ray CalculateVirtualReflectedRayOnCPU(Ray incidentRay,Face face);
Ray CalculateReflectedRayWithTerrainOnCPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag);
Point GetIntersectionOfRays(Ray ray1,Ray ray2);//»ñµÃÁ½¸öÉäÏߵĽ»µã
bool JudgePointEqual(Point a, Point b);//ÅжÏÁ½¸öµãÊÇ·ñÏàµÈ
void ExchangeTwoRay(Ray *ray1, Ray *ray2);//½»»»Á½ÌõÉäÏß | .text
.file "BasicTool.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001241a6_00000000-6_BasicTool.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3812:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3812:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10AngleToRadd
.type _Z10AngleToRadd, @function
_Z10AngleToRadd:
.LFB3800:
.cfi_startproc
endbr64
mulsd .LC0(%rip), %xmm0
divsd .LC1(%rip), %xmm0
ret
.cfi_endproc
.LFE3800:
.size _Z10AngleToRadd, .-_Z10AngleToRadd
.globl _Z28CalculateDistanceOfBipartite5PointS_
.type _Z28CalculateDistanceOfBipartite5PointS_, @function
_Z28CalculateDistanceOfBipartite5PointS_:
.LFB3801:
.cfi_startproc
endbr64
movq %xmm0, -16(%rsp)
movq %xmm2, -32(%rsp)
movss -16(%rsp), %xmm2
subss -32(%rsp), %xmm2
movss -12(%rsp), %xmm0
subss -28(%rsp), %xmm0
subss %xmm3, %xmm1
mulss %xmm2, %xmm2
mulss %xmm0, %xmm0
addss %xmm0, %xmm2
mulss %xmm1, %xmm1
addss %xmm1, %xmm2
movaps %xmm2, %xmm0
sqrtss %xmm0, %xmm0
ret
.cfi_endproc
.LFE3801:
.size _Z28CalculateDistanceOfBipartite5PointS_, .-_Z28CalculateDistanceOfBipartite5PointS_
.globl _Z12UitizeVectorPfS_S_
.type _Z12UitizeVectorPfS_S_, @function
_Z12UitizeVectorPfS_S_:
.LFB3802:
.cfi_startproc
endbr64
movss (%rdi), %xmm1
movss (%rsi), %xmm3
movss (%rdx), %xmm2
movaps %xmm1, %xmm0
mulss %xmm1, %xmm0
mulss %xmm3, %xmm3
addss %xmm3, %xmm0
mulss %xmm2, %xmm2
addss %xmm2, %xmm0
sqrtss %xmm0, %xmm0
divss %xmm0, %xmm1
movss %xmm1, (%rdi)
movss (%rsi), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rsi)
movss (%rdx), %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rdx)
ret
.cfi_endproc
.LFE3802:
.size _Z12UitizeVectorPfS_S_, .-_Z12UitizeVectorPfS_S_
.globl _Z18GetReflectedVector5PointS_
.type _Z18GetReflectedVector5PointS_, @function
_Z18GetReflectedVector5PointS_:
.LFB3803:
.cfi_startproc
endbr64
movq %xmm0, -40(%rsp)
movq %xmm2, -56(%rsp)
movss -40(%rsp), %xmm5
movss -36(%rsp), %xmm2
movss -56(%rsp), %xmm6
movss -52(%rsp), %xmm4
movaps %xmm5, %xmm0
mulss %xmm6, %xmm0
movaps %xmm2, %xmm7
mulss %xmm4, %xmm7
addss %xmm7, %xmm0
movaps %xmm1, %xmm7
mulss %xmm3, %xmm7
addss %xmm7, %xmm0
addss %xmm0, %xmm0
mulss %xmm0, %xmm6
subss %xmm6, %xmm5
movss %xmm5, -20(%rsp)
mulss %xmm0, %xmm4
subss %xmm4, %xmm2
movss %xmm2, -16(%rsp)
mulss %xmm0, %xmm3
movq -20(%rsp), %xmm0
subss %xmm3, %xmm1
ret
.cfi_endproc
.LFE3803:
.size _Z18GetReflectedVector5PointS_, .-_Z18GetReflectedVector5PointS_
.globl _Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_
.type _Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_, @function
_Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_:
.LFB3804:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r13
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $0x00000000, (%rdi)
movl $0x00000000, 4(%rdi)
movl $0x00000000, 8(%rdi)
movl $0x00000000, 12(%rdi)
movl $0x00000000, 16(%rdi)
movl $0x00000000, 20(%rdi)
testl %edx, %edx
jle .L7
movl %edx, %r12d
movq %rcx, %r14
movq %r8, %r15
movq %rsi, %rbx
movl $0, %ebp
movl .LC2(%rip), %ecx
jmp .L17
.L9:
addl $1, %ebp
addq $36, %rbx
cmpl %ebp, %r12d
je .L7
.L17:
movss 140(%rsp), %xmm10
xorps .LC4(%rip), %xmm10
movaps %xmm10, %xmm13
movss 144(%rsp), %xmm6
xorps .LC4(%rip), %xmm6
movss 148(%rsp), %xmm5
xorps .LC4(%rip), %xmm5
movss (%rbx), %xmm11
movss 12(%rbx), %xmm8
subss %xmm11, %xmm8
movss 4(%rbx), %xmm14
movss 16(%rbx), %xmm2
subss %xmm14, %xmm2
movss 8(%rbx), %xmm15
movss 20(%rbx), %xmm1
subss %xmm15, %xmm1
movss 24(%rbx), %xmm7
subss %xmm11, %xmm7
movss 28(%rbx), %xmm3
subss %xmm14, %xmm3
movss 32(%rbx), %xmm4
subss %xmm15, %xmm4
movaps %xmm2, %xmm9
mulss %xmm4, %xmm9
movaps %xmm1, %xmm0
mulss %xmm3, %xmm0
subss %xmm0, %xmm9
movaps %xmm6, %xmm10
mulss %xmm4, %xmm10
movaps %xmm5, %xmm0
mulss %xmm3, %xmm0
subss %xmm0, %xmm10
movaps %xmm6, %xmm12
mulss %xmm1, %xmm12
movaps %xmm5, %xmm0
mulss %xmm2, %xmm0
subss %xmm0, %xmm12
movss %xmm13, 12(%rsp)
movaps %xmm13, %xmm0
mulss %xmm9, %xmm0
movss %xmm10, 8(%rsp)
movaps %xmm10, %xmm13
mulss %xmm8, %xmm13
subss %xmm13, %xmm0
movaps %xmm12, %xmm13
mulss %xmm7, %xmm13
addss %xmm13, %xmm0
movaps %xmm0, %xmm13
andps .LC5(%rip), %xmm13
cvtss2sd %xmm13, %xmm13
comisd .LC6(%rip), %xmm13
jbe .L9
movss 128(%rsp), %xmm13
subss %xmm11, %xmm13
movss %xmm13, 16(%rsp)
movss 132(%rsp), %xmm11
subss %xmm14, %xmm11
movaps %xmm11, %xmm10
movss 136(%rsp), %xmm13
subss %xmm15, %xmm13
movaps %xmm11, %xmm15
mulss %xmm4, %xmm15
movaps %xmm3, %xmm11
mulss %xmm13, %xmm11
movaps %xmm15, %xmm14
subss %xmm11, %xmm14
movss %xmm10, 28(%rsp)
movaps %xmm10, %xmm11
mulss %xmm1, %xmm11
movaps %xmm2, %xmm15
mulss %xmm13, %xmm15
movss %xmm15, 20(%rsp)
movss 16(%rsp), %xmm15
mulss %xmm9, %xmm15
movaps %xmm14, %xmm10
mulss %xmm8, %xmm10
subss %xmm10, %xmm15
movss %xmm15, 24(%rsp)
movaps %xmm11, %xmm10
subss 20(%rsp), %xmm10
movaps %xmm10, %xmm15
mulss %xmm7, %xmm15
addss 24(%rsp), %xmm15
divss %xmm0, %xmm15
movss %xmm15, 24(%rsp)
movd %ecx, %xmm10
comiss %xmm15, %xmm10
jbe .L9
comiss .LC7(%rip), %xmm15
jbe .L9
mulss %xmm13, %xmm6
mulss 28(%rsp), %xmm5
subss %xmm5, %xmm6
movaps %xmm14, %xmm5
movss 12(%rsp), %xmm13
mulss %xmm13, %xmm5
movss 16(%rsp), %xmm14
movss 8(%rsp), %xmm10
mulss %xmm14, %xmm10
subss %xmm10, %xmm5
movaps %xmm6, %xmm10
mulss %xmm7, %xmm10
addss %xmm10, %xmm5
divss %xmm0, %xmm5
pxor %xmm10, %xmm10
comiss %xmm10, %xmm5
jb .L9
movss .LC7(%rip), %xmm10
comiss %xmm5, %xmm10
jb .L9
movss 20(%rsp), %xmm10
subss %xmm11, %xmm10
mulss %xmm13, %xmm10
mulss %xmm8, %xmm6
subss %xmm6, %xmm10
mulss %xmm14, %xmm12
movaps %xmm10, %xmm6
addss %xmm12, %xmm6
divss %xmm0, %xmm6
pxor %xmm0, %xmm0
comiss %xmm0, %xmm6
jbe .L9
movss .LC7(%rip), %xmm10
movaps %xmm5, %xmm0
addss %xmm6, %xmm0
comiss %xmm0, %xmm10
jbe .L9
movslq %ebp, %rax
imulq $715827883, %rax, %rax
sarq $33, %rax
movl %ebp, %esi
sarl $31, %esi
subl %esi, %eax
leal (%rax,%rax,2), %eax
sall $2, %eax
movl %ebp, %esi
subl %eax, %esi
cmpl $7, %esi
jg .L9
movl $1, (%r15)
movl %ebp, (%r14)
movaps %xmm8, %xmm10
mulss %xmm5, %xmm10
movaps %xmm7, %xmm0
mulss %xmm6, %xmm0
addss %xmm10, %xmm0
addss (%rbx), %xmm0
movss %xmm0, 0(%r13)
movaps %xmm2, %xmm10
mulss %xmm5, %xmm10
movaps %xmm3, %xmm0
mulss %xmm6, %xmm0
addss %xmm10, %xmm0
addss 4(%rbx), %xmm0
movss %xmm0, 4(%r13)
mulss %xmm1, %xmm5
mulss %xmm4, %xmm6
addss %xmm6, %xmm5
addss 8(%rbx), %xmm5
movss %xmm5, 8(%r13)
movss %xmm9, 44(%rsp)
mulss %xmm7, %xmm1
mulss %xmm8, %xmm4
subss %xmm4, %xmm1
movss %xmm1, 48(%rsp)
mulss %xmm3, %xmm8
mulss %xmm7, %xmm2
subss %xmm2, %xmm8
movss %xmm8, 52(%rsp)
leaq 44(%rsp), %rdi
leaq 52(%rsp), %rdx
leaq 48(%rsp), %rsi
call _Z12UitizeVectorPfS_S_
movq 44(%rsp), %xmm2
movss 52(%rsp), %xmm3
movq 140(%rsp), %xmm0
movss 148(%rsp), %xmm1
call _Z18GetReflectedVector5PointS_
movq %xmm0, 12(%r13)
movss %xmm1, 20(%r13)
movl 24(%rsp), %ecx
jmp .L9
.L7:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L28
movq %r13, %rax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3804:
.size _Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_, .-_Z26CalculateReflectedRayOnCPU3RayP4FaceiPiS2_
.globl _Z33CalculateVirtualReflectedRayOnCPU3Ray4Face
.type _Z33CalculateVirtualReflectedRayOnCPU3Ray4Face, @function
_Z33CalculateVirtualReflectedRayOnCPU3Ray4Face:
.LFB3805:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbx
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl 104(%rsp), %ecx
movss 108(%rsp), %xmm13
movss 112(%rsp), %xmm14
movl $0x00000000, (%rdi)
movl $0x00000000, 4(%rdi)
movl $0x00000000, 8(%rdi)
movl $0x00000000, 12(%rdi)
movl $0x00000000, 16(%rdi)
movl $0x00000000, 20(%rdi)
movss .LC4(%rip), %xmm0
movss 92(%rsp), %xmm1
xorps %xmm0, %xmm1
movaps %xmm1, %xmm15
movss 96(%rsp), %xmm4
xorps %xmm0, %xmm4
movss 100(%rsp), %xmm7
xorps %xmm0, %xmm7
movss 116(%rsp), %xmm6
movd %ecx, %xmm5
subss %xmm5, %xmm6
movss 120(%rsp), %xmm2
subss %xmm13, %xmm2
movss 124(%rsp), %xmm1
subss %xmm14, %xmm1
movss 128(%rsp), %xmm8
subss %xmm5, %xmm8
movss 132(%rsp), %xmm5
subss %xmm13, %xmm5
movss 136(%rsp), %xmm3
subss %xmm14, %xmm3
movaps %xmm2, %xmm9
mulss %xmm3, %xmm9
movaps %xmm1, %xmm0
mulss %xmm5, %xmm0
subss %xmm0, %xmm9
movaps %xmm4, %xmm11
mulss %xmm3, %xmm11
movaps %xmm7, %xmm0
mulss %xmm5, %xmm0
subss %xmm0, %xmm11
movaps %xmm4, %xmm10
mulss %xmm1, %xmm10
movaps %xmm7, %xmm0
mulss %xmm2, %xmm0
subss %xmm0, %xmm10
movss %xmm15, 20(%rsp)
movaps %xmm15, %xmm0
mulss %xmm9, %xmm0
movaps %xmm11, %xmm12
mulss %xmm6, %xmm12
subss %xmm12, %xmm0
movaps %xmm10, %xmm12
mulss %xmm8, %xmm12
addss %xmm12, %xmm0
movaps %xmm0, %xmm12
andps .LC5(%rip), %xmm12
cvtss2sd %xmm12, %xmm12
comisd .LC6(%rip), %xmm12
jbe .L29
movss 80(%rsp), %xmm12
movd %ecx, %xmm15
subss %xmm15, %xmm12
movss %xmm12, 24(%rsp)
movss 84(%rsp), %xmm12
subss %xmm13, %xmm12
movaps %xmm12, %xmm15
movss 88(%rsp), %xmm12
subss %xmm14, %xmm12
movss %xmm12, 4(%rsp)
movss %xmm15, 28(%rsp)
movaps %xmm15, %xmm12
mulss %xmm3, %xmm12
movaps %xmm12, %xmm15
movss 4(%rsp), %xmm12
mulss %xmm5, %xmm12
subss %xmm12, %xmm15
movss %xmm15, 8(%rsp)
movss 28(%rsp), %xmm12
mulss %xmm1, %xmm12
movss %xmm12, 12(%rsp)
movss 4(%rsp), %xmm15
mulss %xmm2, %xmm15
movss %xmm15, 16(%rsp)
movss 24(%rsp), %xmm12
mulss %xmm9, %xmm12
movss 8(%rsp), %xmm15
mulss %xmm6, %xmm15
subss %xmm15, %xmm12
movss 12(%rsp), %xmm15
subss 16(%rsp), %xmm15
mulss %xmm8, %xmm15
addss %xmm15, %xmm12
divss %xmm0, %xmm12
movss .LC2(%rip), %xmm15
comiss %xmm12, %xmm15
jbe .L29
comiss .LC7(%rip), %xmm12
ja .L39
.L29:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L40
movq %rbx, %rax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
mulss 4(%rsp), %xmm4
mulss 28(%rsp), %xmm7
subss %xmm7, %xmm4
movss 8(%rsp), %xmm7
movss 20(%rsp), %xmm15
mulss %xmm15, %xmm7
movss 24(%rsp), %xmm12
mulss %xmm12, %xmm11
subss %xmm11, %xmm7
movaps %xmm4, %xmm11
mulss %xmm8, %xmm11
addss %xmm11, %xmm7
divss %xmm0, %xmm7
movss 16(%rsp), %xmm11
subss 12(%rsp), %xmm11
mulss %xmm15, %xmm11
mulss %xmm6, %xmm4
subss %xmm4, %xmm11
mulss %xmm12, %xmm10
addss %xmm10, %xmm11
divss %xmm0, %xmm11
movaps %xmm6, %xmm0
mulss %xmm7, %xmm0
movaps %xmm8, %xmm4
mulss %xmm11, %xmm4
addss %xmm4, %xmm0
movd %ecx, %xmm4
addss %xmm4, %xmm0
movss %xmm0, (%rdi)
movaps %xmm2, %xmm0
mulss %xmm7, %xmm0
movaps %xmm5, %xmm4
mulss %xmm11, %xmm4
addss %xmm4, %xmm0
addss %xmm13, %xmm0
movss %xmm0, 4(%rdi)
mulss %xmm1, %xmm7
mulss %xmm3, %xmm11
addss %xmm11, %xmm7
addss %xmm14, %xmm7
movss %xmm7, 8(%rdi)
movss %xmm9, 44(%rsp)
mulss %xmm8, %xmm1
mulss %xmm6, %xmm3
subss %xmm3, %xmm1
movss %xmm1, 48(%rsp)
mulss %xmm5, %xmm6
mulss %xmm8, %xmm2
subss %xmm2, %xmm6
movss %xmm6, 52(%rsp)
leaq 44(%rsp), %rdi
leaq 52(%rsp), %rdx
leaq 48(%rsp), %rsi
call _Z12UitizeVectorPfS_S_
movq 44(%rsp), %xmm2
movss 52(%rsp), %xmm3
movq 92(%rsp), %xmm0
movss 100(%rsp), %xmm1
call _Z18GetReflectedVector5PointS_
movq %xmm0, 12(%rbx)
movss %xmm1, 20(%rbx)
jmp .L29
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3805:
.size _Z33CalculateVirtualReflectedRayOnCPU3Ray4Face, .-_Z33CalculateVirtualReflectedRayOnCPU3Ray4Face
.globl _Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_
.type _Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_, @function
_Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_:
.LFB3806:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r13
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $0x00000000, (%rdi)
movl $0x00000000, 4(%rdi)
movl $0x00000000, 8(%rdi)
movl $0x00000000, 12(%rdi)
movl $0x00000000, 16(%rdi)
movl $0x00000000, 20(%rdi)
testl %edx, %edx
jle .L41
movl %edx, %r12d
movq %rcx, %r14
movq %r8, %r15
movq %rsi, %rbx
movl $0, %ebp
movl .LC2(%rip), %ecx
jmp .L51
.L43:
addl $1, %ebp
addq $36, %rbx
cmpl %ebp, %r12d
je .L41
.L51:
movss 140(%rsp), %xmm10
xorps .LC4(%rip), %xmm10
movaps %xmm10, %xmm13
movss 144(%rsp), %xmm6
xorps .LC4(%rip), %xmm6
movss 148(%rsp), %xmm5
xorps .LC4(%rip), %xmm5
movss (%rbx), %xmm11
movss 12(%rbx), %xmm8
subss %xmm11, %xmm8
movss 4(%rbx), %xmm14
movss 16(%rbx), %xmm2
subss %xmm14, %xmm2
movss 8(%rbx), %xmm15
movss 20(%rbx), %xmm1
subss %xmm15, %xmm1
movss 24(%rbx), %xmm7
subss %xmm11, %xmm7
movss 28(%rbx), %xmm3
subss %xmm14, %xmm3
movss 32(%rbx), %xmm4
subss %xmm15, %xmm4
movaps %xmm2, %xmm9
mulss %xmm4, %xmm9
movaps %xmm1, %xmm0
mulss %xmm3, %xmm0
subss %xmm0, %xmm9
movaps %xmm6, %xmm10
mulss %xmm4, %xmm10
movaps %xmm5, %xmm0
mulss %xmm3, %xmm0
subss %xmm0, %xmm10
movaps %xmm6, %xmm12
mulss %xmm1, %xmm12
movaps %xmm5, %xmm0
mulss %xmm2, %xmm0
subss %xmm0, %xmm12
movss %xmm13, 12(%rsp)
movaps %xmm13, %xmm0
mulss %xmm9, %xmm0
movss %xmm10, 8(%rsp)
movaps %xmm10, %xmm13
mulss %xmm8, %xmm13
subss %xmm13, %xmm0
movaps %xmm12, %xmm13
mulss %xmm7, %xmm13
addss %xmm13, %xmm0
movaps %xmm0, %xmm13
andps .LC5(%rip), %xmm13
cvtss2sd %xmm13, %xmm13
comisd .LC6(%rip), %xmm13
jbe .L43
movss 128(%rsp), %xmm13
subss %xmm11, %xmm13
movss %xmm13, 16(%rsp)
movss 132(%rsp), %xmm11
subss %xmm14, %xmm11
movaps %xmm11, %xmm10
movss 136(%rsp), %xmm13
subss %xmm15, %xmm13
movaps %xmm11, %xmm15
mulss %xmm4, %xmm15
movaps %xmm3, %xmm11
mulss %xmm13, %xmm11
movaps %xmm15, %xmm14
subss %xmm11, %xmm14
movss %xmm10, 28(%rsp)
movaps %xmm10, %xmm11
mulss %xmm1, %xmm11
movaps %xmm2, %xmm15
mulss %xmm13, %xmm15
movss %xmm15, 20(%rsp)
movss 16(%rsp), %xmm15
mulss %xmm9, %xmm15
movaps %xmm14, %xmm10
mulss %xmm8, %xmm10
subss %xmm10, %xmm15
movss %xmm15, 24(%rsp)
movaps %xmm11, %xmm10
subss 20(%rsp), %xmm10
movaps %xmm10, %xmm15
mulss %xmm7, %xmm15
addss 24(%rsp), %xmm15
divss %xmm0, %xmm15
movd %ecx, %xmm10
comiss %xmm15, %xmm10
jbe .L43
movss %xmm15, 24(%rsp)
comiss .LC7(%rip), %xmm15
jbe .L43
mulss %xmm13, %xmm6
mulss 28(%rsp), %xmm5
subss %xmm5, %xmm6
movaps %xmm14, %xmm5
movss 12(%rsp), %xmm13
mulss %xmm13, %xmm5
movss 16(%rsp), %xmm14
movss 8(%rsp), %xmm10
mulss %xmm14, %xmm10
subss %xmm10, %xmm5
movaps %xmm6, %xmm10
mulss %xmm7, %xmm10
addss %xmm10, %xmm5
divss %xmm0, %xmm5
pxor %xmm10, %xmm10
comiss %xmm10, %xmm5
jb .L43
movss .LC7(%rip), %xmm10
comiss %xmm5, %xmm10
jb .L43
movss 20(%rsp), %xmm10
subss %xmm11, %xmm10
mulss %xmm13, %xmm10
mulss %xmm8, %xmm6
subss %xmm6, %xmm10
mulss %xmm14, %xmm12
movaps %xmm10, %xmm6
addss %xmm12, %xmm6
divss %xmm0, %xmm6
pxor %xmm0, %xmm0
comiss %xmm0, %xmm6
jbe .L43
movss .LC7(%rip), %xmm10
movaps %xmm5, %xmm0
addss %xmm6, %xmm0
comiss %xmm0, %xmm10
jbe .L43
movl $1, (%r15)
movl %ebp, (%r14)
movaps %xmm8, %xmm10
mulss %xmm5, %xmm10
movaps %xmm7, %xmm0
mulss %xmm6, %xmm0
addss %xmm10, %xmm0
addss (%rbx), %xmm0
movss %xmm0, 0(%r13)
movaps %xmm2, %xmm10
mulss %xmm5, %xmm10
movaps %xmm3, %xmm0
mulss %xmm6, %xmm0
addss %xmm10, %xmm0
addss 4(%rbx), %xmm0
movss %xmm0, 4(%r13)
mulss %xmm1, %xmm5
mulss %xmm4, %xmm6
addss %xmm6, %xmm5
addss 8(%rbx), %xmm5
movss %xmm5, 8(%r13)
movss %xmm9, 44(%rsp)
mulss %xmm7, %xmm1
mulss %xmm8, %xmm4
subss %xmm4, %xmm1
movss %xmm1, 48(%rsp)
mulss %xmm3, %xmm8
mulss %xmm7, %xmm2
subss %xmm2, %xmm8
movss %xmm8, 52(%rsp)
leaq 44(%rsp), %rdi
leaq 52(%rsp), %rdx
leaq 48(%rsp), %rsi
call _Z12UitizeVectorPfS_S_
movq 44(%rsp), %xmm2
movss 52(%rsp), %xmm3
movq 140(%rsp), %xmm0
movss 148(%rsp), %xmm1
call _Z18GetReflectedVector5PointS_
movq %xmm0, 12(%r13)
movss %xmm1, 20(%r13)
movl 24(%rsp), %ecx
jmp .L43
.L41:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L62
movq %r13, %rax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3806:
.size _Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_, .-_Z37CalculateReflectedRayWithTerrainOnCPU3RayP4FaceiPiS2_
.globl _Z21GetIntersectionOfRays3RayS_
.type _Z21GetIntersectionOfRays3RayS_, @function
_Z21GetIntersectionOfRays3RayS_:
.LFB3807:
.cfi_startproc
endbr64
movss 8(%rsp), %xmm9
movss 12(%rsp), %xmm8
movss 16(%rsp), %xmm1
movss 20(%rsp), %xmm12
movss 24(%rsp), %xmm10
movss 28(%rsp), %xmm11
movss .LC4(%rip), %xmm2
movss 44(%rsp), %xmm6
xorps %xmm2, %xmm6
movss 48(%rsp), %xmm4
xorps %xmm2, %xmm4
movss 52(%rsp), %xmm0
movaps %xmm0, %xmm7
xorps %xmm2, %xmm7
movss 32(%rsp), %xmm3
subss %xmm9, %xmm3
movss 36(%rsp), %xmm5
subss %xmm8, %xmm5
movss 40(%rsp), %xmm0
subss %xmm1, %xmm0
movaps %xmm4, %xmm2
mulss %xmm0, %xmm2
movaps %xmm7, %xmm13
mulss %xmm5, %xmm13
subss %xmm13, %xmm2
mulss %xmm12, %xmm2
mulss %xmm10, %xmm0
movaps %xmm11, %xmm13
mulss %xmm5, %xmm13
subss %xmm13, %xmm0
mulss %xmm6, %xmm0
subss %xmm0, %xmm2
mulss %xmm10, %xmm7
movaps %xmm7, %xmm0
movaps %xmm11, %xmm7
mulss %xmm4, %xmm7
subss %xmm7, %xmm0
mulss %xmm3, %xmm0
addss %xmm2, %xmm0
andps .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movsd .LC8(%rip), %xmm2
comisd %xmm0, %xmm2
jbe .L71
mulss %xmm4, %xmm3
mulss %xmm6, %xmm5
subss %xmm5, %xmm3
movaps %xmm3, %xmm0
andps .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm0, %xmm2
ja .L64
mulss %xmm12, %xmm4
mulss %xmm10, %xmm6
subss %xmm6, %xmm4
movaps %xmm4, %xmm0
andps .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
comisd .LC9(%rip), %xmm0
jbe .L72
divss %xmm4, %xmm3
mulss %xmm3, %xmm12
addss %xmm12, %xmm9
mulss %xmm3, %xmm10
addss %xmm10, %xmm8
mulss %xmm3, %xmm11
addss %xmm11, %xmm1
jmp .L64
.L71:
pxor %xmm1, %xmm1
movaps %xmm1, %xmm8
movaps %xmm1, %xmm9
.L64:
movss %xmm9, -20(%rsp)
movss %xmm8, -16(%rsp)
movq -20(%rsp), %xmm0
ret
.L72:
pxor %xmm1, %xmm1
movaps %xmm1, %xmm8
movaps %xmm1, %xmm9
jmp .L64
.cfi_endproc
.LFE3807:
.size _Z21GetIntersectionOfRays3RayS_, .-_Z21GetIntersectionOfRays3RayS_
.globl _Z15JudgePointEqual5PointS_
.type _Z15JudgePointEqual5PointS_, @function
_Z15JudgePointEqual5PointS_:
.LFB3808:
.cfi_startproc
endbr64
movq %xmm0, -16(%rsp)
movss %xmm1, -8(%rsp)
movq %xmm2, -32(%rsp)
movss %xmm3, -24(%rsp)
movss -16(%rsp), %xmm0
ucomiss -32(%rsp), %xmm0
jp .L77
jne .L77
movss -12(%rsp), %xmm0
ucomiss -28(%rsp), %xmm0
jp .L78
jne .L78
ucomiss %xmm3, %xmm1
setnp %al
movl $0, %edx
cmovne %edx, %eax
ret
.L77:
movl $0, %eax
ret
.L78:
movl $0, %eax
ret
.cfi_endproc
.LFE3808:
.size _Z15JudgePointEqual5PointS_, .-_Z15JudgePointEqual5PointS_
.globl _Z14ExchangeTwoRayP3RayS0_
.type _Z14ExchangeTwoRayP3RayS0_, @function
_Z14ExchangeTwoRayP3RayS0_:
.LFB3809:
.cfi_startproc
endbr64
movss (%rdi), %xmm5
movss 4(%rdi), %xmm4
movss 8(%rdi), %xmm3
movss 12(%rdi), %xmm2
movss 16(%rdi), %xmm1
movss 20(%rdi), %xmm0
movdqu (%rsi), %xmm6
movups %xmm6, (%rdi)
movq 16(%rsi), %rax
movq %rax, 16(%rdi)
movss %xmm5, (%rsi)
movss %xmm4, 4(%rsi)
movss %xmm3, 8(%rsi)
movss %xmm2, 12(%rsi)
movss %xmm1, 16(%rsi)
movss %xmm0, 20(%rsi)
ret
.cfi_endproc
.LFE3809:
.size _Z14ExchangeTwoRayP3RayS0_, .-_Z14ExchangeTwoRayP3RayS0_
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3835:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3835:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 1293080650
.long 1074340347
.align 8
.LC1:
.long 0
.long 1080459264
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 1195593728
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC4:
.long -2147483648
.long 0
.long 0
.long 0
.align 16
.LC5:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC6:
.long -1598689907
.long 1051772663
.section .rodata.cst4
.align 4
.LC7:
.long 1065353216
.section .rodata.cst8
.align 8
.LC8:
.long -755914244
.long 1062232653
.align 8
.LC9:
.long -1998362383
.long 1055193269
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "BasicTool.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* It is essential for the programmer to choose the correct GPU device from a list of GPU devices
available to perform the calculations very efficiently,
For Example: To run the double precision applications devices with compute capacity of more than 1.3 are desired.
The following program utilizes the cudaDeviceProp structure to get the info on the same*/
// Note: There are lot of properties available with the cudaDeviceProp struct
#include <stdio.h>
#include <iostream>
using namespace std;
int main()
{
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count); //Gives the number of devices
std::cout<<count<<std::endl;
for(int i=0;i<count;i++)
{
cudaGetDeviceProperties(&prop,i);
printf( "Name: %s\n", prop.name ); // Gives the name of the device
printf("Computation Capability: %d.%d\n", prop.major,prop.minor); // Givees the compute capability of the device
}
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* It is essential for the programmer to choose the correct GPU device from a list of GPU devices
available to perform the calculations very efficiently,
For Example: To run the double precision applications devices with compute capacity of more than 1.3 are desired.
The following program utilizes the cudaDeviceProp structure to get the info on the same*/
// Note: There are lot of properties available with the cudaDeviceProp struct
#include <stdio.h>
#include <iostream>
using namespace std;
int main()
{
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count); //Gives the number of devices
std::cout<<count<<std::endl;
for(int i=0;i<count;i++)
{
cudaGetDeviceProperties(&prop,i);
printf( "Name: %s\n", prop.name ); // Gives the name of the device
printf("Computation Capability: %d.%d\n", prop.major,prop.minor); // Givees the compute capability of the device
}
} | .file "tmpxft_0006f324_00000000-6_deviceprop.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Name: %s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Computation Capability: %d.%d\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $1064, %rsp
.cfi_def_cfa_offset 1104
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq 16(%rsp), %rbp
leaq .LC0(%rip), %r13
leaq .LC1(%rip), %r12
.L5:
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
movq %rbp, %rdx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L5
.L4:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L9
movl $0, %eax
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* It is essential for the programmer to choose the correct GPU device from a list of GPU devices
available to perform the calculations very efficiently,
For Example: To run the double precision applications devices with compute capacity of more than 1.3 are desired.
The following program utilizes the cudaDeviceProp structure to get the info on the same*/
// Note: There are lot of properties available with the cudaDeviceProp struct
#include <stdio.h>
#include <iostream>
using namespace std;
int main()
{
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count); //Gives the number of devices
std::cout<<count<<std::endl;
for(int i=0;i<count;i++)
{
cudaGetDeviceProperties(&prop,i);
printf( "Name: %s\n", prop.name ); // Gives the name of the device
printf("Computation Capability: %d.%d\n", prop.major,prop.minor); // Givees the compute capability of the device
}
} | /* It is essential for the programmer to choose the correct GPU device from a list of GPU devices
available to perform the calculations very efficiently,
For Example: To run the double precision applications devices with compute capacity of more than 1.3 are desired.
The following program utilizes the cudaDeviceProp structure to get the info on the same*/
// Note: There are lot of properties available with the cudaDeviceProp struct
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
using namespace std;
int main()
{
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count); //Gives the number of devices
std::cout<<count<<std::endl;
for(int i=0;i<count;i++)
{
hipGetDeviceProperties(&prop,i);
printf( "Name: %s\n", prop.name ); // Gives the name of the device
printf("Computation Capability: %d.%d\n", prop.major,prop.minor); // Givees the compute capability of the device
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* It is essential for the programmer to choose the correct GPU device from a list of GPU devices
available to perform the calculations very efficiently,
For Example: To run the double precision applications devices with compute capacity of more than 1.3 are desired.
The following program utilizes the cudaDeviceProp structure to get the info on the same*/
// Note: There are lot of properties available with the cudaDeviceProp struct
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
using namespace std;
int main()
{
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count); //Gives the number of devices
std::cout<<count<<std::endl;
for(int i=0;i<count;i++)
{
hipGetDeviceProperties(&prop,i);
printf( "Name: %s\n", prop.name ); // Gives the name of the device
printf("Computation Capability: %d.%d\n", prop.major,prop.minor); // Givees the compute capability of the device
}
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* It is essential for the programmer to choose the correct GPU device from a list of GPU devices
available to perform the calculations very efficiently,
For Example: To run the double precision applications devices with compute capacity of more than 1.3 are desired.
The following program utilizes the cudaDeviceProp structure to get the info on the same*/
// Note: There are lot of properties available with the cudaDeviceProp struct
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
using namespace std;
int main()
{
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count); //Gives the number of devices
std::cout<<count<<std::endl;
for(int i=0;i<count;i++)
{
hipGetDeviceProperties(&prop,i);
printf( "Name: %s\n", prop.name ); // Gives the name of the device
printf("Computation Capability: %d.%d\n", prop.major,prop.minor); // Givees the compute capability of the device
}
} | .text
.file "deviceprop.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 1520
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl 12(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB0_8
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB0_3
# %bb.2:
movzbl 67(%rbx), %ecx
jmp .LBB0_4
.LBB0_3:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB0_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
cmpl $0, 12(%rsp)
jle .LBB0_7
# %bb.5: # %.lr.ph
leaq 16(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_6: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 376(%rsp), %esi
movl 380(%rsp), %edx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 12(%rsp), %ebp
jl .LBB0_6
.LBB0_7: # %._crit_edge
xorl %eax, %eax
addq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_8:
.cfi_def_cfa_offset 1520
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Name: %s\n"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Computation Capability: %d.%d\n"
.size .L.str.1, 31
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0006f324_00000000-6_deviceprop.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Name: %s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Computation Capability: %d.%d\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $1064, %rsp
.cfi_def_cfa_offset 1104
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq 16(%rsp), %rbp
leaq .LC0(%rip), %r13
leaq .LC1(%rip), %r12
.L5:
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
movq %rbp, %rdx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L5
.L4:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L9
movl $0, %eax
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "deviceprop.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 1520
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
movl 12(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB0_8
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB0_3
# %bb.2:
movzbl 67(%rbx), %ecx
jmp .LBB0_4
.LBB0_3:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB0_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
cmpl $0, 12(%rsp)
jle .LBB0_7
# %bb.5: # %.lr.ph
leaq 16(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_6: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 376(%rsp), %esi
movl 380(%rsp), %edx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 12(%rsp), %ebp
jl .LBB0_6
.LBB0_7: # %._crit_edge
xorl %eax, %eax
addq $1488, %rsp # imm = 0x5D0
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_8:
.cfi_def_cfa_offset 1520
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Name: %s\n"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Computation Capability: %d.%d\n"
.size .L.str.1, 31
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //
// Simple and fast atof (ascii to float) function.
//
// - Executes about 5x faster than standard MSCRT library atof().
// - An attractive alternative if the number of calls is in the millions.
// - Assumes input is a proper integer, fraction, or scientific format.
// - Matches library atof() to 15 digits (except at extreme exponents).
// - Follows atof() precedent of essentially no error checking.
//
// 09-May-2009 Tom Van Baak (tvb) www.LeapSecond.com
//
#define white_space(c) ((c) == ' ' || (c) == '\t')
#define valid_digit(c) ((c) >= '0' && (c) <= '9')
double atoff (const char *p)
{
int frac;
double sign, value, scale;
// Skip leading white space, if any.
while (white_space(*p) ) {
p += 1;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits before decimal point or exponent, if any.
for (value = 0.0; valid_digit(*p); p += 1) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double pow10 = 10.0;
p += 1;
while (valid_digit(*p)) {
value += (*p - '0') / pow10;
pow10 *= 10.0;
p += 1;
}
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
unsigned int expon;
// Get sign of exponent, if any.
p += 1;
if (*p == '-') {
frac = 1;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits of exponent, if any.
for (expon = 0; valid_digit(*p); p += 1) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
return sign * (frac ? (value / scale) : (value * scale));
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //
// Simple and fast atof (ascii to float) function.
//
// - Executes about 5x faster than standard MSCRT library atof().
// - An attractive alternative if the number of calls is in the millions.
// - Assumes input is a proper integer, fraction, or scientific format.
// - Matches library atof() to 15 digits (except at extreme exponents).
// - Follows atof() precedent of essentially no error checking.
//
// 09-May-2009 Tom Van Baak (tvb) www.LeapSecond.com
//
#define white_space(c) ((c) == ' ' || (c) == '\t')
#define valid_digit(c) ((c) >= '0' && (c) <= '9')
double atoff (const char *p)
{
int frac;
double sign, value, scale;
// Skip leading white space, if any.
while (white_space(*p) ) {
p += 1;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits before decimal point or exponent, if any.
for (value = 0.0; valid_digit(*p); p += 1) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double pow10 = 10.0;
p += 1;
while (valid_digit(*p)) {
value += (*p - '0') / pow10;
pow10 *= 10.0;
p += 1;
}
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
unsigned int expon;
// Get sign of exponent, if any.
p += 1;
if (*p == '-') {
frac = 1;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits of exponent, if any.
for (expon = 0; valid_digit(*p); p += 1) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
return sign * (frac ? (value / scale) : (value * scale));
} | .file "tmpxft_001ba979_00000000-6_atof.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z5atoffPKc
.type _Z5atoffPKc, @function
_Z5atoffPKc:
.LFB2027:
.cfi_startproc
endbr64
movzbl (%rdi), %eax
cmpb $9, %al
je .L37
cmpb $32, %al
jne .L4
.L37:
addq $1, %rdi
movzbl (%rdi), %eax
cmpb $9, %al
je .L37
cmpb $32, %al
je .L37
.L4:
cmpb $45, %al
je .L38
movsd .LC1(%rip), %xmm0
cmpb $43, %al
je .L39
.L8:
movzbl (%rdi), %eax
leal -48(%rax), %edx
cmpb $9, %dl
ja .L26
pxor %xmm1, %xmm1
movsd .LC3(%rip), %xmm3
.L10:
mulsd %xmm3, %xmm1
subl $48, %eax
movsbl %al, %eax
pxor %xmm2, %xmm2
cvtsi2sdl %eax, %xmm2
addsd %xmm2, %xmm1
addq $1, %rdi
movzbl (%rdi), %eax
leal -48(%rax), %edx
cmpb $9, %dl
jbe .L10
.L9:
cmpb $46, %al
je .L40
.L11:
movzbl (%rdi), %eax
andl $-33, %eax
cmpb $69, %al
jne .L28
movzbl 1(%rdi), %eax
cmpb $45, %al
je .L41
cmpb $43, %al
je .L16
addq $1, %rdi
movl $0, %esi
.L15:
movzbl (%rdi), %eax
leal -48(%rax), %edx
cmpb $9, %dl
ja .L29
movl $0, %edx
.L18:
leal (%rdx,%rdx,4), %edx
movsbl %al, %eax
leal -48(%rax,%rdx,2), %edx
addq $1, %rdi
movzbl (%rdi), %eax
leal -48(%rax), %ecx
cmpb $9, %cl
jbe .L18
movl $308, %eax
cmpl %eax, %edx
cmovbe %edx, %eax
cmpl $49, %edx
jbe .L30
movsd .LC1(%rip), %xmm2
movsd .LC4(%rip), %xmm3
.L20:
mulsd %xmm3, %xmm2
subl $50, %eax
cmpl $49, %eax
ja .L20
.L19:
cmpl $7, %eax
jbe .L17
movl %eax, %edx
movsd .LC5(%rip), %xmm3
.L21:
mulsd %xmm3, %xmm2
subl $8, %edx
cmpl $7, %edx
ja .L21
andl $7, %eax
.L17:
testl %eax, %eax
je .L22
movsd .LC3(%rip), %xmm3
.L23:
mulsd %xmm3, %xmm2
subl $1, %eax
jne .L23
.L22:
testl %esi, %esi
je .L13
divsd %xmm2, %xmm1
jmp .L24
.L38:
addq $1, %rdi
movsd .LC0(%rip), %xmm0
jmp .L8
.L39:
addq $1, %rdi
jmp .L8
.L26:
pxor %xmm1, %xmm1
jmp .L9
.L40:
leaq 1(%rdi), %rdx
movzbl 1(%rdi), %eax
leal -48(%rax), %ecx
cmpb $9, %cl
ja .L27
movsd .LC3(%rip), %xmm3
movapd %xmm3, %xmm4
.L12:
subl $48, %eax
movsbl %al, %eax
pxor %xmm2, %xmm2
cvtsi2sdl %eax, %xmm2
divsd %xmm3, %xmm2
addsd %xmm2, %xmm1
mulsd %xmm4, %xmm3
addq $1, %rdx
movzbl (%rdx), %eax
leal -48(%rax), %ecx
cmpb $9, %cl
jbe .L12
movq %rdx, %rdi
jmp .L11
.L27:
movq %rdx, %rdi
jmp .L11
.L41:
addq $2, %rdi
movl $1, %esi
jmp .L15
.L16:
addq $2, %rdi
movl $0, %esi
jmp .L15
.L30:
movsd .LC1(%rip), %xmm2
jmp .L19
.L29:
movl $0, %eax
movsd .LC1(%rip), %xmm2
jmp .L17
.L28:
movsd .LC1(%rip), %xmm2
.L13:
mulsd %xmm2, %xmm1
.L24:
mulsd %xmm1, %xmm0
ret
.cfi_endproc
.LFE2027:
.size _Z5atoffPKc, .-_Z5atoffPKc
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long -1074790400
.align 8
.LC1:
.long 0
.long 1072693248
.align 8
.LC3:
.long 0
.long 1076101120
.align 8
.LC4:
.long -981572454
.long 1246829326
.align 8
.LC5:
.long 0
.long 1100470148
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //
// Simple and fast atof (ascii to float) function.
//
// - Executes about 5x faster than standard MSCRT library atof().
// - An attractive alternative if the number of calls is in the millions.
// - Assumes input is a proper integer, fraction, or scientific format.
// - Matches library atof() to 15 digits (except at extreme exponents).
// - Follows atof() precedent of essentially no error checking.
//
// 09-May-2009 Tom Van Baak (tvb) www.LeapSecond.com
//
#define white_space(c) ((c) == ' ' || (c) == '\t')
#define valid_digit(c) ((c) >= '0' && (c) <= '9')
double atoff (const char *p)
{
int frac;
double sign, value, scale;
// Skip leading white space, if any.
while (white_space(*p) ) {
p += 1;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits before decimal point or exponent, if any.
for (value = 0.0; valid_digit(*p); p += 1) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double pow10 = 10.0;
p += 1;
while (valid_digit(*p)) {
value += (*p - '0') / pow10;
pow10 *= 10.0;
p += 1;
}
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
unsigned int expon;
// Get sign of exponent, if any.
p += 1;
if (*p == '-') {
frac = 1;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits of exponent, if any.
for (expon = 0; valid_digit(*p); p += 1) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
return sign * (frac ? (value / scale) : (value * scale));
} | #include <hip/hip_runtime.h>
//
// Simple and fast atof (ascii to float) function.
//
// - Executes about 5x faster than standard MSCRT library atof().
// - An attractive alternative if the number of calls is in the millions.
// - Assumes input is a proper integer, fraction, or scientific format.
// - Matches library atof() to 15 digits (except at extreme exponents).
// - Follows atof() precedent of essentially no error checking.
//
// 09-May-2009 Tom Van Baak (tvb) www.LeapSecond.com
//
#define white_space(c) ((c) == ' ' || (c) == '\t')
#define valid_digit(c) ((c) >= '0' && (c) <= '9')
double atoff (const char *p)
{
int frac;
double sign, value, scale;
// Skip leading white space, if any.
while (white_space(*p) ) {
p += 1;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits before decimal point or exponent, if any.
for (value = 0.0; valid_digit(*p); p += 1) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double pow10 = 10.0;
p += 1;
while (valid_digit(*p)) {
value += (*p - '0') / pow10;
pow10 *= 10.0;
p += 1;
}
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
unsigned int expon;
// Get sign of exponent, if any.
p += 1;
if (*p == '-') {
frac = 1;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits of exponent, if any.
for (expon = 0; valid_digit(*p); p += 1) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
return sign * (frac ? (value / scale) : (value * scale));
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
//
// Simple and fast atof (ascii to float) function.
//
// - Executes about 5x faster than standard MSCRT library atof().
// - An attractive alternative if the number of calls is in the millions.
// - Assumes input is a proper integer, fraction, or scientific format.
// - Matches library atof() to 15 digits (except at extreme exponents).
// - Follows atof() precedent of essentially no error checking.
//
// 09-May-2009 Tom Van Baak (tvb) www.LeapSecond.com
//
#define white_space(c) ((c) == ' ' || (c) == '\t')
#define valid_digit(c) ((c) >= '0' && (c) <= '9')
double atoff (const char *p)
{
int frac;
double sign, value, scale;
// Skip leading white space, if any.
while (white_space(*p) ) {
p += 1;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits before decimal point or exponent, if any.
for (value = 0.0; valid_digit(*p); p += 1) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double pow10 = 10.0;
p += 1;
while (valid_digit(*p)) {
value += (*p - '0') / pow10;
pow10 *= 10.0;
p += 1;
}
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
unsigned int expon;
// Get sign of exponent, if any.
p += 1;
if (*p == '-') {
frac = 1;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits of exponent, if any.
for (expon = 0; valid_digit(*p); p += 1) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
return sign * (frac ? (value / scale) : (value * scale));
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
//
// Simple and fast atof (ascii to float) function.
//
// - Executes about 5x faster than standard MSCRT library atof().
// - An attractive alternative if the number of calls is in the millions.
// - Assumes input is a proper integer, fraction, or scientific format.
// - Matches library atof() to 15 digits (except at extreme exponents).
// - Follows atof() precedent of essentially no error checking.
//
// 09-May-2009 Tom Van Baak (tvb) www.LeapSecond.com
//
#define white_space(c) ((c) == ' ' || (c) == '\t')
#define valid_digit(c) ((c) >= '0' && (c) <= '9')
double atoff (const char *p)
{
int frac;
double sign, value, scale;
// Skip leading white space, if any.
while (white_space(*p) ) {
p += 1;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits before decimal point or exponent, if any.
for (value = 0.0; valid_digit(*p); p += 1) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double pow10 = 10.0;
p += 1;
while (valid_digit(*p)) {
value += (*p - '0') / pow10;
pow10 *= 10.0;
p += 1;
}
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
unsigned int expon;
// Get sign of exponent, if any.
p += 1;
if (*p == '-') {
frac = 1;
p += 1;
} else if (*p == '+') {
p += 1;
}
// Get digits of exponent, if any.
for (expon = 0; valid_digit(*p); p += 1) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
return sign * (frac ? (value / scale) : (value * scale));
} | .text
.file "atof.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z5atoffPKc
.LCPI0_0:
.quad 0x3ff0000000000000 # double 1
.LCPI0_1:
.quad 0xbff0000000000000 # double -1
.LCPI0_2:
.quad 0x4024000000000000 # double 10
.LCPI0_3:
.quad 0x4a511b0ec57e649a # double 1.0000000000000001E+50
.LCPI0_4:
.quad 0x4197d78400000000 # double 1.0E+8
.text
.globl _Z5atoffPKc
.p2align 4, 0x90
.type _Z5atoffPKc,@function
_Z5atoffPKc: # @_Z5atoffPKc
.cfi_startproc
# %bb.0:
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movzbl (%rdi), %eax
addl $-9, %eax
cmpl $36, %eax
ja .LBB0_6
# %bb.2: # in Loop: Header=BB0_1 Depth=1
jmpq *.LJTI0_0(,%rax,8)
.LBB0_3: # %.critedge
# in Loop: Header=BB0_1 Depth=1
incq %rdi
jmp .LBB0_1
.LBB0_5:
incq %rdi
.LBB0_6: # %.loopexit82.loopexit
movsd .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero
jmp .LBB0_7
.LBB0_4:
incq %rdi
movsd .LCPI0_1(%rip), %xmm0 # xmm0 = mem[0],zero
.LBB0_7: # %.loopexit82
movzbl (%rdi), %eax
leal -48(%rax), %ecx
xorpd %xmm1, %xmm1
cmpb $9, %cl
ja .LBB0_10
# %bb.8: # %.lr.ph.preheader
movsd .LCPI0_2(%rip), %xmm2 # xmm2 = mem[0],zero
.p2align 4, 0x90
.LBB0_9: # %.lr.ph
# =>This Inner Loop Header: Depth=1
mulsd %xmm2, %xmm1
addb $-48, %al
movsbl %al, %eax
xorps %xmm3, %xmm3
cvtsi2sd %eax, %xmm3
addsd %xmm3, %xmm1
movzbl 1(%rdi), %eax
incq %rdi
leal -48(%rax), %ecx
cmpb $10, %cl
jb .LBB0_9
.LBB0_10: # %.critedge2
cmpb $46, %al
jne .LBB0_14
# %bb.11: # %.preheader81
movzbl 1(%rdi), %eax
incq %rdi
leal -48(%rax), %ecx
cmpb $9, %cl
ja .LBB0_14
# %bb.12: # %.lr.ph95.preheader
movsd .LCPI0_2(%rip), %xmm2 # xmm2 = mem[0],zero
movapd %xmm2, %xmm3
.p2align 4, 0x90
.LBB0_13: # %.lr.ph95
# =>This Inner Loop Header: Depth=1
addb $-48, %al
movsbl %al, %eax
xorps %xmm4, %xmm4
cvtsi2sd %eax, %xmm4
divsd %xmm3, %xmm4
addsd %xmm4, %xmm1
mulsd %xmm2, %xmm3
movzbl 1(%rdi), %eax
incq %rdi
leal -48(%rax), %ecx
cmpb $10, %cl
jb .LBB0_13
.LBB0_14: # %.critedge4
movzbl (%rdi), %eax
orl $32, %eax
cmpl $101, %eax
jne .LBB0_15
# %bb.18:
leaq 1(%rdi), %rcx
movzbl 1(%rdi), %eax
leaq 2(%rdi), %rdx
xorl %esi, %esi
cmpb $43, %al
sete %sil
cmoveq %rdx, %rcx
incq %rsi
cmpb $45, %al
setne %al
movl $2, %r8d
cmovneq %rsi, %r8
cmoveq %rdx, %rcx
movzbl (%rdi,%r8), %edx
leal -48(%rdx), %edi
xorl %esi, %esi
cmpb $9, %dil
ja .LBB0_21
# %bb.19: # %.lr.ph101.preheader
incq %rcx
xorl %esi, %esi
.p2align 4, 0x90
.LBB0_20: # %.lr.ph101
# =>This Inner Loop Header: Depth=1
leal (%rsi,%rsi,4), %esi
addb $-48, %dl
movzbl %dl, %edx
leal (%rdx,%rsi,2), %esi
movzbl (%rcx), %edx
leal -48(%rdx), %edi
incq %rcx
cmpb $10, %dil
jb .LBB0_20
.LBB0_21: # %.critedge6
cmpl $308, %esi # imm = 0x134
movl $308, %ecx # imm = 0x134
cmovbl %esi, %ecx
movsd .LCPI0_0(%rip), %xmm2 # xmm2 = mem[0],zero
cmpl $50, %esi
jb .LBB0_22
# %bb.28: # %.lr.ph105.preheader
movsd .LCPI0_3(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB0_29: # %.lr.ph105
# =>This Inner Loop Header: Depth=1
mulsd %xmm3, %xmm2
addl $-50, %ecx
cmpl $49, %ecx
ja .LBB0_29
.LBB0_22: # %.preheader80
cmpl $8, %ecx
jb .LBB0_25
# %bb.23:
movsd .LCPI0_4(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB0_24: # %.lr.ph110
# =>This Inner Loop Header: Depth=1
mulsd %xmm3, %xmm2
addl $-8, %ecx
cmpl $7, %ecx
ja .LBB0_24
.LBB0_25: # %.preheader
testl %ecx, %ecx
je .LBB0_16
# %bb.26:
movsd .LCPI0_2(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB0_27: # %.lr.ph116
# =>This Inner Loop Header: Depth=1
mulsd %xmm3, %xmm2
decl %ecx
jne .LBB0_27
.LBB0_16: # %.loopexit
testb %al, %al
jne .LBB0_17
.LBB0_30: # %.loopexit
divsd %xmm2, %xmm1
mulsd %xmm1, %xmm0
retq
.LBB0_15:
movsd .LCPI0_0(%rip), %xmm2 # xmm2 = mem[0],zero
movb $1, %al
testb %al, %al
je .LBB0_30
.LBB0_17:
mulsd %xmm2, %xmm1
mulsd %xmm1, %xmm0
retq
.Lfunc_end0:
.size _Z5atoffPKc, .Lfunc_end0-_Z5atoffPKc
.cfi_endproc
.section .rodata,"a",@progbits
.p2align 3, 0x0
.LJTI0_0:
.quad .LBB0_3
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_3
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_5
.quad .LBB0_6
.quad .LBB0_4
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001ba979_00000000-6_atof.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z5atoffPKc
.type _Z5atoffPKc, @function
_Z5atoffPKc:
.LFB2027:
.cfi_startproc
endbr64
movzbl (%rdi), %eax
cmpb $9, %al
je .L37
cmpb $32, %al
jne .L4
.L37:
addq $1, %rdi
movzbl (%rdi), %eax
cmpb $9, %al
je .L37
cmpb $32, %al
je .L37
.L4:
cmpb $45, %al
je .L38
movsd .LC1(%rip), %xmm0
cmpb $43, %al
je .L39
.L8:
movzbl (%rdi), %eax
leal -48(%rax), %edx
cmpb $9, %dl
ja .L26
pxor %xmm1, %xmm1
movsd .LC3(%rip), %xmm3
.L10:
mulsd %xmm3, %xmm1
subl $48, %eax
movsbl %al, %eax
pxor %xmm2, %xmm2
cvtsi2sdl %eax, %xmm2
addsd %xmm2, %xmm1
addq $1, %rdi
movzbl (%rdi), %eax
leal -48(%rax), %edx
cmpb $9, %dl
jbe .L10
.L9:
cmpb $46, %al
je .L40
.L11:
movzbl (%rdi), %eax
andl $-33, %eax
cmpb $69, %al
jne .L28
movzbl 1(%rdi), %eax
cmpb $45, %al
je .L41
cmpb $43, %al
je .L16
addq $1, %rdi
movl $0, %esi
.L15:
movzbl (%rdi), %eax
leal -48(%rax), %edx
cmpb $9, %dl
ja .L29
movl $0, %edx
.L18:
leal (%rdx,%rdx,4), %edx
movsbl %al, %eax
leal -48(%rax,%rdx,2), %edx
addq $1, %rdi
movzbl (%rdi), %eax
leal -48(%rax), %ecx
cmpb $9, %cl
jbe .L18
movl $308, %eax
cmpl %eax, %edx
cmovbe %edx, %eax
cmpl $49, %edx
jbe .L30
movsd .LC1(%rip), %xmm2
movsd .LC4(%rip), %xmm3
.L20:
mulsd %xmm3, %xmm2
subl $50, %eax
cmpl $49, %eax
ja .L20
.L19:
cmpl $7, %eax
jbe .L17
movl %eax, %edx
movsd .LC5(%rip), %xmm3
.L21:
mulsd %xmm3, %xmm2
subl $8, %edx
cmpl $7, %edx
ja .L21
andl $7, %eax
.L17:
testl %eax, %eax
je .L22
movsd .LC3(%rip), %xmm3
.L23:
mulsd %xmm3, %xmm2
subl $1, %eax
jne .L23
.L22:
testl %esi, %esi
je .L13
divsd %xmm2, %xmm1
jmp .L24
.L38:
addq $1, %rdi
movsd .LC0(%rip), %xmm0
jmp .L8
.L39:
addq $1, %rdi
jmp .L8
.L26:
pxor %xmm1, %xmm1
jmp .L9
.L40:
leaq 1(%rdi), %rdx
movzbl 1(%rdi), %eax
leal -48(%rax), %ecx
cmpb $9, %cl
ja .L27
movsd .LC3(%rip), %xmm3
movapd %xmm3, %xmm4
.L12:
subl $48, %eax
movsbl %al, %eax
pxor %xmm2, %xmm2
cvtsi2sdl %eax, %xmm2
divsd %xmm3, %xmm2
addsd %xmm2, %xmm1
mulsd %xmm4, %xmm3
addq $1, %rdx
movzbl (%rdx), %eax
leal -48(%rax), %ecx
cmpb $9, %cl
jbe .L12
movq %rdx, %rdi
jmp .L11
.L27:
movq %rdx, %rdi
jmp .L11
.L41:
addq $2, %rdi
movl $1, %esi
jmp .L15
.L16:
addq $2, %rdi
movl $0, %esi
jmp .L15
.L30:
movsd .LC1(%rip), %xmm2
jmp .L19
.L29:
movl $0, %eax
movsd .LC1(%rip), %xmm2
jmp .L17
.L28:
movsd .LC1(%rip), %xmm2
.L13:
mulsd %xmm2, %xmm1
.L24:
mulsd %xmm1, %xmm0
ret
.cfi_endproc
.LFE2027:
.size _Z5atoffPKc, .-_Z5atoffPKc
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long -1074790400
.align 8
.LC1:
.long 0
.long 1072693248
.align 8
.LC3:
.long 0
.long 1076101120
.align 8
.LC4:
.long -981572454
.long 1246829326
.align 8
.LC5:
.long 0
.long 1100470148
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "atof.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z5atoffPKc
.LCPI0_0:
.quad 0x3ff0000000000000 # double 1
.LCPI0_1:
.quad 0xbff0000000000000 # double -1
.LCPI0_2:
.quad 0x4024000000000000 # double 10
.LCPI0_3:
.quad 0x4a511b0ec57e649a # double 1.0000000000000001E+50
.LCPI0_4:
.quad 0x4197d78400000000 # double 1.0E+8
.text
.globl _Z5atoffPKc
.p2align 4, 0x90
.type _Z5atoffPKc,@function
_Z5atoffPKc: # @_Z5atoffPKc
.cfi_startproc
# %bb.0:
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movzbl (%rdi), %eax
addl $-9, %eax
cmpl $36, %eax
ja .LBB0_6
# %bb.2: # in Loop: Header=BB0_1 Depth=1
jmpq *.LJTI0_0(,%rax,8)
.LBB0_3: # %.critedge
# in Loop: Header=BB0_1 Depth=1
incq %rdi
jmp .LBB0_1
.LBB0_5:
incq %rdi
.LBB0_6: # %.loopexit82.loopexit
movsd .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero
jmp .LBB0_7
.LBB0_4:
incq %rdi
movsd .LCPI0_1(%rip), %xmm0 # xmm0 = mem[0],zero
.LBB0_7: # %.loopexit82
movzbl (%rdi), %eax
leal -48(%rax), %ecx
xorpd %xmm1, %xmm1
cmpb $9, %cl
ja .LBB0_10
# %bb.8: # %.lr.ph.preheader
movsd .LCPI0_2(%rip), %xmm2 # xmm2 = mem[0],zero
.p2align 4, 0x90
.LBB0_9: # %.lr.ph
# =>This Inner Loop Header: Depth=1
mulsd %xmm2, %xmm1
addb $-48, %al
movsbl %al, %eax
xorps %xmm3, %xmm3
cvtsi2sd %eax, %xmm3
addsd %xmm3, %xmm1
movzbl 1(%rdi), %eax
incq %rdi
leal -48(%rax), %ecx
cmpb $10, %cl
jb .LBB0_9
.LBB0_10: # %.critedge2
cmpb $46, %al
jne .LBB0_14
# %bb.11: # %.preheader81
movzbl 1(%rdi), %eax
incq %rdi
leal -48(%rax), %ecx
cmpb $9, %cl
ja .LBB0_14
# %bb.12: # %.lr.ph95.preheader
movsd .LCPI0_2(%rip), %xmm2 # xmm2 = mem[0],zero
movapd %xmm2, %xmm3
.p2align 4, 0x90
.LBB0_13: # %.lr.ph95
# =>This Inner Loop Header: Depth=1
addb $-48, %al
movsbl %al, %eax
xorps %xmm4, %xmm4
cvtsi2sd %eax, %xmm4
divsd %xmm3, %xmm4
addsd %xmm4, %xmm1
mulsd %xmm2, %xmm3
movzbl 1(%rdi), %eax
incq %rdi
leal -48(%rax), %ecx
cmpb $10, %cl
jb .LBB0_13
.LBB0_14: # %.critedge4
movzbl (%rdi), %eax
orl $32, %eax
cmpl $101, %eax
jne .LBB0_15
# %bb.18:
leaq 1(%rdi), %rcx
movzbl 1(%rdi), %eax
leaq 2(%rdi), %rdx
xorl %esi, %esi
cmpb $43, %al
sete %sil
cmoveq %rdx, %rcx
incq %rsi
cmpb $45, %al
setne %al
movl $2, %r8d
cmovneq %rsi, %r8
cmoveq %rdx, %rcx
movzbl (%rdi,%r8), %edx
leal -48(%rdx), %edi
xorl %esi, %esi
cmpb $9, %dil
ja .LBB0_21
# %bb.19: # %.lr.ph101.preheader
incq %rcx
xorl %esi, %esi
.p2align 4, 0x90
.LBB0_20: # %.lr.ph101
# =>This Inner Loop Header: Depth=1
leal (%rsi,%rsi,4), %esi
addb $-48, %dl
movzbl %dl, %edx
leal (%rdx,%rsi,2), %esi
movzbl (%rcx), %edx
leal -48(%rdx), %edi
incq %rcx
cmpb $10, %dil
jb .LBB0_20
.LBB0_21: # %.critedge6
cmpl $308, %esi # imm = 0x134
movl $308, %ecx # imm = 0x134
cmovbl %esi, %ecx
movsd .LCPI0_0(%rip), %xmm2 # xmm2 = mem[0],zero
cmpl $50, %esi
jb .LBB0_22
# %bb.28: # %.lr.ph105.preheader
movsd .LCPI0_3(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB0_29: # %.lr.ph105
# =>This Inner Loop Header: Depth=1
mulsd %xmm3, %xmm2
addl $-50, %ecx
cmpl $49, %ecx
ja .LBB0_29
.LBB0_22: # %.preheader80
cmpl $8, %ecx
jb .LBB0_25
# %bb.23:
movsd .LCPI0_4(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB0_24: # %.lr.ph110
# =>This Inner Loop Header: Depth=1
mulsd %xmm3, %xmm2
addl $-8, %ecx
cmpl $7, %ecx
ja .LBB0_24
.LBB0_25: # %.preheader
testl %ecx, %ecx
je .LBB0_16
# %bb.26:
movsd .LCPI0_2(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB0_27: # %.lr.ph116
# =>This Inner Loop Header: Depth=1
mulsd %xmm3, %xmm2
decl %ecx
jne .LBB0_27
.LBB0_16: # %.loopexit
testb %al, %al
jne .LBB0_17
.LBB0_30: # %.loopexit
divsd %xmm2, %xmm1
mulsd %xmm1, %xmm0
retq
.LBB0_15:
movsd .LCPI0_0(%rip), %xmm2 # xmm2 = mem[0],zero
movb $1, %al
testb %al, %al
je .LBB0_30
.LBB0_17:
mulsd %xmm2, %xmm1
mulsd %xmm1, %xmm0
retq
.Lfunc_end0:
.size _Z5atoffPKc, .Lfunc_end0-_Z5atoffPKc
.cfi_endproc
.section .rodata,"a",@progbits
.p2align 3, 0x0
.LJTI0_0:
.quad .LBB0_3
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_3
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_6
.quad .LBB0_5
.quad .LBB0_6
.quad .LBB0_4
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void getInversePermutationKernel(int size, int *original, int *inverse)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
inverse[original[idx]] = idx;
}
} | code for sm_80
Function : _Z27getInversePermutationKerneliPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x160], PT ; /* 0x0000580007007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R7, R4, c[0x0][0x168] ; /* 0x00005a0007027625 */
/* 0x000fcc00078e0204 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R2, R4, c[0x0][0x170] ; /* 0x00005c0002047625 */
/* 0x004fca00078e0204 */
/*00b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void getInversePermutationKernel(int size, int *original, int *inverse)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
inverse[original[idx]] = idx;
}
} | .file "tmpxft_0006bb81_00000000-6_getInversePermutationKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_
.type _Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_, @function
_Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z27getInversePermutationKerneliPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_, .-_Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_
.globl _Z27getInversePermutationKerneliPiS_
.type _Z27getInversePermutationKerneliPiS_, @function
_Z27getInversePermutationKerneliPiS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z27getInversePermutationKerneliPiS_, .-_Z27getInversePermutationKerneliPiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z27getInversePermutationKerneliPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z27getInversePermutationKerneliPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void getInversePermutationKernel(int size, int *original, int *inverse)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
inverse[original[idx]] = idx;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void getInversePermutationKernel(int size, int *original, int *inverse)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
inverse[original[idx]] = idx;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void getInversePermutationKernel(int size, int *original, int *inverse)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
inverse[original[idx]] = idx;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z27getInversePermutationKerneliPiS_
.globl _Z27getInversePermutationKerneliPiS_
.p2align 8
.type _Z27getInversePermutationKerneliPiS_,@function
_Z27getInversePermutationKerneliPiS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_store_b32 v[2:3], v1, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27getInversePermutationKerneliPiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z27getInversePermutationKerneliPiS_, .Lfunc_end0-_Z27getInversePermutationKerneliPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27getInversePermutationKerneliPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27getInversePermutationKerneliPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Builds the inverse of a permutation: for every idx < size, records idx at
// position original[idx] of `inverse`. One thread per element.
// NOTE(review): assumes original[] holds a valid permutation of 0..size-1;
// an out-of-range value would write out of bounds — confirm with callers.
__global__ void getInversePermutationKernel(int size, int *original, int *inverse)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    inverse[original[idx]] = idx;
}
.file "getInversePermutationKernel.hip"
.globl _Z42__device_stub__getInversePermutationKerneliPiS_ # -- Begin function _Z42__device_stub__getInversePermutationKerneliPiS_
.p2align 4, 0x90
.type _Z42__device_stub__getInversePermutationKerneliPiS_,@function
_Z42__device_stub__getInversePermutationKerneliPiS_: # @_Z42__device_stub__getInversePermutationKerneliPiS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z27getInversePermutationKerneliPiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z42__device_stub__getInversePermutationKerneliPiS_, .Lfunc_end0-_Z42__device_stub__getInversePermutationKerneliPiS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z27getInversePermutationKerneliPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z27getInversePermutationKerneliPiS_,@object # @_Z27getInversePermutationKerneliPiS_
.section .rodata,"a",@progbits
.globl _Z27getInversePermutationKerneliPiS_
.p2align 3, 0x0
_Z27getInversePermutationKerneliPiS_:
.quad _Z42__device_stub__getInversePermutationKerneliPiS_
.size _Z27getInversePermutationKerneliPiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z27getInversePermutationKerneliPiS_"
.size .L__unnamed_1, 37
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z42__device_stub__getInversePermutationKerneliPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z27getInversePermutationKerneliPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z27getInversePermutationKerneliPiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x160], PT ; /* 0x0000580007007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R7, R4, c[0x0][0x168] ; /* 0x00005a0007027625 */
/* 0x000fcc00078e0204 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R2, R4, c[0x0][0x170] ; /* 0x00005c0002047625 */
/* 0x004fca00078e0204 */
/*00b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z27getInversePermutationKerneliPiS_
.globl _Z27getInversePermutationKerneliPiS_
.p2align 8
.type _Z27getInversePermutationKerneliPiS_,@function
_Z27getInversePermutationKerneliPiS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_store_b32 v[2:3], v1, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27getInversePermutationKerneliPiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z27getInversePermutationKerneliPiS_, .Lfunc_end0-_Z27getInversePermutationKerneliPiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27getInversePermutationKerneliPiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27getInversePermutationKerneliPiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0006bb81_00000000-6_getInversePermutationKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_
.type _Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_, @function
_Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z27getInversePermutationKerneliPiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_, .-_Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_
.globl _Z27getInversePermutationKerneliPiS_
.type _Z27getInversePermutationKerneliPiS_, @function
_Z27getInversePermutationKerneliPiS_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z27getInversePermutationKerneliPiS_iPiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z27getInversePermutationKerneliPiS_, .-_Z27getInversePermutationKerneliPiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z27getInversePermutationKerneliPiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z27getInversePermutationKerneliPiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "getInversePermutationKernel.hip"
.globl _Z42__device_stub__getInversePermutationKerneliPiS_ # -- Begin function _Z42__device_stub__getInversePermutationKerneliPiS_
.p2align 4, 0x90
.type _Z42__device_stub__getInversePermutationKerneliPiS_,@function
_Z42__device_stub__getInversePermutationKerneliPiS_: # @_Z42__device_stub__getInversePermutationKerneliPiS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z27getInversePermutationKerneliPiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z42__device_stub__getInversePermutationKerneliPiS_, .Lfunc_end0-_Z42__device_stub__getInversePermutationKerneliPiS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z27getInversePermutationKerneliPiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z27getInversePermutationKerneliPiS_,@object # @_Z27getInversePermutationKerneliPiS_
.section .rodata,"a",@progbits
.globl _Z27getInversePermutationKerneliPiS_
.p2align 3, 0x0
_Z27getInversePermutationKerneliPiS_:
.quad _Z42__device_stub__getInversePermutationKerneliPiS_
.size _Z27getInversePermutationKerneliPiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z27getInversePermutationKerneliPiS_"
.size .L__unnamed_1, 37
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z42__device_stub__getInversePermutationKerneliPiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z27getInversePermutationKerneliPiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define m(data,y,x) data[y*n+x]
#define MAX_THREADS 1024
// ===========================> Functions Prototype <===============================
void fill(float* data, int size);
double calc_mse(float* data1, float* data2, int size);
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x);
void cpuKernel_y(float* a, float* b, float* c, int n, int y);
void cpuKernel(float* a, float* b, float* c, int n);
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n);
void gpuKernel(float* a, float* b, float* c, int n);
// =================================================================================
int main(int argc, char** argv) {
    // Report which device we run on.
    struct cudaDeviceProp p;
    cudaGetDeviceProperties(&p, 0);
    printf("Device Name: %s\n", p.name);
    // Matrix dimension n = 2^m; m comes from the command line (default 5).
    // Per the original author, m >= 14 does not fit in 1 GB of GPU memory.
    int m = 5;
    int n = 1;
    if (argc > 1)
        m = atoi(argv[1]);
    for (int i = 0; i < m; i++)
        n *= 2; // n = 2^m
    // Host buffers: inputs a, b; CPU reference c_serial; GPU result c.
    float* a        = (float*)malloc((size_t)n * n * sizeof(float));
    float* b        = (float*)malloc((size_t)n * n * sizeof(float));
    float* c_serial = (float*)malloc((size_t)n * n * sizeof(float));
    float* c        = (float*)malloc((size_t)n * n * sizeof(float));
    if (!a || !b || !c_serial || !c) {
        // Unchecked mallocs previously crashed later inside fill().
        fprintf(stderr, "host allocation failed for n=%d\n", n);
        return 1;
    }
    // Deterministic pseudo-random inputs in [-16, 16].
    srand(0);
    fill(a, n * n);
    fill(b, n * n);
    // CPU reference: full matrix for small sizes, first row only for large ones.
    clock_t t0 = clock();
    if (m <= 10) {
        cpuKernel(a, b, c_serial, n);
    } else {
        cpuKernel_y(a, b, c_serial, n, 0); // 1st row
    }
    clock_t t1 = clock();
    // GPU computation (gpuKernel includes allocation and transfers).
    clock_t t2 = clock();
    gpuKernel(a, b, c, n);
    clock_t t3 = clock();
    // Verify against whatever portion the CPU computed.
    // calc_mse returns double; keep full precision instead of truncating to float.
    double mse;
    if (m <= 10) {
        mse = calc_mse(c_serial, c, n * n);
    } else {
        mse = calc_mse(c_serial, c, n); // 1st row
    }
    // Convert clock ticks to milliseconds via CLOCKS_PER_SEC; the former
    // "/1000" silently assumed CLOCKS_PER_SEC == 1000000.
    long cpu_ms = (long)((double)(t1 - t0) * 1000.0 / CLOCKS_PER_SEC);
    long gpu_ms = (long)((double)(t3 - t2) * 1000.0 / CLOCKS_PER_SEC);
    printf("n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n", n, cpu_ms, gpu_ms, mse);
    // Release host memory.
    free(a);
    free(b);
    free(c_serial);
    free(c);
    return 0;
}
//-----------------------------------------------------------------------------
// Populate data[0..size) with pseudo-random whole numbers in [-16, 16]
// (rand() % 33 yields 0..32; shifting by -16 centers the range on zero).
void fill(float* data, int size) {
    int i = 0;
    while (i < size) {
        data[i] = (float)(rand() % 33 - 16);
        ++i;
    }
}
// Mean squared error between two equally-sized float buffers.
// Returns 0.0 when size <= 0; the unguarded version divided by zero
// and returned NaN for an empty input.
double calc_mse (float* data1, float* data2, int size) {
    if (size <= 0)
        return 0.0;
    double mse = 0.0;
    for (int i = 0; i < size; i++) {
        // Subtraction happens in float (as before), then widens to double.
        double e = data1[i] - data2[i];
        mse += e * e;
    }
    return mse / size;
}
//-----------------------------------------------------------------------------
// Compute one output element: c[y][x] = dot(row y of a, column x of b),
// all matrices n×n row-major (indexing written out instead of the m() macro).
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x) {
    float acc = 0.0f;
    for (int k = 0; k < n; ++k)
        acc += a[y * n + k] * b[k * n + x];
    c[y * n + x] = acc;
}
// Fill one full row y of the result: c[y][x] for every column x.
void cpuKernel_y(float* a, float* b, float* c, int n, int y) {
    int col = 0;
    while (col < n) {
        cpuKernel_yx(a, b, c, n, y, col);
        ++col;
    }
}
// Fill the entire n×n result matrix c = a * b, one element at a time.
void cpuKernel(float* a, float* b, float* c, int n) {
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col)
            cpuKernel_yx(a, b, c, n, row, col);
    }
}
//-----------------------------------------------------------------------------
/*__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
// write your GPU kernel function here
// note that maximum # of threads per block is 1024
int bx=blockIdx.x;
int by=blockIdx.y;
int bz=blockIdx.z;
int tx=threadIdx.x;
long i,j;
float sum=0;
i= bx+(by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)/n;
j= (by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)%n;
for(long k=0; k<n; k++) {
sum += m(ad,i,k) * m(bd,k,j);
}
m(cd,i,j) = sum;
}*/
// One thread per output element of cd = ad * bd (n×n row-major):
// row comes from blockIdx.x, column from (blockIdx.y, threadIdx.x).
// NOTE(review): no bounds check — assumes the launch satisfies
// gridDim.x == n and gridDim.y * blockDim.x == n, as gpuKernel does.
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
    const int row    = blockIdx.x;
    const int column = blockIdx.y * blockDim.x + threadIdx.x;
    float acc = 0.0f;
    for (int k = 0; k < n; ++k)
        acc += ad[row * n + k] * bd[k * n + column];
    cd[row * n + column] = acc;
}
//-----------------------------------------------------------------------------
// Multiply a*b into c on the GPU (all n×n row-major): allocate device
// buffers, copy inputs, launch kernelFunc, copy the result back, free.
//
// Fix: the old launch <<<dim3(n,1,1), n>>> requested n threads per block,
// which exceeds the 1024-thread hardware limit whenever m > 10 and made
// the launch fail silently. Columns are now split across gridDim.y;
// kernelFunc already derives the column as blockIdx.y*blockDim.x+threadIdx.x,
// so for n <= 1024 this launch is identical to the original.
void gpuKernel(float* a, float* b, float* c, int n) {
    float *ad, *bd, *cd;
    size_t bytes = (size_t)n * n * sizeof(float);
    cudaMalloc((void**)&ad, bytes);
    cudaMalloc((void**)&bd, bytes);
    cudaMalloc((void**)&cd, bytes);
    cudaMemcpy(ad, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, bytes, cudaMemcpyHostToDevice);
    int threads = (n < 1024) ? n : 1024;        // CUDA caps blocks at 1024 threads
    int yBlocks = (n + threads - 1) / threads;  // column tiles (exact for powers of two)
    kernelFunc<<< dim3(n, yBlocks, 1), threads >>>(ad, bd, cd, n);
    // Surface launch-configuration errors instead of silently returning garbage.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernelFunc launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(c, cd, bytes, cudaMemcpyDeviceToHost);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
}
Function : _Z10kernelFuncPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e220000002600 */
/*0020*/ MOV R0, c[0x0][0x178] ; /* 0x00005e0000007a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fc60003f06270 */
/*0070*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e620000002500 */
/*0080*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fe400078e0203 */
/*0090*/ IMAD R4, R4, c[0x0][0x178], RZ ; /* 0x00005e0004047a24 */
/* 0x002fce00078e02ff */
/*00a0*/ @!P0 BRA 0xb90 ; /* 0x00000ae000008947 */
/* 0x000fea0003800000 */
/*00b0*/ IADD3 R3, R0.reuse, -0x1, RZ ; /* 0xffffffff00037810 */
/* 0x040fe40007ffe0ff */
/*00c0*/ LOP3.LUT R5, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300057812 */
/* 0x000fe400078ec0ff */
/*00d0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe40003f06070 */
/*00e0*/ MOV R21, RZ ; /* 0x000000ff00157202 */
/* 0x000fe40000000f00 */
/*00f0*/ MOV R3, RZ ; /* 0x000000ff00037202 */
/* 0x000fd20000000f00 */
/*0100*/ @!P0 BRA 0xa90 ; /* 0x0000098000008947 */
/* 0x000fea0003800000 */
/*0110*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */
/* 0x000fe20007ffe1ff */
/*0120*/ HFMA2.MMA R19, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff137435 */
/* 0x000fe200000001ff */
/*0130*/ MOV R21, RZ ; /* 0x000000ff00157202 */
/* 0x000fe20000000f00 */
/*0140*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */
/* 0x000fe200000001ff */
/*0150*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f04270 */
/*0160*/ MOV R8, c[0x0][0x160] ; /* 0x0000580000087a02 */
/* 0x000fe40000000f00 */
/*0170*/ MOV R7, c[0x0][0x164] ; /* 0x0000590000077a02 */
/* 0x000fc60000000f00 */
/*0180*/ IMAD.WIDE R18, R2, R19, c[0x0][0x168] ; /* 0x00005a0002127625 */
/* 0x000fcc00078e0213 */
/*0190*/ @!P0 BRA 0x910 ; /* 0x0000077000008947 */
/* 0x000fea0003800000 */
/*01a0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*01b0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01c0*/ @!P1 BRA 0x660 ; /* 0x0000049000009947 */
/* 0x000fea0003800000 */
/*01d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*01e0*/ MOV R9, R7 ; /* 0x0000000700097202 */
/* 0x000fe20000000f00 */
/*01f0*/ LDG.E R10, [R18.64] ; /* 0x00000004120a7981 */
/* 0x0000a8000c1e1900 */
/*0200*/ IMAD.WIDE R12, R4, 0x4, R8 ; /* 0x00000004040c7825 */
/* 0x000fc800078e0208 */
/*0210*/ IMAD.WIDE R22, R0.reuse, 0x4, R18 ; /* 0x0000000400167825 */
/* 0x040fe200078e0212 */
/*0220*/ LDG.E R25, [R12.64] ; /* 0x000000040c197981 */
/* 0x000ea8000c1e1900 */
/*0230*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */
/* 0x0002e8000c1e1900 */
/*0240*/ LDG.E R11, [R12.64+0x4] ; /* 0x000004040c0b7981 */
/* 0x000ee8000c1e1900 */
/*0250*/ LDG.E R14, [R12.64+0x8] ; /* 0x000008040c0e7981 */
/* 0x000f22000c1e1900 */
/*0260*/ IMAD.WIDE R22, R0, 0x4, R22 ; /* 0x0000000400167825 */
/* 0x002fc600078e0216 */
/*0270*/ LDG.E R19, [R12.64+0x10] ; /* 0x000010040c137981 */
/* 0x001f68000c1e1900 */
/*0280*/ LDG.E R15, [R22.64] ; /* 0x00000004160f7981 */
/* 0x000122000c1e1900 */
/*0290*/ IMAD.WIDE R16, R0, 0x4, R22 ; /* 0x0000000400107825 */
/* 0x000fc600078e0216 */
/*02a0*/ LDG.E R9, [R12.64+0x14] ; /* 0x000014040c097981 */
/* 0x000f66000c1e1900 */
/*02b0*/ IMAD.WIDE R26, R0.reuse, 0x4, R16 ; /* 0x00000004001a7825 */
/* 0x040fe400078e0210 */
/*02c0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000368000c1e1900 */
/*02d0*/ IMAD.WIDE R28, R0.reuse, 0x4, R26 ; /* 0x00000004001c7825 */
/* 0x040fe200078e021a */
/*02e0*/ LDG.E R18, [R26.64] ; /* 0x000000041a127981 */
/* 0x000168000c1e1900 */
/*02f0*/ LDG.E R20, [R28.64] ; /* 0x000000041c147981 */
/* 0x000968000c1e1900 */
/*0300*/ LDG.E R17, [R12.64+0xc] ; /* 0x00000c040c117981 */
/* 0x002f62000c1e1900 */
/*0310*/ IMAD.WIDE R22, R0, 0x4, R28 ; /* 0x0000000400167825 */
/* 0x001fc600078e021c */
/*0320*/ LDG.E R26, [R12.64+0x1c] ; /* 0x00001c040c1a7981 */
/* 0x000f62000c1e1900 */
/*0330*/ FFMA R25, R10, R25, R21 ; /* 0x000000190a197223 */
/* 0x004fc60000000015 */
/*0340*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */
/* 0x0000a8000c1e1900 */
/*0350*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */
/* 0x000ea2000c1e1900 */
/*0360*/ IMAD.WIDE R22, R0, 0x4, R22 ; /* 0x0000000400167825 */
/* 0x001fc800078e0216 */
/*0370*/ FFMA R27, R24, R11, R25 ; /* 0x0000000b181b7223 */
/* 0x008fe40000000019 */
/*0380*/ IMAD.WIDE R24, R0.reuse, 0x4, R22 ; /* 0x0000000400187825 */
/* 0x040fe200078e0216 */
/*0390*/ LDG.E R11, [R22.64] ; /* 0x00000004160b7981 */
/* 0x0000e6000c1e1900 */
/*03a0*/ FFMA R29, R15, R14, R27 ; /* 0x0000000e0f1d7223 */
/* 0x010fe2000000001b */
/*03b0*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */
/* 0x000328000c1e1900 */
/*03c0*/ LDG.E R27, [R12.64+0x20] ; /* 0x000020040c1b7981 */
/* 0x000f22000c1e1900 */
/*03d0*/ IMAD.WIDE R14, R0, 0x4, R24 ; /* 0x00000004000e7825 */
/* 0x000fc600078e0218 */
/*03e0*/ LDG.E R25, [R12.64+0x28] ; /* 0x000028040c197981 */
/* 0x002f22000c1e1900 */
/*03f0*/ FFMA R29, R16, R17, R29 ; /* 0x00000011101d7223 */
/* 0x020fe4000000001d */
/*0400*/ IMAD.WIDE R16, R0, 0x4, R14 ; /* 0x0000000400107825 */
/* 0x000fe400078e020e */
/*0410*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000364000c1e1900 */
/*0420*/ FFMA R29, R18, R19, R29 ; /* 0x00000013121d7223 */
/* 0x000fe4000000001d */
/*0430*/ IMAD.WIDE R18, R0, 0x4, R16 ; /* 0x0000000400127825 */
/* 0x000fe400078e0210 */
/*0440*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000364000c1e1900 */
/*0450*/ FFMA R20, R20, R9, R29 ; /* 0x0000000914147223 */
/* 0x000fc4000000001d */
/*0460*/ LDG.E R9, [R12.64+0x24] ; /* 0x000024040c097981 */
/* 0x000f62000c1e1900 */
/*0470*/ IMAD.WIDE R22, R0, 0x4, R18 ; /* 0x0000000400167825 */
/* 0x001fc600078e0212 */
/*0480*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000768000c1e1900 */
/*0490*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */
/* 0x002f68000c1e1900 */
/*04a0*/ LDG.E R29, [R22.64] ; /* 0x00000004161d7981 */
/* 0x000162000c1e1900 */
/*04b0*/ FFMA R10, R10, R21, R20 ; /* 0x000000150a0a7223 */
/* 0x004fe40000000014 */
/*04c0*/ IMAD.WIDE R20, R0, 0x4, R22 ; /* 0x0000000400147825 */
/* 0x000fc400078e0216 */
/*04d0*/ LDG.E R22, [R12.64+0x38] ; /* 0x000038040c167981 */
/* 0x001ea8000c1e1900 */
/*04e0*/ LDG.E R17, [R20.64] ; /* 0x0000000414117981 */
/* 0x0008a2000c1e1900 */
/*04f0*/ FFMA R18, R11, R26, R10 ; /* 0x0000001a0b127223 */
/* 0x008fc6000000000a */
/*0500*/ LDG.E R26, [R12.64+0x30] ; /* 0x000030040c1a7981 */
/* 0x000ee2000c1e1900 */
/*0510*/ IMAD.WIDE R10, R0, 0x4, R20 ; /* 0x00000004000a7825 */
/* 0x000fc800078e0214 */
/*0520*/ FFMA R20, R28, R27, R18 ; /* 0x0000001b1c147223 */
/* 0x010fe40000000012 */
/*0530*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0540*/ IMAD.WIDE R18, R0, 0x4, R10 ; /* 0x0000000400127825 */
/* 0x000fc600078e020a */
/*0550*/ LDG.E R27, [R10.64] ; /* 0x000000040a1b7981 */
/* 0x000128000c1e1900 */
/*0560*/ LDG.E R23, [R18.64] ; /* 0x0000000412177981 */
/* 0x000328000c1e1900 */
/*0570*/ LDG.E R10, [R12.64+0x3c] ; /* 0x00003c040c0a7981 */
/* 0x001f22000c1e1900 */
/*0580*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fe20007ffe0ff */
/*0590*/ FFMA R9, R14, R9, R20 ; /* 0x000000090e097223 */
/* 0x020fc80000000014 */
/*05a0*/ FFMA R9, R16, R25, R9 ; /* 0x0000001910097223 */
/* 0x000fe20000000009 */
/*05b0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fc60003f24270 */
/*05c0*/ FFMA R9, R24, R15, R9 ; /* 0x0000000f18097223 */
/* 0x000fe20000000009 */
/*05d0*/ IADD3 R8, P2, R8, 0x40, RZ ; /* 0x0000004008087810 */
/* 0x000fe20007f5e0ff */
/*05e0*/ IMAD.WIDE R18, R0, 0x4, R18 ; /* 0x0000000400127825 */
/* 0x002fe200078e0212 */
/*05f0*/ IADD3 R3, R3, 0x10, RZ ; /* 0x0000001003037810 */
/* 0x000fe40007ffe0ff */
/*0600*/ IADD3.X R7, RZ, R7, RZ, P2, !PT ; /* 0x00000007ff077210 */
/* 0x000fe200017fe4ff */
/*0610*/ FFMA R9, R29, R26, R9 ; /* 0x0000001a1d097223 */
/* 0x008fc80000000009 */
/*0620*/ FFMA R9, R17, R28, R9 ; /* 0x0000001c11097223 */
/* 0x004fc80000000009 */
/*0630*/ FFMA R9, R27, R22, R9 ; /* 0x000000161b097223 */
/* 0x010fc80000000009 */
/*0640*/ FFMA R21, R23, R10, R9 ; /* 0x0000000a17157223 */
/* 0x000fe20000000009 */
/*0650*/ @P1 BRA 0x1e0 ; /* 0xfffffb8000001947 */
/* 0x000fea000383ffff */
/*0660*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*0670*/ @!P1 BRA 0x8f0 ; /* 0x0000027000009947 */
/* 0x000fea0003800000 */
/*0680*/ MOV R9, R7 ; /* 0x0000000700097202 */
/* 0x000fe20000000f00 */
/*0690*/ LDG.E R20, [R18.64] ; /* 0x0000000412147981 */
/* 0x0000a8000c1e1900 */
/*06a0*/ IMAD.WIDE R10, R4, 0x4, R8 ; /* 0x00000004040a7825 */
/* 0x000fca00078e0208 */
/*06b0*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */
/* 0x000ea2000c1e1900 */
/*06c0*/ IMAD.WIDE R22, R0, 0x4, R18 ; /* 0x0000000400167825 */
/* 0x000fc600078e0212 */
/*06d0*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*06e0*/ IMAD.WIDE R12, R0.reuse, 0x4, R22 ; /* 0x00000004000c7825 */
/* 0x040fe200078e0216 */
/*06f0*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */
/* 0x0002e8000c1e1900 */
/*0700*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */
/* 0x000962000c1e1900 */
/*0710*/ IMAD.WIDE R14, R0, 0x4, R12 ; /* 0x00000004000e7825 */
/* 0x000fc600078e020c */
/*0720*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */
/* 0x000f66000c1e1900 */
/*0730*/ IMAD.WIDE R16, R0.reuse, 0x4, R14 ; /* 0x0000000400107825 */
/* 0x040fe200078e020e */
/*0740*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */
/* 0x000f68000c1e1900 */
/*0750*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000962000c1e1900 */
/*0760*/ IMAD.WIDE R18, R0, 0x4, R16 ; /* 0x0000000400127825 */
/* 0x001fc600078e0210 */
/*0770*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000166000c1e1900 */
/*0780*/ IMAD.WIDE R22, R0.reuse, 0x4, R18 ; /* 0x0000000400167825 */
/* 0x042fe200078e0212 */
/*0790*/ LDG.E R28, [R18.64] ; /* 0x00000004121c7981 */
/* 0x000368000c1e1900 */
/*07a0*/ LDG.E R15, [R10.64+0x10] ; /* 0x000010040a0f7981 */
/* 0x010f22000c1e1900 */
/*07b0*/ IMAD.WIDE R12, R0, 0x4, R22 ; /* 0x00000004000c7825 */
/* 0x000fc600078e0216 */
/*07c0*/ LDG.E R18, [R10.64+0x1c] ; /* 0x00001c040a127981 */
/* 0x002f28000c1e1900 */
/*07d0*/ LDG.E R17, [R12.64] ; /* 0x000000040c117981 */
/* 0x001f22000c1e1900 */
/*07e0*/ FFMA R19, R20, R9, R21 ; /* 0x0000000914137223 */
/* 0x004fc60000000015 */
/*07f0*/ LDG.E R21, [R10.64+0x14] ; /* 0x000014040a157981 */
/* 0x000ea8000c1e1900 */
/*0800*/ LDG.E R9, [R22.64] ; /* 0x0000000416097981 */
/* 0x000ea8000c1e1900 */
/*0810*/ LDG.E R20, [R10.64+0x18] ; /* 0x000018040a147981 */
/* 0x000ea2000c1e1900 */
/*0820*/ FFMA R24, R24, R25, R19 ; /* 0x0000001918187223 */
/* 0x008fc80000000013 */
/*0830*/ FFMA R24, R26, R27, R24 ; /* 0x0000001b1a187223 */
/* 0x020fc80000000018 */
/*0840*/ FFMA R14, R14, R29, R24 ; /* 0x0000001d0e0e7223 */
/* 0x000fe20000000018 */
/*0850*/ IADD3 R8, P1, R8, 0x20, RZ ; /* 0x0000002008087810 */
/* 0x000fc60007f3e0ff */
/*0860*/ FFMA R14, R16, R15, R14 ; /* 0x0000000f100e7223 */
/* 0x010fe2000000000e */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0880*/ IADD3 R3, R3, 0x8, RZ ; /* 0x0000000803037810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe40007ffe0ff */
/*08a0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*08b0*/ FFMA R14, R28, R21, R14 ; /* 0x000000151c0e7223 */
/* 0x004fc8000000000e */
/*08c0*/ FFMA R9, R9, R20, R14 ; /* 0x0000001409097223 */
/* 0x000fc8000000000e */
/*08d0*/ FFMA R21, R17, R18, R9 ; /* 0x0000001211157223 */
/* 0x000fe40000000009 */
/*08e0*/ IMAD.WIDE R18, R0, 0x4, R12 ; /* 0x0000000400127825 */
/* 0x000fc800078e020c */
/*08f0*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0900*/ @!P0 BRA 0xa90 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*0910*/ MOV R9, R7 ; /* 0x0000000700097202 */
/* 0x000fe20000000f00 */
/*0920*/ IMAD.WIDE R12, R0, 0x4, R18 ; /* 0x00000004000c7825 */
/* 0x000fe400078e0212 */
/*0930*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ea4000c1e1900 */
/*0940*/ IMAD.WIDE R10, R4, 0x4, R8 ; /* 0x00000004040a7825 */
/* 0x000fc800078e0208 */
/*0950*/ IMAD.WIDE R14, R0.reuse, 0x4, R12 ; /* 0x00000004000e7825 */
/* 0x040fe200078e020c */
/*0960*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */
/* 0x000ea8000c1e1900 */
/*0970*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000ee2000c1e1900 */
/*0980*/ IMAD.WIDE R16, R0, 0x4, R14 ; /* 0x0000000400107825 */
/* 0x000fc600078e020e */
/*0990*/ LDG.E R20, [R10.64+0x4] ; /* 0x000004040a147981 */
/* 0x000ee8000c1e1900 */
/*09a0*/ LDG.E R22, [R14.64] ; /* 0x000000040e167981 */
/* 0x000f28000c1e1900 */
/*09b0*/ LDG.E R23, [R10.64+0x8] ; /* 0x000008040a177981 */
/* 0x000f28000c1e1900 */
/*09c0*/ LDG.E R25, [R10.64+0xc] ; /* 0x00000c040a197981 */
/* 0x000f68000c1e1900 */
/*09d0*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x000f62000c1e1900 */
/*09e0*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fc80007ffe0ff */
/*09f0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f05270 */
/*0a00*/ IADD3 R8, P1, R8, 0x10, RZ ; /* 0x0000001008087810 */
/* 0x000fe40007f3e0ff */
/*0a10*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */
/* 0x000fe40007ffe0ff */
/*0a20*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0a30*/ FFMA R9, R18, R9, R21 ; /* 0x0000000912097223 */
/* 0x004fc80000000015 */
/*0a40*/ FFMA R9, R12, R20, R9 ; /* 0x000000140c097223 */
/* 0x008fe40000000009 */
/*0a50*/ IMAD.WIDE R18, R0, 0x4, R16 ; /* 0x0000000400127825 */
/* 0x000fc800078e0210 */
/*0a60*/ FFMA R9, R22, R23, R9 ; /* 0x0000001716097223 */
/* 0x010fc80000000009 */
/*0a70*/ FFMA R21, R24, R25, R9 ; /* 0x0000001918157223 */
/* 0x020fe20000000009 */
/*0a80*/ @P0 BRA 0x910 ; /* 0xfffffe8000000947 */
/* 0x000fea000383ffff */
/*0a90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0aa0*/ @!P0 BRA 0xb90 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0ab0*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fe200000001ff */
/*0ac0*/ IADD3 R6, R4, R3, RZ ; /* 0x0000000304067210 */
/* 0x000fe20007ffe0ff */
/*0ad0*/ IMAD R3, R3, c[0x0][0x178], R2 ; /* 0x00005e0003037a24 */
/* 0x000fd000078e0202 */
/*0ae0*/ IMAD.WIDE R6, R6, R8, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0208 */
/*0af0*/ IMAD.WIDE R8, R3, R8, c[0x0][0x168] ; /* 0x00005a0003087625 */
/* 0x000fca00078e0208 */
/*0b00*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */
/* 0x0000a8000c1e1900 */
/*0b10*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */
/* 0x0002a2000c1e1900 */
/*0b20*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fc80007ffe0ff */
/*0b30*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0b40*/ IMAD.WIDE R8, R0, 0x4, R8 ; /* 0x0000000400087825 */
/* 0x001fe200078e0208 */
/*0b50*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0b60*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0b70*/ FFMA R21, R10, R3, R21 ; /* 0x000000030a157223 */
/* 0x004fcc0000000015 */
/*0b80*/ @P0 BRA 0xb00 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0b90*/ IADD3 R2, R2, R4, RZ ; /* 0x0000000402027210 */
/* 0x000fe40007ffe0ff */
/*0ba0*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fca0000000f00 */
/*0bb0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */
/* 0x000fca00078e0203 */
/*0bc0*/ STG.E [R2.64], R21 ; /* 0x0000001502007986 */
/* 0x000fe2000c101904 */
/*0bd0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0be0*/ BRA 0xbe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0bf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define m(data,y,x) data[y*n+x]
#define MAX_THREADS 1024
// ===========================> Functions Prototype <===============================
void fill(float* data, int size);
double calc_mse(float* data1, float* data2, int size);
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x);
void cpuKernel_y(float* a, float* b, float* c, int n, int y);
void cpuKernel(float* a, float* b, float* c, int n);
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n);
void gpuKernel(float* a, float* b, float* c, int n);
// =================================================================================
// Benchmark driver: builds n = 2^m square matrices, multiplies them on the CPU
// and on the GPU, and reports wall time for each plus the MSE between results.
// Usage: ./prog [m]   (default m = 5, i.e. n = 32)
int main(int argc, char** argv) {
	struct cudaDeviceProp p;
	// NOTE(review): return code unchecked — if no device 0 exists, p is unspecified.
	cudaGetDeviceProperties(&p, 0);
	printf("Device Name: %s\n", p.name);
	// get parameter from command line to build Matrix dimension
	// check for 10<=m<=13, because m>=14 do not fit in the memory of our GPU, i.e., 1GB.
	int m = 5;
	int n = 1;
	if(argc > 1)
		m = atoi(argv[1]);
	for (int i=0;i<m;i++)
		n*=2; // n=2^m
	// allocate memory in CPU for calculation
	// NOTE(review): malloc results are not checked; large m could return NULL.
	float* a;
	float* b;
	float* c_serial;
	float* c;
	a = (float*)malloc(n*n * sizeof(float));
	b = (float*)malloc(n*n * sizeof(float));
	c_serial = (float*)malloc(n*n * sizeof(float));
	c = (float*)malloc(n*n * sizeof(float));
	// fill a, b matrices with random values between -16.0f and 16.0f
	srand(0); // fixed seed so runs are reproducible
	fill(a, n*n);
	fill(b, n*n);
	// time measurement for CPU calculation
	clock_t t0 = clock();
	if (m<=10) {
		cpuKernel (a, b, c_serial, n);
	} else {
		// For big matrices the full CPU product is too slow; compute only the
		// first row as the correctness reference.
		cpuKernel_y (a, b, c_serial, n, 0); // 1st row
	}
	clock_t t1 = clock();
	// time measurement for GPU calculation
	clock_t t2 = clock();
	gpuKernel (a, b, c, n);
	clock_t t3 = clock();
	// check correctness of calculation
	float mse;
	if (m<=10) {
		mse = calc_mse( c_serial, c, n*n );
	} else {
		mse = calc_mse( c_serial, c, n ); // 1st row
	}
	// NOTE(review): clock() counts in CLOCKS_PER_SEC ticks; dividing by 1000
	// yields milliseconds only when CLOCKS_PER_SEC == 1e6 — confirm on target.
	printf("n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n",n, (t1-t0)/1000, (t3-t2)/1000, mse);
	// free allocated memory for later use
	free(a);
	free(b);
	free(c_serial);
	free(c);
	return 0;
}
//-----------------------------------------------------------------------------
void fill(float* data, int size) {
    // Fills the buffer with integral pseudo-random values in [-16, 16]
    // (rand() % 33 shifted down by 16), same sequence as the original.
    int idx = 0;
    while (idx < size) {
        data[idx] = (float) (rand() % 33 - 16);
        ++idx;
    }
}
double calc_mse (float* data1, float* data2, int size) {
    // Mean squared error between two float buffers, accumulated in double.
    double total = 0.0;
    for (int idx = 0; idx < size; ++idx) {
        // The difference is taken in float first (as in the original), then widened.
        double e = (double)(data1[idx] - data2[idx]);
        total += e * e;
    }
    // Note: size == 0 divides by zero here, exactly like the original.
    return total / size;
}
//-----------------------------------------------------------------------------
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x) { // one element
    // c[y][x] = sum_k a[y][k] * b[k][x]; the sum accumulates directly through
    // the output element, matching the original's write-through behavior.
    float* target = &c[y * n + x];
    *target = 0.0f;
    for (int step = 0; step < n; ++step)
        *target += a[y * n + step] * b[step * n + x];
}
void cpuKernel_y(float* a, float* b, float* c, int n, int y) { // one row
    // Row y of c = a * b, with the element helper inlined. Same float
    // accumulation order as the original, so results match bit-for-bit.
    for (int x = 0; x < n; ++x) {
        float* slot = &c[y * n + x];
        *slot = 0.0f;
        for (int k = 0; k < n; ++k)
            *slot += a[y * n + k] * b[k * n + x];
    }
}
void cpuKernel(float* a, float* b, float* c, int n) { // entire matrix
    // Whole-matrix CPU reference: c = a * b for row-major n x n inputs.
    // Element helper inlined; iteration order identical to the original.
    for (int y = 0; y < n; ++y)
        for (int x = 0; x < n; ++x) {
            float* slot = &c[y * n + x];
            *slot = 0.0f;
            for (int k = 0; k < n; ++k)
                *slot += a[y * n + k] * b[k * n + x];
        }
}
//-----------------------------------------------------------------------------
/*__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
// write your GPU kernel function here
// note that maximum # of threads per block is 1024
int bx=blockIdx.x;
int by=blockIdx.y;
int bz=blockIdx.z;
int tx=threadIdx.x;
long i,j;
float sum=0;
i= bx+(by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)/n;
j= (by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)%n;
for(long k=0; k<n; k++) {
sum += m(ad,i,k) * m(bd,k,j);
}
m(cd,i,j) = sum;
}*/
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
    // One thread per output element of cd = ad * bd (row-major n x n).
    // row = blockIdx.x; column is split over (blockIdx.y, threadIdx.x).
    // Expected launch: grid (n, ceil(n / blockDim.x)), blockDim.x threads/block.
    int tx = threadIdx.x;
    int by = blockIdx.y;
    int bx = blockIdx.x;
    int row = bx;
    float s = 0.0f;
    int column;
    column = (by)*(blockDim.x)+tx;
    // Guard against grids that overshoot n (blockDim.x not dividing n):
    // without it, extra threads would read and write out of bounds.
    if (row >= n || column >= n)
        return;
    int k; for (k=0; k<n; k++)
        s += m(ad,row,k) * m(bd,k,column);   // bd access is coalesced across the warp
    m(cd,row,column) = s;
}
//-----------------------------------------------------------------------------
void gpuKernel(float* a, float* b, float* c, int n) {
    // GPU matrix multiply c = a * b (row-major n x n): allocate, copy in,
    // launch kernelFunc, copy out, free.
    //
    // Fix: <<<dim3(n,1,1), n>>> requests n threads per block, which breaks the
    // 1024-threads-per-block CUDA limit for n > 1024 (m > 10) — the launch
    // failed silently. The column dimension is now tiled across blockIdx.y.
    if (n <= 0)
        return; // nothing to compute; also avoids a divide-by-zero below
    float *ad, *bd, *cd;
    size_t bytes = (size_t)n * (size_t)n * sizeof(float); // size_t avoids int overflow for large n
    cudaMalloc((void**)&ad, bytes);
    cudaMalloc((void**)&bd, bytes);
    cudaMalloc((void**)&cd, bytes);
    cudaMemcpy(ad, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, bytes, cudaMemcpyHostToDevice);
    int threads = (n < 1024) ? n : 1024;       // hardware max threads per block
    int blocksY = (n + threads - 1) / threads; // ceil-div so every column is covered
    // NOTE(review): CUDA API return codes remain unchecked, as in the original;
    // consider a CUDA_CHECK macro plus cudaGetLastError() after the launch.
    kernelFunc<<< dim3(n, blocksY, 1), threads >>>(ad, bd, cd, n);
    cudaMemcpy(c, cd, bytes, cudaMemcpyDeviceToHost); // blocking copy synchronizes the kernel
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z4fillPfi
.type _Z4fillPfi, @function
_Z4fillPfi:
.LFB2058:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1041204193, %rdx, %rdx
sarq $35, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
movl %edx, %ecx
sall $5, %ecx
addl %ecx, %edx
subl %edx, %eax
subl $16, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2058:
.size _Z4fillPfi, .-_Z4fillPfi
.globl _Z8calc_msePfS_i
.type _Z8calc_msePfS_i, @function
_Z8calc_msePfS_i:
.LFB2059:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L14
movslq %edx, %rcx
salq $2, %rcx
movl $0, %eax
pxor %xmm0, %xmm0
.L13:
movss (%rdi,%rax), %xmm1
subss (%rsi,%rax), %xmm1
cvtss2sd %xmm1, %xmm1
mulsd %xmm1, %xmm1
addsd %xmm1, %xmm0
addq $4, %rax
cmpq %rcx, %rax
jne .L13
.L12:
pxor %xmm1, %xmm1
cvtsi2sdl %edx, %xmm1
divsd %xmm1, %xmm0
ret
.L14:
pxor %xmm0, %xmm0
jmp .L12
.cfi_endproc
.LFE2059:
.size _Z8calc_msePfS_i, .-_Z8calc_msePfS_i
.globl _Z12cpuKernel_yxPfS_S_iii
.type _Z12cpuKernel_yxPfS_S_iii, @function
_Z12cpuKernel_yxPfS_S_iii:
.LFB2060:
.cfi_startproc
endbr64
movq %rdi, %r10
movq %rsi, %r11
imull %ecx, %r8d
leal (%r8,%r9), %eax
cltq
leaq (%rdx,%rax,4), %rsi
movl $0x00000000, (%rsi)
testl %ecx, %ecx
jle .L16
movslq %r8d, %r8
leaq (%rdi,%r8,4), %rax
movslq %ecx, %rcx
leaq 0(,%rcx,4), %rdi
movslq %r9d, %r9
leaq (%r11,%r9,4), %rdx
addq %rcx, %r8
leaq (%r10,%r8,4), %rcx
.L18:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss (%rsi), %xmm0
movss %xmm0, (%rsi)
addq $4, %rax
addq %rdi, %rdx
cmpq %rcx, %rax
jne .L18
.L16:
ret
.cfi_endproc
.LFE2060:
.size _Z12cpuKernel_yxPfS_S_iii, .-_Z12cpuKernel_yxPfS_S_iii
.globl _Z11cpuKernel_yPfS_S_ii
.type _Z11cpuKernel_yPfS_S_ii, @function
_Z11cpuKernel_yPfS_S_ii:
.LFB2061:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L25
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rdi, %r12
movq %rsi, %r13
movq %rdx, %r14
movl %ecx, %ebp
movl %r8d, %r15d
movl $0, %ebx
.L22:
movl %ebx, %r9d
movl %r15d, %r8d
movl %ebp, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq %r12, %rdi
call _Z12cpuKernel_yxPfS_S_iii
addl $1, %ebx
cmpl %ebx, %ebp
jne .L22
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2061:
.size _Z11cpuKernel_yPfS_S_ii, .-_Z11cpuKernel_yPfS_S_ii
.globl _Z9cpuKernelPfS_S_i
.type _Z9cpuKernelPfS_S_i, @function
_Z9cpuKernelPfS_S_i:
.LFB2062:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rdi, %r15
movq %rsi, %r14
movq %rdx, %r13
movl %ecx, %ebp
movl $0, %r12d
testl %ecx, %ecx
jle .L28
.L29:
movl $0, %ebx
.L31:
movl %ebx, %r9d
movl %r12d, %r8d
movl %ebp, %ecx
movq %r13, %rdx
movq %r14, %rsi
movq %r15, %rdi
call _Z12cpuKernel_yxPfS_S_iii
movl %ebx, %eax
addl $1, %ebx
cmpl %ebx, %ebp
jne .L31
leal 1(%r12), %edx
cmpl %eax, %r12d
je .L28
movl %edx, %r12d
jmp .L29
.L28:
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z9cpuKernelPfS_S_i, .-_Z9cpuKernelPfS_S_i
.globl _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i
.type _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i, @function
_Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i:
.LFB2088:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L40
.L36:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L41
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10kernelFuncPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L36
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i
.globl _Z10kernelFuncPfS_S_i
.type _Z10kernelFuncPfS_S_i, @function
_Z10kernelFuncPfS_S_i:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z10kernelFuncPfS_S_i, .-_Z10kernelFuncPfS_S_i
.globl _Z9gpuKernelPfS_S_i
.type _Z9gpuKernelPfS_S_i, @function
_Z9gpuKernelPfS_S_i:
.LFB2063:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r14
movq %rsi, %r13
movq %rdx, %r12
movl %ecx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl %ecx, %ebx
imull %ecx, %ebx
movslq %ebx, %rbx
salq $2, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, 44(%rsp)
movl $1, 48(%rsp)
movl %ebp, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L48
.L45:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L49
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
movl %ebp, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i
jmp .L45
.L49:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z9gpuKernelPfS_S_i, .-_Z9gpuKernelPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Device Name: %s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1080, %rsp
.cfi_def_cfa_offset 1136
movl %edi, %ebx
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 1064(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %r12
movl $0, %esi
movq %r12, %rdi
call cudaGetDeviceProperties_v2@PLT
movq %r12, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $5, %edx
cmpl $1, %ebx
jg .L62
.L51:
movl $0, %ebx
movl $1, %ebp
.L53:
addl %ebp, %ebp
addl $1, %ebx
cmpl %edx, %ebx
jne .L53
movl %ebp, %r15d
imull %ebp, %r15d
movslq %r15d, %r14
salq $2, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %r13
movq %r14, %rdi
call malloc@PLT
movq %rax, %r12
movq %r14, %rdi
call malloc@PLT
movq %rax, (%rsp)
movq %r14, %rdi
call malloc@PLT
movq %rax, %r14
movl $0, %edi
call srand@PLT
movl %r15d, %esi
movq %r13, %rdi
call _Z4fillPfi
movl %r15d, %esi
movq %r12, %rdi
call _Z4fillPfi
call clock@PLT
movq %rax, 24(%rsp)
cmpl $10, %ebx
jle .L56
movl $0, %r8d
movl %ebp, %ecx
movq (%rsp), %r15
movq %r15, %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z11cpuKernel_yPfS_S_ii
call clock@PLT
movq %rax, 8(%rsp)
call clock@PLT
movq %rax, %rbx
movl %ebp, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z9gpuKernelPfS_S_i
call clock@PLT
movq %rax, 16(%rsp)
movl %ebp, %edx
movq %r14, %rsi
movq %r15, %rdi
call _Z8calc_msePfS_i
cvtsd2ss %xmm0, %xmm0
jmp .L55
.L62:
movq 8(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %edx
testl %eax, %eax
jg .L51
movl $4, %edi
call malloc@PLT
movq %rax, %r13
movl $4, %edi
call malloc@PLT
movq %rax, %r12
movl $4, %edi
call malloc@PLT
movq %rax, (%rsp)
movl $4, %edi
call malloc@PLT
movq %rax, %r14
movl $0, %edi
call srand@PLT
movl $1, %esi
movq %r13, %rdi
call _Z4fillPfi
movl $1, %esi
movq %r12, %rdi
call _Z4fillPfi
call clock@PLT
movq %rax, 24(%rsp)
movl $1, %ebp
movl $1, %r15d
.L56:
movl %ebp, %ecx
movq (%rsp), %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z9cpuKernelPfS_S_i
call clock@PLT
movq %rax, 8(%rsp)
call clock@PLT
movq %rax, %rbx
movl %ebp, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z9gpuKernelPfS_S_i
call clock@PLT
movq %rax, 16(%rsp)
movl %r15d, %edx
movq %r14, %rsi
movq (%rsp), %rdi
call _Z8calc_msePfS_i
cvtsd2ss %xmm0, %xmm0
.L55:
cvtss2sd %xmm0, %xmm0
movq 16(%rsp), %rsi
subq %rbx, %rsi
movabsq $2361183241434822607, %rcx
movq %rsi, %rax
imulq %rcx
sarq $7, %rdx
movq %rdx, %r8
sarq $63, %rsi
movq 8(%rsp), %rdi
movq 24(%rsp), %rax
subq %rax, %rdi
movq %rdi, %rax
imulq %rcx
movq %rdx, %rcx
sarq $7, %rcx
sarq $63, %rdi
subq %rdi, %rcx
subq %rsi, %r8
movl %ebp, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq (%rsp), %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
movq 1064(%rsp), %rax
subq %fs:40, %rax
jne .L63
movl $0, %eax
addq $1080, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z10kernelFuncPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z10kernelFuncPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define m(data,y,x) data[y*n+x]
#define MAX_THREADS 1024
// ===========================> Functions Prototype <===============================
void fill(float* data, int size);
double calc_mse(float* data1, float* data2, int size);
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x);
void cpuKernel_y(float* a, float* b, float* c, int n, int y);
void cpuKernel(float* a, float* b, float* c, int n);
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n);
void gpuKernel(float* a, float* b, float* c, int n);
// =================================================================================
int main(int argc, char** argv) {
    // Benchmark driver: multiplies two random n x n matrices on CPU and GPU,
    // times both, and reports the mean squared error between the results.
    struct cudaDeviceProp p;
    cudaGetDeviceProperties(&p, 0);
    printf("Device Name: %s\n", p.name);
    // get parameter from command line to build Matrix dimension
    // check for 10<=m<=13, because m>=14 do not fit in the memory of our GPU, i.e., 1GB.
    int m = 5;   // default exponent when no CLI argument is supplied
    int n = 1;
    if(argc > 1)
        m = atoi(argv[1]);
    for (int i=0;i<m;i++)
        n*=2; // n=2^m
    // allocate memory in CPU for calculation
    float* a;
    float* b;
    float* c_serial;  // CPU reference result
    float* c;         // GPU result
    a = (float*)malloc(n*n * sizeof(float));
    b = (float*)malloc(n*n * sizeof(float));
    c_serial = (float*)malloc(n*n * sizeof(float));
    c = (float*)malloc(n*n * sizeof(float));
    // fill a, b matrices with random values between -16.0f and 16.0f
    srand(0);  // fixed seed -> reproducible inputs across runs
    fill(a, n*n);
    fill(b, n*n);
    // time measurement for CPU calculation
    clock_t t0 = clock();
    if (m<=10) {
        cpuKernel (a, b, c_serial, n);
    } else {
        cpuKernel_y (a, b, c_serial, n, 0); // 1st row only: full product too slow on CPU
    }
    clock_t t1 = clock();
    // time measurement for GPU calculation
    clock_t t2 = clock();
    gpuKernel (a, b, c, n);
    clock_t t3 = clock();
    // check correctness of calculation
    float mse;
    if (m<=10) {
        mse = calc_mse( c_serial, c, n*n );
    } else {
        mse = calc_mse( c_serial, c, n ); // 1st row
    }
    // NOTE(review): clock() reports CPU time in CLOCKS_PER_SEC units; the /1000
    // below yields milliseconds only when CLOCKS_PER_SEC == 1e6 -- confirm on target.
    printf("n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n",n, (t1-t0)/1000, (t3-t2)/1000, mse);
    // free allocated memory for later use
    free(a);
    free(b);
    free(c_serial);
    free(c);
    return 0;
}
//-----------------------------------------------------------------------------
/* Fill data[0..size) with pseudo-random integer values in [-16, 16],
 * stored as floats. Consumes exactly `size` values from rand(); callers
 * seed with srand() first for reproducibility. */
void fill(float* data, int size) {
    int idx = 0;
    while (idx < size) {
        data[idx] = (float) (rand() % 33 - 16);
        ++idx;
    }
}
/* Mean squared error between two float buffers of length `size`.
 * Differences are squared and accumulated in double precision.
 * Fix: returns 0.0 when size <= 0 instead of dividing by zero
 * (the original performed 0.0/0, yielding NaN for empty input). */
double calc_mse (float* data1, float* data2, int size) {
    if (size <= 0)
        return 0.0;
    double mse = 0.0;
    for (int i = 0; i < size; i++) {
        double e = data1[i] - data2[i];  // float subtract, then promote (as before)
        mse += e * e;
    }
    return mse / size;
}
//-----------------------------------------------------------------------------
/* Compute one element c[y][x] of c = a*b (n x n row-major matrices):
 * the dot product of row y of `a` with column x of `b`.
 * Accumulates directly into c, matching the original's in-place updates. */
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x) { // one element
    float* target = &c[y * n + x];
    *target = 0;
    for (int k = 0; k < n; ++k)
        *target += a[y * n + k] * b[k * n + x];
}
/* Compute row `y` of c = a*b (n x n row-major matrices).
 * The per-element helper is inlined here; the element-wise in-place
 * accumulation order of the original is preserved. */
void cpuKernel_y(float* a, float* b, float* c, int n, int y) { // one row
    for (int x = 0; x < n; ++x) {
        c[y * n + x] = 0;
        for (int k = 0; k < n; ++k)
            c[y * n + x] += a[y * n + k] * b[k * n + x];
    }
}
/* Serial reference: c = a*b for n x n row-major matrices.
 * Triple loop with the per-element helper inlined; each output cell is
 * zeroed then accumulated in place, exactly as the original did. */
void cpuKernel(float* a, float* b, float* c, int n) { // entire matrix
    for (int y = 0; y < n; ++y) {
        for (int x = 0; x < n; ++x) {
            c[y * n + x] = 0;
            for (int k = 0; k < n; ++k)
                c[y * n + x] += a[y * n + k] * b[k * n + x];
        }
    }
}
//-----------------------------------------------------------------------------
/*__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
// write your GPU kernel function here
// note that maximum # of threads per block is 1024
int bx=blockIdx.x;
int by=blockIdx.y;
int bz=blockIdx.z;
int tx=threadIdx.x;
long i,j;
float sum=0;
i= bx+(by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)/n;
j= (by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)%n;
for(long k=0; k<n; k++) {
sum += m(ad,i,k) * m(bd,k,j);
}
m(cd,i,j) = sum;
}*/
// Computes one element of C = A*B (all n x n, row-major; m() macro is data[y*n+x]).
// Thread mapping: row = blockIdx.x, column = blockIdx.y*blockDim.x + threadIdx.x.
// No bounds check: the launch must satisfy gridDim.x == n and
// gridDim.y * blockDim.x == n so each (row, column) in [0,n) is hit exactly once.
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
    int tx = threadIdx.x;   // lane within block -> column offset
    int by = blockIdx.y;    // column tile index
    int bx = blockIdx.x;    // one grid row per matrix row
    int row = bx;
    float s = 0.0f;         // per-thread accumulator
    int column;
    column = (by)*(blockDim.x)+tx;
    int k; for (k=0; k<n; k++)
        s += m(ad,row,k) * m(bd,k,column);  // dot(row of A, column of B)
    m(cd,row,column) = s;
}
//-----------------------------------------------------------------------------
/* GPU path: c = a*b for n x n row-major matrices.
 * Allocates device buffers, copies a and b up, launches kernelFunc,
 * copies the product back, and frees the buffers.
 *
 * Bug fix: the old launch was kernelFunc<<<dim3(n,1,1), n>>>, i.e. n threads
 * per block. For n > 1024 (m > 10, which main() explicitly supports) that
 * exceeds the CUDA per-block thread limit, the launch fails silently, and
 * `c` receives uninitialized device memory. We now cap the block at 1024
 * threads and tile columns over gridDim.y, which matches kernelFunc's
 * indexing (row = blockIdx.x, column = blockIdx.y*blockDim.x + threadIdx.x).
 * Since n is a power of two, the tiles cover the columns exactly. */
void gpuKernel(float* a, float* b, float* c, int n) {
    float *ad, *bd, *cd;
    size_t bytes = (size_t)n * n * sizeof(float);
    cudaMalloc((void**)&ad, bytes);
    cudaMalloc((void**)&bd, bytes);
    cudaMalloc((void**)&cd, bytes);
    cudaMemcpy(ad, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, bytes, cudaMemcpyHostToDevice);
    int threads = (n < 1024) ? n : 1024;          // hardware limit: 1024 threads/block
    dim3 grid(n, (n + threads - 1) / threads, 1); // rows x column tiles
    kernelFunc<<< grid, threads >>>(ad, bd, cd, n);
    cudaMemcpy(c, cd, bytes, cudaMemcpyDeviceToHost);  // blocking: syncs with the kernel
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
}
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#define m(data,y,x) data[y*n+x]
#define MAX_THREADS 1024
// ===========================> Functions Prototype <===============================
void fill(float* data, int size);
double calc_mse(float* data1, float* data2, int size);
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x);
void cpuKernel_y(float* a, float* b, float* c, int n, int y);
void cpuKernel(float* a, float* b, float* c, int n);
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n);
void gpuKernel(float* a, float* b, float* c, int n);
// =================================================================================
int main(int argc, char** argv) {
    // HIP benchmark driver: random n x n matrix product on CPU and GPU,
    // with timing for both and an MSE comparison of the results.
    struct hipDeviceProp_t p;
    hipGetDeviceProperties(&p, 0);
    printf("Device Name: %s\n", p.name);
    // get parameter from command line to build Matrix dimension
    // check for 10<=m<=13, because m>=14 do not fit in the memory of our GPU, i.e., 1GB.
    int m = 5;   // default exponent without a CLI argument
    int n = 1;
    if(argc > 1)
        m = atoi(argv[1]);
    for (int i=0;i<m;i++)
        n*=2; // n=2^m
    // allocate memory in CPU for calculation
    float* a;
    float* b;
    float* c_serial;  // CPU reference result
    float* c;         // GPU result
    a = (float*)malloc(n*n * sizeof(float));
    b = (float*)malloc(n*n * sizeof(float));
    c_serial = (float*)malloc(n*n * sizeof(float));
    c = (float*)malloc(n*n * sizeof(float));
    // fill a, b matrices with random values between -16.0f and 16.0f
    srand(0);  // fixed seed for reproducible inputs
    fill(a, n*n);
    fill(b, n*n);
    // time measurement for CPU calculation
    clock_t t0 = clock();
    if (m<=10) {
        cpuKernel (a, b, c_serial, n);
    } else {
        cpuKernel_y (a, b, c_serial, n, 0); // 1st row only: full CPU product too slow
    }
    clock_t t1 = clock();
    // time measurement for GPU calculation
    clock_t t2 = clock();
    gpuKernel (a, b, c, n);
    clock_t t3 = clock();
    // check correctness of calculation
    float mse;
    if (m<=10) {
        mse = calc_mse( c_serial, c, n*n );
    } else {
        mse = calc_mse( c_serial, c, n ); // 1st row
    }
    // NOTE(review): /1000 assumes CLOCKS_PER_SEC == 1e6 (ms) -- confirm on target.
    printf("n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n",n, (t1-t0)/1000, (t3-t2)/1000, mse);
    // free allocated memory for later use
    free(a);
    free(b);
    free(c_serial);
    free(c);
    return 0;
}
//-----------------------------------------------------------------------------
/* Write `size` pseudo-random values in [-16, 16] (as floats) into `data`,
 * front to back. Consumes `size` values from rand(); seed via srand(). */
void fill(float* data, int size) {
    for (; size > 0; --size, ++data)
        *data = (float) (rand() % 33 - 16);
}
/* Mean squared error between two float buffers of length `size`,
 * accumulated in double precision.
 * Fix: returns 0.0 for size <= 0 rather than dividing by zero
 * (the original produced NaN via 0.0/0 on empty input). */
double calc_mse (float* data1, float* data2, int size) {
    if (size <= 0)
        return 0.0;
    double mse = 0.0;
    for (int i = 0; i < size; i++) {
        double e = data1[i] - data2[i];  // float subtract then promote, as before
        mse += e * e;
    }
    return mse / size;
}
//-----------------------------------------------------------------------------
/* One element c[y][x] of c = a*b (n x n, row-major): dot product of
 * row y of `a` with column x of `b`, accumulated in place as before. */
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x) { // one element
    int out = y * n + x;
    c[out] = 0;
    for (int k = 0; k < n; k++)
        c[out] += a[y * n + k] * b[k * n + x];
}
/* Row `y` of c = a*b (n x n, row-major). The per-element helper is
 * inlined; each cell is zeroed then accumulated in place as before. */
void cpuKernel_y(float* a, float* b, float* c, int n, int y) { // one row
    int x = 0;
    while (x < n) {
        c[y * n + x] = 0;
        int k = 0;
        while (k < n) {
            c[y * n + x] += a[y * n + k] * b[k * n + x];
            ++k;
        }
        ++x;
    }
}
/* Serial reference product c = a*b for n x n row-major matrices,
 * with the per-element helper inlined; in-place accumulation preserved. */
void cpuKernel(float* a, float* b, float* c, int n) { // entire matrix
    for (int row = 0; row < n; ++row)
        for (int col = 0; col < n; ++col) {
            float* dst = &c[row * n + col];
            *dst = 0;
            for (int k = 0; k < n; ++k)
                *dst += a[row * n + k] * b[k * n + col];
        }
}
//-----------------------------------------------------------------------------
/*__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
// write your GPU kernel function here
// note that maximum # of threads per block is 1024
int bx=blockIdx.x;
int by=blockIdx.y;
int bz=blockIdx.z;
int tx=threadIdx.x;
long i,j;
float sum=0;
i= bx+(by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)/n;
j= (by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)%n;
for(long k=0; k<n; k++) {
sum += m(ad,i,k) * m(bd,k,j);
}
m(cd,i,j) = sum;
}*/
// One output element of C = A*B (n x n, row-major; m() macro is data[y*n+x]).
// Mapping: row = blockIdx.x, column = blockIdx.y*blockDim.x + threadIdx.x.
// No bounds check: the launch must cover each (row, column) in [0,n) exactly
// once, i.e. gridDim.x == n and gridDim.y * blockDim.x == n.
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
    int tx = threadIdx.x;   // lane -> column offset within the tile
    int by = blockIdx.y;    // column tile
    int bx = blockIdx.x;    // matrix row
    int row = bx;
    float s = 0.0f;         // per-thread accumulator
    int column;
    column = (by)*(blockDim.x)+tx;
    int k; for (k=0; k<n; k++)
        s += m(ad,row,k) * m(bd,k,column);  // dot(row of A, column of B)
    m(cd,row,column) = s;
}
//-----------------------------------------------------------------------------
void gpuKernel(float* a, float* b, float* c, int n) {
// allocate memory on GPU
// copy data to GPU
// call kernelFunc
// copy the results back to CPU
// free GPU memory
float *ad, *bd, *cd;
hipMalloc((void**)&ad, n*n*sizeof(float));
hipMalloc((void**)&bd, n*n*sizeof(float));
hipMalloc((void**)&cd, n*n*sizeof(float));
hipMemcpy(ad, a, n*n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(bd, b, n*n*sizeof(float), hipMemcpyHostToDevice);
//kernelFunc <<<dim3(n/MAX_THREADS,n/MAX_THREADS,MAX_THREADS),MAX_THREADS>>> (ad,bd,cd,n);
kernelFunc<<< dim3(n,1,1), n >>>(ad, bd, cd, n);
hipMemcpy(c, cd, n*n*sizeof(float), hipMemcpyDeviceToHost);
hipFree(ad);
hipFree(bd);
hipFree(cd);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#define m(data,y,x) data[y*n+x]
#define MAX_THREADS 1024
// ===========================> Functions Prototype <===============================
void fill(float* data, int size);
double calc_mse(float* data1, float* data2, int size);
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x);
void cpuKernel_y(float* a, float* b, float* c, int n, int y);
void cpuKernel(float* a, float* b, float* c, int n);
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n);
void gpuKernel(float* a, float* b, float* c, int n);
// =================================================================================
int main(int argc, char** argv) {
struct hipDeviceProp_t p;
hipGetDeviceProperties(&p, 0);
printf("Device Name: %s\n", p.name);
// get parameter from command line to build Matrix dimension
// check for 10<=m<=13, because m>=14 do not fit in the memory of our GPU, i.e., 1GB.
int m = 5;
int n = 1;
if(argc > 1)
m = atoi(argv[1]);
for (int i=0;i<m;i++)
n*=2; // n=2^m
// allocate memory in CPU for calculation
float* a;
float* b;
float* c_serial;
float* c;
a = (float*)malloc(n*n * sizeof(float));
b = (float*)malloc(n*n * sizeof(float));
c_serial = (float*)malloc(n*n * sizeof(float));
c = (float*)malloc(n*n * sizeof(float));
// fill a, b matrices with random values between -16.0f and 16.0f
srand(0);
fill(a, n*n);
fill(b, n*n);
// time measurement for CPU calculation
clock_t t0 = clock();
if (m<=10) {
cpuKernel (a, b, c_serial, n);
} else {
cpuKernel_y (a, b, c_serial, n, 0); // 1st row
}
clock_t t1 = clock();
// time measurement for GPU calculation
clock_t t2 = clock();
gpuKernel (a, b, c, n);
clock_t t3 = clock();
// check correctness of calculation
float mse;
if (m<=10) {
mse = calc_mse( c_serial, c, n*n );
} else {
mse = calc_mse( c_serial, c, n ); // 1st row
}
printf("n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n",n, (t1-t0)/1000, (t3-t2)/1000, mse);
// free allocated memory for later use
free(a);
free(b);
free(c_serial);
free(c);
return 0;
}
//-----------------------------------------------------------------------------
void fill(float* data, int size) {
for (int i=0; i<size; ++i)
data[i] = (float) (rand() % 33 - 16);
}
double calc_mse (float* data1, float* data2, int size) {
double mse = 0.0;
int i; for (i=0; i<size; i++) {
double e = data1[i]-data2[i];
e = e * e;
mse += e;
}
mse = mse / size;
return mse;
}
//-----------------------------------------------------------------------------
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x) { // one element
m(c,y,x)=0;
for(int k=0; k<n; k++) {
m(c,y,x) += m(a,y,k) * m(b,k,x);
}
}
void cpuKernel_y(float* a, float* b, float* c, int n, int y) { // one row
for(int x=0; x<n; x++)
{
cpuKernel_yx(a,b,c,n,y,x);
}
}
void cpuKernel(float* a, float* b, float* c, int n) { // entire matrix
for(int y=0; y<n; y++)
for(int x=0; x<n; x++)
{
cpuKernel_yx(a,b,c,n,y,x);
}
}
//-----------------------------------------------------------------------------
/*__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
// write your GPU kernel function here
// note that maximum # of threads per block is 1024
int bx=blockIdx.x;
int by=blockIdx.y;
int bz=blockIdx.z;
int tx=threadIdx.x;
long i,j;
float sum=0;
i= bx+(by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)/n;
j= (by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)%n;
for(long k=0; k<n; k++) {
sum += m(ad,i,k) * m(bd,k,j);
}
m(cd,i,j) = sum;
}*/
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
int tx = threadIdx.x;
int by = blockIdx.y;
int bx = blockIdx.x;
int row = bx;
float s = 0.0f;
int column;
column = (by)*(blockDim.x)+tx;
int k; for (k=0; k<n; k++)
s += m(ad,row,k) * m(bd,k,column);
m(cd,row,column) = s;
}
//-----------------------------------------------------------------------------
void gpuKernel(float* a, float* b, float* c, int n) {
// allocate memory on GPU
// copy data to GPU
// call kernelFunc
// copy the results back to CPU
// free GPU memory
float *ad, *bd, *cd;
hipMalloc((void**)&ad, n*n*sizeof(float));
hipMalloc((void**)&bd, n*n*sizeof(float));
hipMalloc((void**)&cd, n*n*sizeof(float));
hipMemcpy(ad, a, n*n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(bd, b, n*n*sizeof(float), hipMemcpyHostToDevice);
//kernelFunc <<<dim3(n/MAX_THREADS,n/MAX_THREADS,MAX_THREADS),MAX_THREADS>>> (ad,bd,cd,n);
kernelFunc<<< dim3(n,1,1), n >>>(ad, bd, cd, n);
hipMemcpy(c, cd, n*n*sizeof(float), hipMemcpyDeviceToHost);
hipFree(ad);
hipFree(bd);
hipFree(cd);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10kernelFuncPfS_S_i
.globl _Z10kernelFuncPfS_S_i
.p2align 8
.type _Z10kernelFuncPfS_S_i,@function
_Z10kernelFuncPfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_cmp_lt_i32 s8, 1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_cbranch_scc1 .LBB0_3
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s2, s14, s8
v_mov_b32_e32 v0, 0
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v2, v1
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s4, s2
s_addc_u32 s3, s5, s3
s_mov_b32 s4, s8
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v3, 31, v2
s_load_b32 s5, s[2:3], 0x0
s_add_i32 s4, s4, -1
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_add_nc_u32_e32 v2, s8, v2
s_cmp_lg_u32 s4, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v0, s5, v3
s_cbranch_scc1 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v0, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s14, s8, v[1:2]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10kernelFuncPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10kernelFuncPfS_S_i, .Lfunc_end0-_Z10kernelFuncPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10kernelFuncPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10kernelFuncPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#define m(data,y,x) data[y*n+x]
#define MAX_THREADS 1024
// ===========================> Functions Prototype <===============================
void fill(float* data, int size);
double calc_mse(float* data1, float* data2, int size);
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x);
void cpuKernel_y(float* a, float* b, float* c, int n, int y);
void cpuKernel(float* a, float* b, float* c, int n);
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n);
void gpuKernel(float* a, float* b, float* c, int n);
// =================================================================================
int main(int argc, char** argv) {
    // HIP benchmark driver: random n x n matrix product on CPU and GPU,
    // timing both paths and reporting the MSE between their results.
    struct hipDeviceProp_t p;
    hipGetDeviceProperties(&p, 0);
    printf("Device Name: %s\n", p.name);
    // get parameter from command line to build Matrix dimension
    // check for 10<=m<=13, because m>=14 do not fit in the memory of our GPU, i.e., 1GB.
    int m = 5;   // default exponent
    int n = 1;
    if(argc > 1)
        m = atoi(argv[1]);
    for (int i=0;i<m;i++)
        n*=2; // n=2^m
    // allocate memory in CPU for calculation
    float* a;
    float* b;
    float* c_serial;  // CPU reference
    float* c;         // GPU result
    a = (float*)malloc(n*n * sizeof(float));
    b = (float*)malloc(n*n * sizeof(float));
    c_serial = (float*)malloc(n*n * sizeof(float));
    c = (float*)malloc(n*n * sizeof(float));
    // fill a, b matrices with random values between -16.0f and 16.0f
    srand(0);  // deterministic inputs
    fill(a, n*n);
    fill(b, n*n);
    // time measurement for CPU calculation
    clock_t t0 = clock();
    if (m<=10) {
        cpuKernel (a, b, c_serial, n);
    } else {
        cpuKernel_y (a, b, c_serial, n, 0); // 1st row only for large n
    }
    clock_t t1 = clock();
    // time measurement for GPU calculation
    clock_t t2 = clock();
    gpuKernel (a, b, c, n);
    clock_t t3 = clock();
    // check correctness of calculation
    float mse;
    if (m<=10) {
        mse = calc_mse( c_serial, c, n*n );
    } else {
        mse = calc_mse( c_serial, c, n ); // 1st row
    }
    // NOTE(review): /1000 assumes CLOCKS_PER_SEC == 1e6 -- confirm on target.
    printf("n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n",n, (t1-t0)/1000, (t3-t2)/1000, mse);
    // free allocated memory for later use
    free(a);
    free(b);
    free(c_serial);
    free(c);
    return 0;
}
//-----------------------------------------------------------------------------
/* Populate data[0..size) with pseudo-random integers in [-16, 16] stored
 * as floats, in ascending index order (draws `size` values from rand()). */
void fill(float* data, int size) {
    int i = 0;
    while (i < size) {
        int sample = rand() % 33;        // 0..32
        data[i] = (float)(sample - 16);  // shift to -16..16
        i++;
    }
}
/* Mean squared error between two float buffers of length `size`
 * (double accumulation). Fix: size <= 0 now returns 0.0 instead of
 * performing 0.0/0 and yielding NaN. */
double calc_mse (float* data1, float* data2, int size) {
    if (size <= 0)
        return 0.0;
    double mse = 0.0;
    for (int i = 0; i < size; i++) {
        double e = data1[i] - data2[i];  // float subtract, then promote (unchanged)
        mse += e * e;
    }
    return mse / size;
}
//-----------------------------------------------------------------------------
/* c[y][x] = dot(row y of a, column x of b) for n x n row-major matrices;
 * accumulates into the destination cell in place, like the original. */
void cpuKernel_yx(float* a, float* b, float* c, int n, int y, int x) { // one element
    float* dst = c + (y * n + x);
    float* arow = a + y * n;  // start of row y in `a`
    *dst = 0;
    for (int k = 0; k < n; ++k)
        *dst += arow[k] * b[k * n + x];
}
/* Fill row `y` of c = a*b (n x n, row-major); per-element helper inlined,
 * zero-then-accumulate-in-place order preserved. */
void cpuKernel_y(float* a, float* b, float* c, int n, int y) { // one row
    float* row_c = c + y * n;
    float* row_a = a + y * n;
    for (int x = 0; x < n; ++x) {
        row_c[x] = 0;
        for (int k = 0; k < n; ++k)
            row_c[x] += row_a[k] * b[k * n + x];
    }
}
/* Serial reference c = a*b for n x n row-major matrices; helper inlined,
 * identical zero-then-accumulate-in-place element order. */
void cpuKernel(float* a, float* b, float* c, int n) { // entire matrix
    int y = 0;
    while (y < n) {
        int x = 0;
        while (x < n) {
            c[y * n + x] = 0;
            for (int k = 0; k < n; ++k)
                c[y * n + x] += a[y * n + k] * b[k * n + x];
            ++x;
        }
        ++y;
    }
}
//-----------------------------------------------------------------------------
/*__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
// write your GPU kernel function here
// note that maximum # of threads per block is 1024
int bx=blockIdx.x;
int by=blockIdx.y;
int bz=blockIdx.z;
int tx=threadIdx.x;
long i,j;
float sum=0;
i= bx+(by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)/n;
j= (by*MAX_THREADS*MAX_THREADS+bz*MAX_THREADS+tx)%n;
for(long k=0; k<n; k++) {
sum += m(ad,i,k) * m(bd,k,j);
}
m(cd,i,j) = sum;
}*/
// One element of C = A*B (n x n, row-major; m() macro expands to data[y*n+x]).
// Mapping: row = blockIdx.x, column = blockIdx.y*blockDim.x + threadIdx.x.
// No bounds check — the launch must give gridDim.x == n and
// gridDim.y * blockDim.x == n so every (row, column) in [0,n) is written once.
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
    int tx = threadIdx.x;   // lane -> column offset within its tile
    int by = blockIdx.y;    // column tile
    int bx = blockIdx.x;    // matrix row
    int row = bx;
    float s = 0.0f;         // register accumulator for the dot product
    int column;
    column = (by)*(blockDim.x)+tx;
    int k; for (k=0; k<n; k++)
        s += m(ad,row,k) * m(bd,k,column);
    m(cd,row,column) = s;
}
//-----------------------------------------------------------------------------
void gpuKernel(float* a, float* b, float* c, int n) {
// allocate memory on GPU
// copy data to GPU
// call kernelFunc
// copy the results back to CPU
// free GPU memory
float *ad, *bd, *cd;
hipMalloc((void**)&ad, n*n*sizeof(float));
hipMalloc((void**)&bd, n*n*sizeof(float));
hipMalloc((void**)&cd, n*n*sizeof(float));
hipMemcpy(ad, a, n*n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(bd, b, n*n*sizeof(float), hipMemcpyHostToDevice);
//kernelFunc <<<dim3(n/MAX_THREADS,n/MAX_THREADS,MAX_THREADS),MAX_THREADS>>> (ad,bd,cd,n);
kernelFunc<<< dim3(n,1,1), n >>>(ad, bd, cd, n);
hipMemcpy(c, cd, n*n*sizeof(float), hipMemcpyDeviceToHost);
hipFree(ad);
hipFree(bd);
hipFree(cd);
} | .text
.file "mm.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1512, %rsp # imm = 0x5E8
.cfi_def_cfa_offset 1568
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
leaq 40(%rsp), %r14
movq %r14, %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl $5, %eax
cmpl $2, %ebp
jl .LBB0_2
# %bb.1:
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
.LBB0_2:
testl %eax, %eax
movq %rax, 32(%rsp) # 8-byte Spill
movl $1, %ebx
jle .LBB0_5
# %bb.3: # %.lr.ph.preheader
# kill: def $eax killed $eax killed $rax
.p2align 4, 0x90
.LBB0_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addl %ebx, %ebx
decl %eax
jne .LBB0_4
.LBB0_5: # %._crit_edge
movl %ebx, %ebp
imull %ebp, %ebp
leaq (,%rbp,4), %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
movq %r13, %rdi
callq malloc
movq %rax, %r15
movq %r13, %rdi
callq malloc
movq %rax, %r12
movq %r13, %rdi
callq malloc
movq %rax, 8(%rsp) # 8-byte Spill
xorl %r13d, %r13d
xorl %edi, %edi
callq srand
.p2align 4, 0x90
.LBB0_6: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1041204193, %rax, %rcx # imm = 0x3E0F83E1
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
movl %ecx, %edx
shll $5, %edx
addl %ecx, %edx
negl %edx
addl %edx, %eax
addl $-16, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%r13,4)
incq %r13
cmpq %r13, %rbp
jne .LBB0_6
# %bb.7: # %.lr.ph.i54.preheader
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB0_8: # %.lr.ph.i54
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1041204193, %rax, %rcx # imm = 0x3E0F83E1
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
movl %ecx, %edx
shll $5, %edx
addl %ecx, %edx
negl %edx
addl %edx, %eax
addl $-16, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r15,%r13,4)
incq %r13
cmpq %r13, %rbp
jne .LBB0_8
# %bb.9: # %_Z4fillPfi.exit58
callq clock
cmpl $11, 32(%rsp) # 4-byte Folded Reload
movq %rax, 24(%rsp) # 8-byte Spill
jge .LBB0_17
# %bb.10:
testl %ebx, %ebx
movq 8(%rsp), %r13 # 8-byte Reload
jle .LBB0_22
# %bb.11: # %.preheader.lr.ph.i
movl %ebx, %eax
leaq (,%rax,4), %rcx
xorl %edx, %edx
movq %r14, %rsi
.p2align 4, 0x90
.LBB0_12: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB0_13 Depth 2
# Child Loop BB0_14 Depth 3
movl %edx, %edi
imull %eax, %edi
leaq (%r12,%rdi,4), %rdi
movq %r15, %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB0_13: # %.lr.ph.i.i
# Parent Loop BB0_12 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_14 Depth 3
movl $0, (%rdi,%r9,4)
xorps %xmm0, %xmm0
movq %r8, %r10
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB0_14: # Parent Loop BB0_12 Depth=1
# Parent Loop BB0_13 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rsi,%r11), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r10), %xmm1
addss %xmm1, %xmm0
addq $4, %r11
addq %rcx, %r10
cmpq %r11, %rcx
jne .LBB0_14
# %bb.15: # %_Z12cpuKernel_yxPfS_S_iii.exit.i
# in Loop: Header=BB0_13 Depth=2
movss %xmm0, (%rdi,%r9,4)
incq %r9
addq $4, %r8
cmpq %rax, %r9
jne .LBB0_13
# %bb.16: # %._crit_edge.i
# in Loop: Header=BB0_12 Depth=1
incq %rdx
addq %rcx, %rsi
cmpq %rax, %rdx
jne .LBB0_12
jmp .LBB0_22
.LBB0_17:
testl %ebx, %ebx
movq 8(%rsp), %r13 # 8-byte Reload
jle .LBB0_22
# %bb.18: # %.lr.ph.i64
movl %ebx, %eax
leaq (,%rax,4), %rcx
xorl %edx, %edx
movq %r15, %rsi
.p2align 4, 0x90
.LBB0_19: # %.lr.ph.i.i67
# =>This Loop Header: Depth=1
# Child Loop BB0_20 Depth 2
movl $0, (%r12,%rdx,4)
xorps %xmm0, %xmm0
movq %rsi, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB0_20: # Parent Loop BB0_19 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%r8), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rdi), %xmm1
addss %xmm1, %xmm0
addq $4, %r8
addq %rcx, %rdi
cmpq %r8, %rcx
jne .LBB0_20
# %bb.21: # %_Z12cpuKernel_yxPfS_S_iii.exit.i73
# in Loop: Header=BB0_19 Depth=1
movss %xmm0, (%r12,%rdx,4)
incq %rdx
addq $4, %rsi
cmpq %rax, %rdx
jne .LBB0_19
.LBB0_22: # %_Z9cpuKernelPfS_S_i.exit
callq clock
movq %rax, 16(%rsp) # 8-byte Spill
callq clock
movq %rax, 8(%rsp) # 8-byte Spill
movq %r14, %rdi
movq %r15, %rsi
movq %r13, %rdx
movl %ebx, %ecx
callq _Z9gpuKernelPfS_S_i
callq clock
movq %rax, %rcx
cmpl $10, 32(%rsp) # 4-byte Folded Reload
jg .LBB0_26
# %bb.23: # %.lr.ph.i78.preheader
xorps %xmm0, %xmm0
xorl %eax, %eax
movq 24(%rsp), %rsi # 8-byte Reload
.p2align 4, 0x90
.LBB0_24: # %.lr.ph.i78
# =>This Inner Loop Header: Depth=1
movss (%r12,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
subss (%r13,%rax,4), %xmm1
cvtss2sd %xmm1, %xmm1
mulsd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rax
cmpq %rax, %rbp
jne .LBB0_24
jmp .LBB0_31
.LBB0_26:
testl %ebx, %ebx
movq 24(%rsp), %rsi # 8-byte Reload
jle .LBB0_30
# %bb.27: # %.lr.ph.preheader.i
movl %ebx, %eax
xorps %xmm0, %xmm0
xorl %edx, %edx
movq 8(%rsp), %rdi # 8-byte Reload
.p2align 4, 0x90
.LBB0_28: # %.lr.ph.i84
# =>This Inner Loop Header: Depth=1
movss (%r12,%rdx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
subss (%r13,%rdx,4), %xmm1
cvtss2sd %xmm1, %xmm1
mulsd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rdx
cmpq %rdx, %rax
jne .LBB0_28
# %bb.29:
movl %ebx, %ebp
jmp .LBB0_32
.LBB0_30:
xorps %xmm0, %xmm0
movl %ebx, %ebp
.LBB0_31: # %_Z8calc_msePfS_i.exit
movq 8(%rsp), %rdi # 8-byte Reload
.LBB0_32: # %_Z8calc_msePfS_i.exit
movq 16(%rsp), %rax # 8-byte Reload
xorps %xmm1, %xmm1
cvtsi2sd %ebp, %xmm1
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
subq %rsi, %rax
movabsq $2361183241434822607, %rsi # imm = 0x20C49BA5E353F7CF
imulq %rsi
movq %rdx, %r8
movq %rdx, %rax
shrq $63, %rax
sarq $7, %r8
addq %rax, %r8
subq %rdi, %rcx
movq %rcx, %rax
imulq %rsi
movq %rdx, %rcx
shrq $63, %rcx
sarq $7, %rdx
addq %rdx, %rcx
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movl %ebx, %esi
movq %r8, %rdx
movb $1, %al
callq printf
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq %r13, %rdi
callq free
xorl %eax, %eax
addq $1512, %rsp # imm = 0x5E8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.globl _Z4fillPfi # -- Begin function _Z4fillPfi
.p2align 4, 0x90
.type _Z4fillPfi,@function
_Z4fillPfi: # @_Z4fillPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1041204193, %rax, %rcx # imm = 0x3E0F83E1
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
movl %ecx, %edx
shll $5, %edx
addl %ecx, %edx
negl %edx
addl %edx, %eax
addl $-16, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z4fillPfi, .Lfunc_end1-_Z4fillPfi
.cfi_endproc
# -- End function
.globl _Z9cpuKernelPfS_S_i # -- Begin function _Z9cpuKernelPfS_S_i
.p2align 4, 0x90
.type _Z9cpuKernelPfS_S_i,@function
_Z9cpuKernelPfS_S_i: # @_Z9cpuKernelPfS_S_i
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB2_8
# %bb.1: # %.preheader.lr.ph
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl %ecx, %eax
leaq (,%rax,4), %rcx
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
# Child Loop BB2_4 Depth 3
movl %r8d, %r9d
imull %eax, %r9d
leaq (%rdx,%r9,4), %r9
movq %rsi, %r10
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i
# Parent Loop BB2_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_4 Depth 3
movl $0, (%r9,%r11,4)
xorps %xmm0, %xmm0
movq %r10, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_4: # Parent Loop BB2_2 Depth=1
# Parent Loop BB2_3 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rdi,%r14), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rbx), %xmm1
addss %xmm1, %xmm0
movss %xmm0, (%r9,%r11,4)
addq $4, %r14
addq %rcx, %rbx
cmpq %r14, %rcx
jne .LBB2_4
# %bb.5: # %_Z12cpuKernel_yxPfS_S_iii.exit
# in Loop: Header=BB2_3 Depth=2
incq %r11
addq $4, %r10
cmpq %rax, %r11
jne .LBB2_3
# %bb.6: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
incq %r8
addq %rcx, %rdi
cmpq %rax, %r8
jne .LBB2_2
# %bb.7:
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.LBB2_8: # %._crit_edge17
retq
.Lfunc_end2:
.size _Z9cpuKernelPfS_S_i, .Lfunc_end2-_Z9cpuKernelPfS_S_i
.cfi_endproc
# -- End function
.globl _Z11cpuKernel_yPfS_S_ii # -- Begin function _Z11cpuKernel_yPfS_S_ii
.p2align 4, 0x90
.type _Z11cpuKernel_yPfS_S_ii,@function
_Z11cpuKernel_yPfS_S_ii: # @_Z11cpuKernel_yPfS_S_ii
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB3_5
# %bb.1: # %.lr.ph
imull %ecx, %r8d
movslq %r8d, %r8
movl %ecx, %eax
leaq (%rdi,%r8,4), %rcx
leaq (%rdx,%r8,4), %rdx
leaq (,%rax,4), %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph.i
# =>This Loop Header: Depth=1
# Child Loop BB3_3 Depth 2
movl $0, (%rdx,%r8,4)
xorps %xmm0, %xmm0
movq %rsi, %r9
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB3_3: # Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rcx,%r10), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r9), %xmm1
addss %xmm1, %xmm0
movss %xmm0, (%rdx,%r8,4)
addq $4, %r10
addq %rdi, %r9
cmpq %r10, %rdi
jne .LBB3_3
# %bb.4: # %_Z12cpuKernel_yxPfS_S_iii.exit
# in Loop: Header=BB3_2 Depth=1
incq %r8
addq $4, %rsi
cmpq %rax, %r8
jne .LBB3_2
.LBB3_5: # %._crit_edge
retq
.Lfunc_end3:
.size _Z11cpuKernel_yPfS_S_ii, .Lfunc_end3-_Z11cpuKernel_yPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z9gpuKernelPfS_S_i # -- Begin function _Z9gpuKernelPfS_S_i
.p2align 4, 0x90
.type _Z9gpuKernelPfS_S_i,@function
_Z9gpuKernelPfS_S_i: # @_Z9gpuKernelPfS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %ebp
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r12
movl %ecx, %r14d
imull %r14d, %r14d
shlq $2, %r14
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %ebp, %eax
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rax, %rdi
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebp, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z10kernelFuncPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z9gpuKernelPfS_S_i, .Lfunc_end4-_Z9gpuKernelPfS_S_i
.cfi_endproc
# -- End function
.globl _Z8calc_msePfS_i # -- Begin function _Z8calc_msePfS_i
.p2align 4, 0x90
.type _Z8calc_msePfS_i,@function
_Z8calc_msePfS_i: # @_Z8calc_msePfS_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB5_1
# %bb.2: # %.lr.ph.preheader
movl %edx, %eax
xorpd %xmm0, %xmm0
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
subss (%rsi,%rcx,4), %xmm1
cvtss2sd %xmm1, %xmm1
mulsd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rcx
cmpq %rcx, %rax
jne .LBB5_3
jmp .LBB5_4
.LBB5_1:
xorpd %xmm0, %xmm0
.LBB5_4: # %._crit_edge
xorps %xmm1, %xmm1
cvtsi2sd %edx, %xmm1
divsd %xmm1, %xmm0
retq
.Lfunc_end5:
.size _Z8calc_msePfS_i, .Lfunc_end5-_Z8calc_msePfS_i
.cfi_endproc
# -- End function
.globl _Z12cpuKernel_yxPfS_S_iii # -- Begin function _Z12cpuKernel_yxPfS_S_iii
.p2align 4, 0x90
.type _Z12cpuKernel_yxPfS_S_iii,@function
_Z12cpuKernel_yxPfS_S_iii: # @_Z12cpuKernel_yxPfS_S_iii
.cfi_startproc
# %bb.0:
# kill: def $r9d killed $r9d def $r9
# kill: def $r8d killed $r8d def $r8
imull %ecx, %r8d
leal (%r8,%r9), %eax
cltq
movl $0, (%rdx,%rax,4)
testl %ecx, %ecx
jle .LBB6_3
# %bb.1: # %.lr.ph
movss (%rdx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movslq %r8d, %r8
movslq %r9d, %r9
movl %ecx, %ecx
leaq (%rdi,%r8,4), %rdi
leaq (%rsi,%r9,4), %rsi
leaq (,%rcx,4), %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB6_2: # =>This Inner Loop Header: Depth=1
movss (%rdi,%r9,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rsi), %xmm1
addss %xmm1, %xmm0
movss %xmm0, (%rdx,%rax,4)
incq %r9
addq %r8, %rsi
cmpq %r9, %rcx
jne .LBB6_2
.LBB6_3: # %._crit_edge
retq
.Lfunc_end6:
.size _Z12cpuKernel_yxPfS_S_iii, .Lfunc_end6-_Z12cpuKernel_yxPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z25__device_stub__kernelFuncPfS_S_i # -- Begin function _Z25__device_stub__kernelFuncPfS_S_i
.p2align 4, 0x90
.type _Z25__device_stub__kernelFuncPfS_S_i,@function
_Z25__device_stub__kernelFuncPfS_S_i: # @_Z25__device_stub__kernelFuncPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10kernelFuncPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end7:
.size _Z25__device_stub__kernelFuncPfS_S_i, .Lfunc_end7-_Z25__device_stub__kernelFuncPfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10kernelFuncPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Device Name: %s\n"
.size .L.str, 17
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n"
.size .L.str.1, 40
.type _Z10kernelFuncPfS_S_i,@object # @_Z10kernelFuncPfS_S_i
.section .rodata,"a",@progbits
.globl _Z10kernelFuncPfS_S_i
.p2align 3, 0x0
_Z10kernelFuncPfS_S_i:
.quad _Z25__device_stub__kernelFuncPfS_S_i
.size _Z10kernelFuncPfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10kernelFuncPfS_S_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__kernelFuncPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10kernelFuncPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10kernelFuncPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e220000002600 */
/*0020*/ MOV R0, c[0x0][0x178] ; /* 0x00005e0000007a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R21, -RZ, RZ, 0, 0 ; /* 0x00000000ff157435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fc60003f06270 */
/*0070*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e620000002500 */
/*0080*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fe400078e0203 */
/*0090*/ IMAD R4, R4, c[0x0][0x178], RZ ; /* 0x00005e0004047a24 */
/* 0x002fce00078e02ff */
/*00a0*/ @!P0 BRA 0xb90 ; /* 0x00000ae000008947 */
/* 0x000fea0003800000 */
/*00b0*/ IADD3 R3, R0.reuse, -0x1, RZ ; /* 0xffffffff00037810 */
/* 0x040fe40007ffe0ff */
/*00c0*/ LOP3.LUT R5, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300057812 */
/* 0x000fe400078ec0ff */
/*00d0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe40003f06070 */
/*00e0*/ MOV R21, RZ ; /* 0x000000ff00157202 */
/* 0x000fe40000000f00 */
/*00f0*/ MOV R3, RZ ; /* 0x000000ff00037202 */
/* 0x000fd20000000f00 */
/*0100*/ @!P0 BRA 0xa90 ; /* 0x0000098000008947 */
/* 0x000fea0003800000 */
/*0110*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */
/* 0x000fe20007ffe1ff */
/*0120*/ HFMA2.MMA R19, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff137435 */
/* 0x000fe200000001ff */
/*0130*/ MOV R21, RZ ; /* 0x000000ff00157202 */
/* 0x000fe20000000f00 */
/*0140*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */
/* 0x000fe200000001ff */
/*0150*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f04270 */
/*0160*/ MOV R8, c[0x0][0x160] ; /* 0x0000580000087a02 */
/* 0x000fe40000000f00 */
/*0170*/ MOV R7, c[0x0][0x164] ; /* 0x0000590000077a02 */
/* 0x000fc60000000f00 */
/*0180*/ IMAD.WIDE R18, R2, R19, c[0x0][0x168] ; /* 0x00005a0002127625 */
/* 0x000fcc00078e0213 */
/*0190*/ @!P0 BRA 0x910 ; /* 0x0000077000008947 */
/* 0x000fea0003800000 */
/*01a0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*01b0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01c0*/ @!P1 BRA 0x660 ; /* 0x0000049000009947 */
/* 0x000fea0003800000 */
/*01d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*01e0*/ MOV R9, R7 ; /* 0x0000000700097202 */
/* 0x000fe20000000f00 */
/*01f0*/ LDG.E R10, [R18.64] ; /* 0x00000004120a7981 */
/* 0x0000a8000c1e1900 */
/*0200*/ IMAD.WIDE R12, R4, 0x4, R8 ; /* 0x00000004040c7825 */
/* 0x000fc800078e0208 */
/*0210*/ IMAD.WIDE R22, R0.reuse, 0x4, R18 ; /* 0x0000000400167825 */
/* 0x040fe200078e0212 */
/*0220*/ LDG.E R25, [R12.64] ; /* 0x000000040c197981 */
/* 0x000ea8000c1e1900 */
/*0230*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */
/* 0x0002e8000c1e1900 */
/*0240*/ LDG.E R11, [R12.64+0x4] ; /* 0x000004040c0b7981 */
/* 0x000ee8000c1e1900 */
/*0250*/ LDG.E R14, [R12.64+0x8] ; /* 0x000008040c0e7981 */
/* 0x000f22000c1e1900 */
/*0260*/ IMAD.WIDE R22, R0, 0x4, R22 ; /* 0x0000000400167825 */
/* 0x002fc600078e0216 */
/*0270*/ LDG.E R19, [R12.64+0x10] ; /* 0x000010040c137981 */
/* 0x001f68000c1e1900 */
/*0280*/ LDG.E R15, [R22.64] ; /* 0x00000004160f7981 */
/* 0x000122000c1e1900 */
/*0290*/ IMAD.WIDE R16, R0, 0x4, R22 ; /* 0x0000000400107825 */
/* 0x000fc600078e0216 */
/*02a0*/ LDG.E R9, [R12.64+0x14] ; /* 0x000014040c097981 */
/* 0x000f66000c1e1900 */
/*02b0*/ IMAD.WIDE R26, R0.reuse, 0x4, R16 ; /* 0x00000004001a7825 */
/* 0x040fe400078e0210 */
/*02c0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000368000c1e1900 */
/*02d0*/ IMAD.WIDE R28, R0.reuse, 0x4, R26 ; /* 0x00000004001c7825 */
/* 0x040fe200078e021a */
/*02e0*/ LDG.E R18, [R26.64] ; /* 0x000000041a127981 */
/* 0x000168000c1e1900 */
/*02f0*/ LDG.E R20, [R28.64] ; /* 0x000000041c147981 */
/* 0x000968000c1e1900 */
/*0300*/ LDG.E R17, [R12.64+0xc] ; /* 0x00000c040c117981 */
/* 0x002f62000c1e1900 */
/*0310*/ IMAD.WIDE R22, R0, 0x4, R28 ; /* 0x0000000400167825 */
/* 0x001fc600078e021c */
/*0320*/ LDG.E R26, [R12.64+0x1c] ; /* 0x00001c040c1a7981 */
/* 0x000f62000c1e1900 */
/*0330*/ FFMA R25, R10, R25, R21 ; /* 0x000000190a197223 */
/* 0x004fc60000000015 */
/*0340*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */
/* 0x0000a8000c1e1900 */
/*0350*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */
/* 0x000ea2000c1e1900 */
/*0360*/ IMAD.WIDE R22, R0, 0x4, R22 ; /* 0x0000000400167825 */
/* 0x001fc800078e0216 */
/*0370*/ FFMA R27, R24, R11, R25 ; /* 0x0000000b181b7223 */
/* 0x008fe40000000019 */
/*0380*/ IMAD.WIDE R24, R0.reuse, 0x4, R22 ; /* 0x0000000400187825 */
/* 0x040fe200078e0216 */
/*0390*/ LDG.E R11, [R22.64] ; /* 0x00000004160b7981 */
/* 0x0000e6000c1e1900 */
/*03a0*/ FFMA R29, R15, R14, R27 ; /* 0x0000000e0f1d7223 */
/* 0x010fe2000000001b */
/*03b0*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */
/* 0x000328000c1e1900 */
/*03c0*/ LDG.E R27, [R12.64+0x20] ; /* 0x000020040c1b7981 */
/* 0x000f22000c1e1900 */
/*03d0*/ IMAD.WIDE R14, R0, 0x4, R24 ; /* 0x00000004000e7825 */
/* 0x000fc600078e0218 */
/*03e0*/ LDG.E R25, [R12.64+0x28] ; /* 0x000028040c197981 */
/* 0x002f22000c1e1900 */
/*03f0*/ FFMA R29, R16, R17, R29 ; /* 0x00000011101d7223 */
/* 0x020fe4000000001d */
/*0400*/ IMAD.WIDE R16, R0, 0x4, R14 ; /* 0x0000000400107825 */
/* 0x000fe400078e020e */
/*0410*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000364000c1e1900 */
/*0420*/ FFMA R29, R18, R19, R29 ; /* 0x00000013121d7223 */
/* 0x000fe4000000001d */
/*0430*/ IMAD.WIDE R18, R0, 0x4, R16 ; /* 0x0000000400127825 */
/* 0x000fe400078e0210 */
/*0440*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000364000c1e1900 */
/*0450*/ FFMA R20, R20, R9, R29 ; /* 0x0000000914147223 */
/* 0x000fc4000000001d */
/*0460*/ LDG.E R9, [R12.64+0x24] ; /* 0x000024040c097981 */
/* 0x000f62000c1e1900 */
/*0470*/ IMAD.WIDE R22, R0, 0x4, R18 ; /* 0x0000000400167825 */
/* 0x001fc600078e0212 */
/*0480*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000768000c1e1900 */
/*0490*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */
/* 0x002f68000c1e1900 */
/*04a0*/ LDG.E R29, [R22.64] ; /* 0x00000004161d7981 */
/* 0x000162000c1e1900 */
/*04b0*/ FFMA R10, R10, R21, R20 ; /* 0x000000150a0a7223 */
/* 0x004fe40000000014 */
/*04c0*/ IMAD.WIDE R20, R0, 0x4, R22 ; /* 0x0000000400147825 */
/* 0x000fc400078e0216 */
/*04d0*/ LDG.E R22, [R12.64+0x38] ; /* 0x000038040c167981 */
/* 0x001ea8000c1e1900 */
/*04e0*/ LDG.E R17, [R20.64] ; /* 0x0000000414117981 */
/* 0x0008a2000c1e1900 */
/*04f0*/ FFMA R18, R11, R26, R10 ; /* 0x0000001a0b127223 */
/* 0x008fc6000000000a */
/*0500*/ LDG.E R26, [R12.64+0x30] ; /* 0x000030040c1a7981 */
/* 0x000ee2000c1e1900 */
/*0510*/ IMAD.WIDE R10, R0, 0x4, R20 ; /* 0x00000004000a7825 */
/* 0x000fc800078e0214 */
/*0520*/ FFMA R20, R28, R27, R18 ; /* 0x0000001b1c147223 */
/* 0x010fe40000000012 */
/*0530*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0540*/ IMAD.WIDE R18, R0, 0x4, R10 ; /* 0x0000000400127825 */
/* 0x000fc600078e020a */
/*0550*/ LDG.E R27, [R10.64] ; /* 0x000000040a1b7981 */
/* 0x000128000c1e1900 */
/*0560*/ LDG.E R23, [R18.64] ; /* 0x0000000412177981 */
/* 0x000328000c1e1900 */
/*0570*/ LDG.E R10, [R12.64+0x3c] ; /* 0x00003c040c0a7981 */
/* 0x001f22000c1e1900 */
/*0580*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fe20007ffe0ff */
/*0590*/ FFMA R9, R14, R9, R20 ; /* 0x000000090e097223 */
/* 0x020fc80000000014 */
/*05a0*/ FFMA R9, R16, R25, R9 ; /* 0x0000001910097223 */
/* 0x000fe20000000009 */
/*05b0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fc60003f24270 */
/*05c0*/ FFMA R9, R24, R15, R9 ; /* 0x0000000f18097223 */
/* 0x000fe20000000009 */
/*05d0*/ IADD3 R8, P2, R8, 0x40, RZ ; /* 0x0000004008087810 */
/* 0x000fe20007f5e0ff */
/*05e0*/ IMAD.WIDE R18, R0, 0x4, R18 ; /* 0x0000000400127825 */
/* 0x002fe200078e0212 */
/*05f0*/ IADD3 R3, R3, 0x10, RZ ; /* 0x0000001003037810 */
/* 0x000fe40007ffe0ff */
/*0600*/ IADD3.X R7, RZ, R7, RZ, P2, !PT ; /* 0x00000007ff077210 */
/* 0x000fe200017fe4ff */
/*0610*/ FFMA R9, R29, R26, R9 ; /* 0x0000001a1d097223 */
/* 0x008fc80000000009 */
/*0620*/ FFMA R9, R17, R28, R9 ; /* 0x0000001c11097223 */
/* 0x004fc80000000009 */
/*0630*/ FFMA R9, R27, R22, R9 ; /* 0x000000161b097223 */
/* 0x010fc80000000009 */
/*0640*/ FFMA R21, R23, R10, R9 ; /* 0x0000000a17157223 */
/* 0x000fe20000000009 */
/*0650*/ @P1 BRA 0x1e0 ; /* 0xfffffb8000001947 */
/* 0x000fea000383ffff */
/*0660*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*0670*/ @!P1 BRA 0x8f0 ; /* 0x0000027000009947 */
/* 0x000fea0003800000 */
/*0680*/ MOV R9, R7 ; /* 0x0000000700097202 */
/* 0x000fe20000000f00 */
/*0690*/ LDG.E R20, [R18.64] ; /* 0x0000000412147981 */
/* 0x0000a8000c1e1900 */
/*06a0*/ IMAD.WIDE R10, R4, 0x4, R8 ; /* 0x00000004040a7825 */
/* 0x000fca00078e0208 */
/*06b0*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */
/* 0x000ea2000c1e1900 */
/*06c0*/ IMAD.WIDE R22, R0, 0x4, R18 ; /* 0x0000000400167825 */
/* 0x000fc600078e0212 */
/*06d0*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*06e0*/ IMAD.WIDE R12, R0.reuse, 0x4, R22 ; /* 0x00000004000c7825 */
/* 0x040fe200078e0216 */
/*06f0*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */
/* 0x0002e8000c1e1900 */
/*0700*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */
/* 0x000962000c1e1900 */
/*0710*/ IMAD.WIDE R14, R0, 0x4, R12 ; /* 0x00000004000e7825 */
/* 0x000fc600078e020c */
/*0720*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */
/* 0x000f66000c1e1900 */
/*0730*/ IMAD.WIDE R16, R0.reuse, 0x4, R14 ; /* 0x0000000400107825 */
/* 0x040fe200078e020e */
/*0740*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */
/* 0x000f68000c1e1900 */
/*0750*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000962000c1e1900 */
/*0760*/ IMAD.WIDE R18, R0, 0x4, R16 ; /* 0x0000000400127825 */
/* 0x001fc600078e0210 */
/*0770*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000166000c1e1900 */
/*0780*/ IMAD.WIDE R22, R0.reuse, 0x4, R18 ; /* 0x0000000400167825 */
/* 0x042fe200078e0212 */
/*0790*/ LDG.E R28, [R18.64] ; /* 0x00000004121c7981 */
/* 0x000368000c1e1900 */
/*07a0*/ LDG.E R15, [R10.64+0x10] ; /* 0x000010040a0f7981 */
/* 0x010f22000c1e1900 */
/*07b0*/ IMAD.WIDE R12, R0, 0x4, R22 ; /* 0x00000004000c7825 */
/* 0x000fc600078e0216 */
/*07c0*/ LDG.E R18, [R10.64+0x1c] ; /* 0x00001c040a127981 */
/* 0x002f28000c1e1900 */
/*07d0*/ LDG.E R17, [R12.64] ; /* 0x000000040c117981 */
/* 0x001f22000c1e1900 */
/*07e0*/ FFMA R19, R20, R9, R21 ; /* 0x0000000914137223 */
/* 0x004fc60000000015 */
/*07f0*/ LDG.E R21, [R10.64+0x14] ; /* 0x000014040a157981 */
/* 0x000ea8000c1e1900 */
/*0800*/ LDG.E R9, [R22.64] ; /* 0x0000000416097981 */
/* 0x000ea8000c1e1900 */
/*0810*/ LDG.E R20, [R10.64+0x18] ; /* 0x000018040a147981 */
/* 0x000ea2000c1e1900 */
/*0820*/ FFMA R24, R24, R25, R19 ; /* 0x0000001918187223 */
/* 0x008fc80000000013 */
/*0830*/ FFMA R24, R26, R27, R24 ; /* 0x0000001b1a187223 */
/* 0x020fc80000000018 */
/*0840*/ FFMA R14, R14, R29, R24 ; /* 0x0000001d0e0e7223 */
/* 0x000fe20000000018 */
/*0850*/ IADD3 R8, P1, R8, 0x20, RZ ; /* 0x0000002008087810 */
/* 0x000fc60007f3e0ff */
/*0860*/ FFMA R14, R16, R15, R14 ; /* 0x0000000f100e7223 */
/* 0x010fe2000000000e */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0880*/ IADD3 R3, R3, 0x8, RZ ; /* 0x0000000803037810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe40007ffe0ff */
/*08a0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*08b0*/ FFMA R14, R28, R21, R14 ; /* 0x000000151c0e7223 */
/* 0x004fc8000000000e */
/*08c0*/ FFMA R9, R9, R20, R14 ; /* 0x0000001409097223 */
/* 0x000fc8000000000e */
/*08d0*/ FFMA R21, R17, R18, R9 ; /* 0x0000001211157223 */
/* 0x000fe40000000009 */
/*08e0*/ IMAD.WIDE R18, R0, 0x4, R12 ; /* 0x0000000400127825 */
/* 0x000fc800078e020c */
/*08f0*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0900*/ @!P0 BRA 0xa90 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*0910*/ MOV R9, R7 ; /* 0x0000000700097202 */
/* 0x000fe20000000f00 */
/*0920*/ IMAD.WIDE R12, R0, 0x4, R18 ; /* 0x00000004000c7825 */
/* 0x000fe400078e0212 */
/*0930*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ea4000c1e1900 */
/*0940*/ IMAD.WIDE R10, R4, 0x4, R8 ; /* 0x00000004040a7825 */
/* 0x000fc800078e0208 */
/*0950*/ IMAD.WIDE R14, R0.reuse, 0x4, R12 ; /* 0x00000004000e7825 */
/* 0x040fe200078e020c */
/*0960*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */
/* 0x000ea8000c1e1900 */
/*0970*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000ee2000c1e1900 */
/*0980*/ IMAD.WIDE R16, R0, 0x4, R14 ; /* 0x0000000400107825 */
/* 0x000fc600078e020e */
/*0990*/ LDG.E R20, [R10.64+0x4] ; /* 0x000004040a147981 */
/* 0x000ee8000c1e1900 */
/*09a0*/ LDG.E R22, [R14.64] ; /* 0x000000040e167981 */
/* 0x000f28000c1e1900 */
/*09b0*/ LDG.E R23, [R10.64+0x8] ; /* 0x000008040a177981 */
/* 0x000f28000c1e1900 */
/*09c0*/ LDG.E R25, [R10.64+0xc] ; /* 0x00000c040a197981 */
/* 0x000f68000c1e1900 */
/*09d0*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x000f62000c1e1900 */
/*09e0*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fc80007ffe0ff */
/*09f0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f05270 */
/*0a00*/ IADD3 R8, P1, R8, 0x10, RZ ; /* 0x0000001008087810 */
/* 0x000fe40007f3e0ff */
/*0a10*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */
/* 0x000fe40007ffe0ff */
/*0a20*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0a30*/ FFMA R9, R18, R9, R21 ; /* 0x0000000912097223 */
/* 0x004fc80000000015 */
/*0a40*/ FFMA R9, R12, R20, R9 ; /* 0x000000140c097223 */
/* 0x008fe40000000009 */
/*0a50*/ IMAD.WIDE R18, R0, 0x4, R16 ; /* 0x0000000400127825 */
/* 0x000fc800078e0210 */
/*0a60*/ FFMA R9, R22, R23, R9 ; /* 0x0000001716097223 */
/* 0x010fc80000000009 */
/*0a70*/ FFMA R21, R24, R25, R9 ; /* 0x0000001918157223 */
/* 0x020fe20000000009 */
/*0a80*/ @P0 BRA 0x910 ; /* 0xfffffe8000000947 */
/* 0x000fea000383ffff */
/*0a90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0aa0*/ @!P0 BRA 0xb90 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0ab0*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fe200000001ff */
/*0ac0*/ IADD3 R6, R4, R3, RZ ; /* 0x0000000304067210 */
/* 0x000fe20007ffe0ff */
/*0ad0*/ IMAD R3, R3, c[0x0][0x178], R2 ; /* 0x00005e0003037a24 */
/* 0x000fd000078e0202 */
/*0ae0*/ IMAD.WIDE R6, R6, R8, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0208 */
/*0af0*/ IMAD.WIDE R8, R3, R8, c[0x0][0x168] ; /* 0x00005a0003087625 */
/* 0x000fca00078e0208 */
/*0b00*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */
/* 0x0000a8000c1e1900 */
/*0b10*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */
/* 0x0002a2000c1e1900 */
/*0b20*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fc80007ffe0ff */
/*0b30*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0b40*/ IMAD.WIDE R8, R0, 0x4, R8 ; /* 0x0000000400087825 */
/* 0x001fe200078e0208 */
/*0b50*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0b60*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0b70*/ FFMA R21, R10, R3, R21 ; /* 0x000000030a157223 */
/* 0x004fcc0000000015 */
/*0b80*/ @P0 BRA 0xb00 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0b90*/ IADD3 R2, R2, R4, RZ ; /* 0x0000000402027210 */
/* 0x000fe40007ffe0ff */
/*0ba0*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fca0000000f00 */
/*0bb0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */
/* 0x000fca00078e0203 */
/*0bc0*/ STG.E [R2.64], R21 ; /* 0x0000001502007986 */
/* 0x000fe2000c101904 */
/*0bd0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0be0*/ BRA 0xbe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0bf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10kernelFuncPfS_S_i
.globl _Z10kernelFuncPfS_S_i
.p2align 8
.type _Z10kernelFuncPfS_S_i,@function
_Z10kernelFuncPfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_cmp_lt_i32 s8, 1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_cbranch_scc1 .LBB0_3
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s2, s14, s8
v_mov_b32_e32 v0, 0
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v2, v1
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s4, s2
s_addc_u32 s3, s5, s3
s_mov_b32 s4, s8
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v3, 31, v2
s_load_b32 s5, s[2:3], 0x0
s_add_i32 s4, s4, -1
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_add_nc_u32_e32 v2, s8, v2
s_cmp_lg_u32 s4, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v0, s5, v3
s_cbranch_scc1 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v0, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s14, s8, v[1:2]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10kernelFuncPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10kernelFuncPfS_S_i, .Lfunc_end0-_Z10kernelFuncPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10kernelFuncPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10kernelFuncPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002e861_00000000-6_mm.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z4fillPfi
.type _Z4fillPfi, @function
_Z4fillPfi:
.LFB2058:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1041204193, %rdx, %rdx
sarq $35, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
movl %edx, %ecx
sall $5, %ecx
addl %ecx, %edx
subl %edx, %eax
subl $16, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2058:
.size _Z4fillPfi, .-_Z4fillPfi
	.globl	_Z8calc_msePfS_i
	.type	_Z8calc_msePfS_i, @function
# calc_mse(float* a, float* b, int n)  — compiler-generated x86-64 (SysV ABI).
# Accumulates sum((a[k] - b[k])^2) in double precision, then divides by n.
# Inputs:  rdi = a, rsi = b, edx = n.  Result returned in xmm0 (double).
# NOTE(review): n <= 0 still performs the final divide — 0/0 yields NaN;
# present in the original source, not introduced here.
_Z8calc_msePfS_i:
.LFB2059:
	.cfi_startproc
	endbr64
	testl	%edx, %edx
	jle	.L14			# n <= 0: skip loop, sum stays 0
	movslq	%edx, %rcx
	salq	$2, %rcx		# rcx = n * sizeof(float) = loop end offset
	movl	$0, %eax		# rax = byte offset k*4
	pxor	%xmm0, %xmm0		# xmm0 = running sum (double)
.L13:
	movss	(%rdi,%rax), %xmm1
	subss	(%rsi,%rax), %xmm1	# diff = a[k] - b[k] (float)
	cvtss2sd	%xmm1, %xmm1	# widen to double before squaring
	mulsd	%xmm1, %xmm1
	addsd	%xmm1, %xmm0		# sum += diff * diff
	addq	$4, %rax
	cmpq	%rcx, %rax
	jne	.L13
.L12:
	pxor	%xmm1, %xmm1
	cvtsi2sdl	%edx, %xmm1	# (double)n
	divsd	%xmm1, %xmm0		# mean = sum / n
	ret
.L14:
	pxor	%xmm0, %xmm0		# empty input: sum = 0, fall through to divide
	jmp	.L12
	.cfi_endproc
.LFE2059:
	.size	_Z8calc_msePfS_i, .-_Z8calc_msePfS_i
	.globl	_Z12cpuKernel_yxPfS_S_iii
	.type	_Z12cpuKernel_yxPfS_S_iii, @function
# cpuKernel_yx(float* A, float* B, float* C, int n, int y, int x)
# Computes one matrix-product element:
#     C[y*n + x] = sum_k A[y*n + k] * B[k*n + x]
# Inputs (SysV ABI): rdi = A, rsi = B, rdx = C, ecx = n, r8d = y, r9d = x.
# The accumulator lives in memory (C is re-read/re-written every iteration).
_Z12cpuKernel_yxPfS_S_iii:
.LFB2060:
	.cfi_startproc
	endbr64
	movq	%rdi, %r10
	movq	%rsi, %r11
	imull	%ecx, %r8d		# r8d = y * n
	leal	(%r8,%r9), %eax		# eax = y*n + x (output index)
	cltq
	leaq	(%rdx,%rax,4), %rsi	# rsi = &C[y*n + x]
	movl	$0x00000000, (%rsi)	# C[y*n + x] = 0.0f
	testl	%ecx, %ecx
	jle	.L16			# n <= 0: nothing to accumulate
	movslq	%r8d, %r8
	leaq	(%rdi,%r8,4), %rax	# rax = &A[y*n] (start of row y)
	movslq	%ecx, %rcx
	leaq	0(,%rcx,4), %rdi	# rdi = n*4 bytes = B column stride
	movslq	%r9d, %r9
	leaq	(%r11,%r9,4), %rdx	# rdx = &B[x] (top of column x)
	addq	%rcx, %r8
	leaq	(%r10,%r8,4), %rcx	# rcx = &A[y*n + n], loop end pointer
.L18:
	movss	(%rax), %xmm0
	mulss	(%rdx), %xmm0		# A[y*n + k] * B[k*n + x]
	addss	(%rsi), %xmm0
	movss	%xmm0, (%rsi)		# accumulate into C in place
	addq	$4, %rax		# next k along A's row
	addq	%rdi, %rdx		# next k down B's column
	cmpq	%rcx, %rax
	jne	.L18
.L16:
	ret
	.cfi_endproc
.LFE2060:
	.size	_Z12cpuKernel_yxPfS_S_iii, .-_Z12cpuKernel_yxPfS_S_iii
.globl _Z11cpuKernel_yPfS_S_ii
.type _Z11cpuKernel_yPfS_S_ii, @function
_Z11cpuKernel_yPfS_S_ii:
.LFB2061:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L25
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rdi, %r12
movq %rsi, %r13
movq %rdx, %r14
movl %ecx, %ebp
movl %r8d, %r15d
movl $0, %ebx
.L22:
movl %ebx, %r9d
movl %r15d, %r8d
movl %ebp, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq %r12, %rdi
call _Z12cpuKernel_yxPfS_S_iii
addl $1, %ebx
cmpl %ebx, %ebp
jne .L22
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2061:
.size _Z11cpuKernel_yPfS_S_ii, .-_Z11cpuKernel_yPfS_S_ii
.globl _Z9cpuKernelPfS_S_i
.type _Z9cpuKernelPfS_S_i, @function
_Z9cpuKernelPfS_S_i:
.LFB2062:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rdi, %r15
movq %rsi, %r14
movq %rdx, %r13
movl %ecx, %ebp
movl $0, %r12d
testl %ecx, %ecx
jle .L28
.L29:
movl $0, %ebx
.L31:
movl %ebx, %r9d
movl %r12d, %r8d
movl %ebp, %ecx
movq %r13, %rdx
movq %r14, %rsi
movq %r15, %rdi
call _Z12cpuKernel_yxPfS_S_iii
movl %ebx, %eax
addl $1, %ebx
cmpl %ebx, %ebp
jne .L31
leal 1(%r12), %edx
cmpl %eax, %r12d
je .L28
movl %edx, %r12d
jmp .L29
.L28:
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z9cpuKernelPfS_S_i, .-_Z9cpuKernelPfS_S_i
.globl _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i
.type _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i, @function
_Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i:
.LFB2088:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L40
.L36:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L41
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10kernelFuncPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L36
.L41:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i
.globl _Z10kernelFuncPfS_S_i
.type _Z10kernelFuncPfS_S_i, @function
_Z10kernelFuncPfS_S_i:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z10kernelFuncPfS_S_i, .-_Z10kernelFuncPfS_S_i
.globl _Z9gpuKernelPfS_S_i
.type _Z9gpuKernelPfS_S_i, @function
_Z9gpuKernelPfS_S_i:
.LFB2063:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r14
movq %rsi, %r13
movq %rdx, %r12
movl %ecx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl %ecx, %ebx
imull %ecx, %ebx
movslq %ebx, %rbx
salq $2, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, 44(%rsp)
movl $1, 48(%rsp)
movl %ebp, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L48
.L45:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L49
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
movl %ebp, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z10kernelFuncPfS_S_iPfS_S_i
jmp .L45
.L49:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z9gpuKernelPfS_S_i, .-_Z9gpuKernelPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Device Name: %s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1080, %rsp
.cfi_def_cfa_offset 1136
movl %edi, %ebx
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 1064(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %r12
movl $0, %esi
movq %r12, %rdi
call cudaGetDeviceProperties_v2@PLT
movq %r12, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $5, %edx
cmpl $1, %ebx
jg .L62
.L51:
movl $0, %ebx
movl $1, %ebp
.L53:
addl %ebp, %ebp
addl $1, %ebx
cmpl %edx, %ebx
jne .L53
movl %ebp, %r15d
imull %ebp, %r15d
movslq %r15d, %r14
salq $2, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %r13
movq %r14, %rdi
call malloc@PLT
movq %rax, %r12
movq %r14, %rdi
call malloc@PLT
movq %rax, (%rsp)
movq %r14, %rdi
call malloc@PLT
movq %rax, %r14
movl $0, %edi
call srand@PLT
movl %r15d, %esi
movq %r13, %rdi
call _Z4fillPfi
movl %r15d, %esi
movq %r12, %rdi
call _Z4fillPfi
call clock@PLT
movq %rax, 24(%rsp)
cmpl $10, %ebx
jle .L56
movl $0, %r8d
movl %ebp, %ecx
movq (%rsp), %r15
movq %r15, %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z11cpuKernel_yPfS_S_ii
call clock@PLT
movq %rax, 8(%rsp)
call clock@PLT
movq %rax, %rbx
movl %ebp, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z9gpuKernelPfS_S_i
call clock@PLT
movq %rax, 16(%rsp)
movl %ebp, %edx
movq %r14, %rsi
movq %r15, %rdi
call _Z8calc_msePfS_i
cvtsd2ss %xmm0, %xmm0
jmp .L55
.L62:
movq 8(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %edx
testl %eax, %eax
jg .L51
movl $4, %edi
call malloc@PLT
movq %rax, %r13
movl $4, %edi
call malloc@PLT
movq %rax, %r12
movl $4, %edi
call malloc@PLT
movq %rax, (%rsp)
movl $4, %edi
call malloc@PLT
movq %rax, %r14
movl $0, %edi
call srand@PLT
movl $1, %esi
movq %r13, %rdi
call _Z4fillPfi
movl $1, %esi
movq %r12, %rdi
call _Z4fillPfi
call clock@PLT
movq %rax, 24(%rsp)
movl $1, %ebp
movl $1, %r15d
.L56:
movl %ebp, %ecx
movq (%rsp), %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z9cpuKernelPfS_S_i
call clock@PLT
movq %rax, 8(%rsp)
call clock@PLT
movq %rax, %rbx
movl %ebp, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z9gpuKernelPfS_S_i
call clock@PLT
movq %rax, 16(%rsp)
movl %r15d, %edx
movq %r14, %rsi
movq (%rsp), %rdi
call _Z8calc_msePfS_i
cvtsd2ss %xmm0, %xmm0
.L55:
cvtss2sd %xmm0, %xmm0
movq 16(%rsp), %rsi
subq %rbx, %rsi
movabsq $2361183241434822607, %rcx
movq %rsi, %rax
imulq %rcx
sarq $7, %rdx
movq %rdx, %r8
sarq $63, %rsi
movq 8(%rsp), %rdi
movq 24(%rsp), %rax
subq %rax, %rdi
movq %rdi, %rax
imulq %rcx
movq %rdx, %rcx
sarq $7, %rcx
sarq $63, %rdi
subq %rdi, %rcx
subq %rsi, %r8
movl %ebp, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq (%rsp), %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
movq 1064(%rsp), %rax
subq %fs:40, %rax
jne .L63
movl $0, %eax
addq $1080, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z10kernelFuncPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z10kernelFuncPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "mm.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1512, %rsp # imm = 0x5E8
.cfi_def_cfa_offset 1568
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
leaq 40(%rsp), %r14
movq %r14, %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str, %edi
movq %r14, %rsi
xorl %eax, %eax
callq printf
movl $5, %eax
cmpl $2, %ebp
jl .LBB0_2
# %bb.1:
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
.LBB0_2:
testl %eax, %eax
movq %rax, 32(%rsp) # 8-byte Spill
movl $1, %ebx
jle .LBB0_5
# %bb.3: # %.lr.ph.preheader
# kill: def $eax killed $eax killed $rax
.p2align 4, 0x90
.LBB0_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addl %ebx, %ebx
decl %eax
jne .LBB0_4
.LBB0_5: # %._crit_edge
movl %ebx, %ebp
imull %ebp, %ebp
leaq (,%rbp,4), %r13
movq %r13, %rdi
callq malloc
movq %rax, %r14
movq %r13, %rdi
callq malloc
movq %rax, %r15
movq %r13, %rdi
callq malloc
movq %rax, %r12
movq %r13, %rdi
callq malloc
movq %rax, 8(%rsp) # 8-byte Spill
xorl %r13d, %r13d
xorl %edi, %edi
callq srand
.p2align 4, 0x90
.LBB0_6: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1041204193, %rax, %rcx # imm = 0x3E0F83E1
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
movl %ecx, %edx
shll $5, %edx
addl %ecx, %edx
negl %edx
addl %edx, %eax
addl $-16, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%r13,4)
incq %r13
cmpq %r13, %rbp
jne .LBB0_6
# %bb.7: # %.lr.ph.i54.preheader
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB0_8: # %.lr.ph.i54
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1041204193, %rax, %rcx # imm = 0x3E0F83E1
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
movl %ecx, %edx
shll $5, %edx
addl %ecx, %edx
negl %edx
addl %edx, %eax
addl $-16, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r15,%r13,4)
incq %r13
cmpq %r13, %rbp
jne .LBB0_8
# %bb.9: # %_Z4fillPfi.exit58
callq clock
cmpl $11, 32(%rsp) # 4-byte Folded Reload
movq %rax, 24(%rsp) # 8-byte Spill
jge .LBB0_17
# %bb.10:
testl %ebx, %ebx
movq 8(%rsp), %r13 # 8-byte Reload
jle .LBB0_22
# %bb.11: # %.preheader.lr.ph.i
movl %ebx, %eax
leaq (,%rax,4), %rcx
xorl %edx, %edx
movq %r14, %rsi
.p2align 4, 0x90
.LBB0_12: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB0_13 Depth 2
# Child Loop BB0_14 Depth 3
movl %edx, %edi
imull %eax, %edi
leaq (%r12,%rdi,4), %rdi
movq %r15, %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB0_13: # %.lr.ph.i.i
# Parent Loop BB0_12 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_14 Depth 3
movl $0, (%rdi,%r9,4)
xorps %xmm0, %xmm0
movq %r8, %r10
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB0_14: # Parent Loop BB0_12 Depth=1
# Parent Loop BB0_13 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rsi,%r11), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r10), %xmm1
addss %xmm1, %xmm0
addq $4, %r11
addq %rcx, %r10
cmpq %r11, %rcx
jne .LBB0_14
# %bb.15: # %_Z12cpuKernel_yxPfS_S_iii.exit.i
# in Loop: Header=BB0_13 Depth=2
movss %xmm0, (%rdi,%r9,4)
incq %r9
addq $4, %r8
cmpq %rax, %r9
jne .LBB0_13
# %bb.16: # %._crit_edge.i
# in Loop: Header=BB0_12 Depth=1
incq %rdx
addq %rcx, %rsi
cmpq %rax, %rdx
jne .LBB0_12
jmp .LBB0_22
.LBB0_17:
testl %ebx, %ebx
movq 8(%rsp), %r13 # 8-byte Reload
jle .LBB0_22
# %bb.18: # %.lr.ph.i64
movl %ebx, %eax
leaq (,%rax,4), %rcx
xorl %edx, %edx
movq %r15, %rsi
.p2align 4, 0x90
.LBB0_19: # %.lr.ph.i.i67
# =>This Loop Header: Depth=1
# Child Loop BB0_20 Depth 2
movl $0, (%r12,%rdx,4)
xorps %xmm0, %xmm0
movq %rsi, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB0_20: # Parent Loop BB0_19 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%r8), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rdi), %xmm1
addss %xmm1, %xmm0
addq $4, %r8
addq %rcx, %rdi
cmpq %r8, %rcx
jne .LBB0_20
# %bb.21: # %_Z12cpuKernel_yxPfS_S_iii.exit.i73
# in Loop: Header=BB0_19 Depth=1
movss %xmm0, (%r12,%rdx,4)
incq %rdx
addq $4, %rsi
cmpq %rax, %rdx
jne .LBB0_19
.LBB0_22: # %_Z9cpuKernelPfS_S_i.exit
callq clock
movq %rax, 16(%rsp) # 8-byte Spill
callq clock
movq %rax, 8(%rsp) # 8-byte Spill
movq %r14, %rdi
movq %r15, %rsi
movq %r13, %rdx
movl %ebx, %ecx
callq _Z9gpuKernelPfS_S_i
callq clock
movq %rax, %rcx
cmpl $10, 32(%rsp) # 4-byte Folded Reload
jg .LBB0_26
# %bb.23: # %.lr.ph.i78.preheader
xorps %xmm0, %xmm0
xorl %eax, %eax
movq 24(%rsp), %rsi # 8-byte Reload
.p2align 4, 0x90
.LBB0_24: # %.lr.ph.i78
# =>This Inner Loop Header: Depth=1
movss (%r12,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
subss (%r13,%rax,4), %xmm1
cvtss2sd %xmm1, %xmm1
mulsd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rax
cmpq %rax, %rbp
jne .LBB0_24
jmp .LBB0_31
.LBB0_26:
testl %ebx, %ebx
movq 24(%rsp), %rsi # 8-byte Reload
jle .LBB0_30
# %bb.27: # %.lr.ph.preheader.i
movl %ebx, %eax
xorps %xmm0, %xmm0
xorl %edx, %edx
movq 8(%rsp), %rdi # 8-byte Reload
.p2align 4, 0x90
.LBB0_28: # %.lr.ph.i84
# =>This Inner Loop Header: Depth=1
movss (%r12,%rdx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
subss (%r13,%rdx,4), %xmm1
cvtss2sd %xmm1, %xmm1
mulsd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rdx
cmpq %rdx, %rax
jne .LBB0_28
# %bb.29:
movl %ebx, %ebp
jmp .LBB0_32
.LBB0_30:
xorps %xmm0, %xmm0
movl %ebx, %ebp
.LBB0_31: # %_Z8calc_msePfS_i.exit
movq 8(%rsp), %rdi # 8-byte Reload
.LBB0_32: # %_Z8calc_msePfS_i.exit
movq 16(%rsp), %rax # 8-byte Reload
xorps %xmm1, %xmm1
cvtsi2sd %ebp, %xmm1
divsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
subq %rsi, %rax
movabsq $2361183241434822607, %rsi # imm = 0x20C49BA5E353F7CF
imulq %rsi
movq %rdx, %r8
movq %rdx, %rax
shrq $63, %rax
sarq $7, %r8
addq %rax, %r8
subq %rdi, %rcx
movq %rcx, %rax
imulq %rsi
movq %rdx, %rcx
shrq $63, %rcx
sarq $7, %rdx
addq %rdx, %rcx
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movl %ebx, %esi
movq %r8, %rdx
movb $1, %al
callq printf
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq %r13, %rdi
callq free
xorl %eax, %eax
addq $1512, %rsp # imm = 0x5E8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.globl _Z4fillPfi # -- Begin function _Z4fillPfi
.p2align 4, 0x90
.type _Z4fillPfi,@function
_Z4fillPfi: # @_Z4fillPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1041204193, %rax, %rcx # imm = 0x3E0F83E1
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
movl %ecx, %edx
shll $5, %edx
addl %ecx, %edx
negl %edx
addl %edx, %eax
addl $-16, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z4fillPfi, .Lfunc_end1-_Z4fillPfi
.cfi_endproc
# -- End function
.globl _Z9cpuKernelPfS_S_i # -- Begin function _Z9cpuKernelPfS_S_i
.p2align 4, 0x90
.type _Z9cpuKernelPfS_S_i,@function
_Z9cpuKernelPfS_S_i: # @_Z9cpuKernelPfS_S_i
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB2_8
# %bb.1: # %.preheader.lr.ph
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl %ecx, %eax
leaq (,%rax,4), %rcx
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
# Child Loop BB2_4 Depth 3
movl %r8d, %r9d
imull %eax, %r9d
leaq (%rdx,%r9,4), %r9
movq %rsi, %r10
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i
# Parent Loop BB2_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_4 Depth 3
movl $0, (%r9,%r11,4)
xorps %xmm0, %xmm0
movq %r10, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_4: # Parent Loop BB2_2 Depth=1
# Parent Loop BB2_3 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rdi,%r14), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rbx), %xmm1
addss %xmm1, %xmm0
movss %xmm0, (%r9,%r11,4)
addq $4, %r14
addq %rcx, %rbx
cmpq %r14, %rcx
jne .LBB2_4
# %bb.5: # %_Z12cpuKernel_yxPfS_S_iii.exit
# in Loop: Header=BB2_3 Depth=2
incq %r11
addq $4, %r10
cmpq %rax, %r11
jne .LBB2_3
# %bb.6: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
incq %r8
addq %rcx, %rdi
cmpq %rax, %r8
jne .LBB2_2
# %bb.7:
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.LBB2_8: # %._crit_edge17
retq
.Lfunc_end2:
.size _Z9cpuKernelPfS_S_i, .Lfunc_end2-_Z9cpuKernelPfS_S_i
.cfi_endproc
# -- End function
.globl _Z11cpuKernel_yPfS_S_ii # -- Begin function _Z11cpuKernel_yPfS_S_ii
.p2align 4, 0x90
.type _Z11cpuKernel_yPfS_S_ii,@function
_Z11cpuKernel_yPfS_S_ii: # @_Z11cpuKernel_yPfS_S_ii
.cfi_startproc
# %bb.0:
testl %ecx, %ecx
jle .LBB3_5
# %bb.1: # %.lr.ph
imull %ecx, %r8d
movslq %r8d, %r8
movl %ecx, %eax
leaq (%rdi,%r8,4), %rcx
leaq (%rdx,%r8,4), %rdx
leaq (,%rax,4), %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB3_2: # %.lr.ph.i
# =>This Loop Header: Depth=1
# Child Loop BB3_3 Depth 2
movl $0, (%rdx,%r8,4)
xorps %xmm0, %xmm0
movq %rsi, %r9
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB3_3: # Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rcx,%r10), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r9), %xmm1
addss %xmm1, %xmm0
movss %xmm0, (%rdx,%r8,4)
addq $4, %r10
addq %rdi, %r9
cmpq %r10, %rdi
jne .LBB3_3
# %bb.4: # %_Z12cpuKernel_yxPfS_S_iii.exit
# in Loop: Header=BB3_2 Depth=1
incq %r8
addq $4, %rsi
cmpq %rax, %r8
jne .LBB3_2
.LBB3_5: # %._crit_edge
retq
.Lfunc_end3:
.size _Z11cpuKernel_yPfS_S_ii, .Lfunc_end3-_Z11cpuKernel_yPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z9gpuKernelPfS_S_i # -- Begin function _Z9gpuKernelPfS_S_i
.p2align 4, 0x90
.type _Z9gpuKernelPfS_S_i,@function
_Z9gpuKernelPfS_S_i: # @_Z9gpuKernelPfS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %ebp
movq %rdx, %rbx
movq %rsi, %r15
movq %rdi, %r12
movl %ecx, %r14d
imull %r14d, %r14d
shlq $2, %r14
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %ebp, %eax
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rax, %rdi
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebp, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z10kernelFuncPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z9gpuKernelPfS_S_i, .Lfunc_end4-_Z9gpuKernelPfS_S_i
.cfi_endproc
# -- End function
.globl _Z8calc_msePfS_i # -- Begin function _Z8calc_msePfS_i
.p2align 4, 0x90
.type _Z8calc_msePfS_i,@function
_Z8calc_msePfS_i: # @_Z8calc_msePfS_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB5_1
# %bb.2: # %.lr.ph.preheader
movl %edx, %eax
xorpd %xmm0, %xmm0
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
subss (%rsi,%rcx,4), %xmm1
cvtss2sd %xmm1, %xmm1
mulsd %xmm1, %xmm1
addsd %xmm1, %xmm0
incq %rcx
cmpq %rcx, %rax
jne .LBB5_3
jmp .LBB5_4
.LBB5_1:
xorpd %xmm0, %xmm0
.LBB5_4: # %._crit_edge
xorps %xmm1, %xmm1
cvtsi2sd %edx, %xmm1
divsd %xmm1, %xmm0
retq
.Lfunc_end5:
.size _Z8calc_msePfS_i, .Lfunc_end5-_Z8calc_msePfS_i
.cfi_endproc
# -- End function
.globl _Z12cpuKernel_yxPfS_S_iii # -- Begin function _Z12cpuKernel_yxPfS_S_iii
.p2align 4, 0x90
.type _Z12cpuKernel_yxPfS_S_iii,@function
_Z12cpuKernel_yxPfS_S_iii: # @_Z12cpuKernel_yxPfS_S_iii
.cfi_startproc
# %bb.0:
# kill: def $r9d killed $r9d def $r9
# kill: def $r8d killed $r8d def $r8
imull %ecx, %r8d
leal (%r8,%r9), %eax
cltq
movl $0, (%rdx,%rax,4)
testl %ecx, %ecx
jle .LBB6_3
# %bb.1: # %.lr.ph
movss (%rdx,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movslq %r8d, %r8
movslq %r9d, %r9
movl %ecx, %ecx
leaq (%rdi,%r8,4), %rdi
leaq (%rsi,%r9,4), %rsi
leaq (,%rcx,4), %r8
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB6_2: # =>This Inner Loop Header: Depth=1
movss (%rdi,%r9,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rsi), %xmm1
addss %xmm1, %xmm0
movss %xmm0, (%rdx,%rax,4)
incq %r9
addq %r8, %rsi
cmpq %r9, %rcx
jne .LBB6_2
.LBB6_3: # %._crit_edge
retq
.Lfunc_end6:
.size _Z12cpuKernel_yxPfS_S_iii, .Lfunc_end6-_Z12cpuKernel_yxPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z25__device_stub__kernelFuncPfS_S_i # -- Begin function _Z25__device_stub__kernelFuncPfS_S_i
.p2align 4, 0x90
.type _Z25__device_stub__kernelFuncPfS_S_i,@function
_Z25__device_stub__kernelFuncPfS_S_i: # @_Z25__device_stub__kernelFuncPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10kernelFuncPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end7:
.size _Z25__device_stub__kernelFuncPfS_S_i, .Lfunc_end7-_Z25__device_stub__kernelFuncPfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10kernelFuncPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Device Name: %s\n"
.size .L.str, 17
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "n=%d\t CPU=%06ld ms GPU=%06ld ms mse=%f\n"
.size .L.str.1, 40
.type _Z10kernelFuncPfS_S_i,@object # @_Z10kernelFuncPfS_S_i
.section .rodata,"a",@progbits
.globl _Z10kernelFuncPfS_S_i
.p2align 3, 0x0
_Z10kernelFuncPfS_S_i:
.quad _Z25__device_stub__kernelFuncPfS_S_i
.size _Z10kernelFuncPfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10kernelFuncPfS_S_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__kernelFuncPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10kernelFuncPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*Este programa recibe un archivo CSV con 64 LiDAR data packets y
devuelve un vector de 16384 valores en double con informacion de radios de los puntos escaneados*/
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#define NUM_POINTS 16384
// Converts one LiDAR sample per thread from (radius, encoder tick, beam
// angles) into Cartesian coordinates.
//   r:             per-point radii
//   encoder_count: single initial encoder tick value (read at index 0)
//   altitude:      per-channel vertical beam angles in degrees (16 entries)
//   azimuth:       per-channel horizontal offsets in degrees (16 entries)
//   point_cloud:   output, interleaved x,y,z per point
// NOTE(review): no bounds guard — assumes gridDim.x*blockDim.x equals the
// point count exactly (the host launches 16 x 1024 = 16384 threads).
__global__
void Conversion(float* r, unsigned long int* encoder_count, float* altitude, float* azimuth, float* point_cloud)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // 16 channels per azimuth block; the encoder advances 88 ticks per block
    // and wraps every 90112 ticks (one full revolution).
    const int block_idx = gid / 16;
    const unsigned long int ticks = (encoder_count[0] + block_idx * 88) % 90112;
    const int chan = gid % 16;
    // Angles computed in double precision, then narrowed to float, exactly
    // as in the original formulation.
    const float theta = (float)(2 * M_PI * (ticks / 90112.0 + azimuth[chan] / 360.0));
    const float phi = (float)(2 * M_PI * altitude[chan] / 360.0);
    // Spherical -> Cartesian, laid out as x1 y1 z1 x2 y2 z2 ...
    point_cloud[0 + 3 * gid] = (float)(r[gid] * cos(theta) * cos(phi));  // x
    point_cloud[1 + 3 * gid] = (float)(-r[gid] * sin(theta) * cos(phi)); // y
    point_cloud[2 + 3 * gid] = (float)(r[gid] * sin(phi));               // z
}
// Reads a CSV with 64 LiDAR data packets, converts 16384 points to Cartesian
// coordinates on the GPU, and writes them to Output_file.csv.
// Fix: the original called free(document) on a FILE* that had already been
// passed to fclose() — undefined behavior (invalid free). The free is removed;
// fclose() alone releases the stream.
int main(void)
{
    /////// Block 1: open and parse the Donut and beam_intrinsics files ///////
    int i = 0;
    const int N_LINE = 128;            // maximum characters read per line
    char line[N_LINE];
    FILE* document;
    document = fopen("Donut_1024x16.csv", "r");
    if (!document) {                   // verify the file opened correctly
        perror("File opening failed");
        return 0;
    }
    float* h_r = NULL;                 // point radii
    size_t bytes_r = NUM_POINTS * sizeof(float);
    h_r = (float*)malloc(bytes_r);
    unsigned long int h_encoder_count = 0;  // initial encoder tick (grows by 88 per block)
    int offset = 0;
    unsigned long int word = 0;
    int channel = 2;
    int azimuth_block = 0;
    int lidar_packet = 0;
    int idx_line;                      // line index of the next word to read
    int j = 1;                         // current line number (1-based)
    while (fgets(line, N_LINE, document) != NULL)
    {
        // Lines 13/14 hold the low/high bytes of the initial encoder count.
        if (j == 13) h_encoder_count = atoi(line);
        if (j == 14) h_encoder_count = atoi(line) << 8 | h_encoder_count;
        // Each radius spans three consecutive lines (low byte, high byte,
        // 4-bit extension) at a position derived from channel/block/packet.
        idx_line = 17 + 12 * channel + 788 * azimuth_block + 12608 * lidar_packet;
        if (j == idx_line) word = (unsigned long int) atoi(line);
        if (j == idx_line + 1) word = (unsigned long int) (atoi(line) << 8) | word;
        if (j == idx_line + 2) word = (unsigned long int) ((atoi(line) & 0x0000000F) << 16) | word;
        if (j > (idx_line + 2))        // radius complete: store it, advance channel
        {
            h_r[offset] = (float)word;
            offset++;
            channel += 4;
        }
        if (channel >= 64)             // block exhausted: next azimuth block
        {
            channel = 2;
            azimuth_block++;
        }
        if (azimuth_block >= 16)       // packet exhausted: next LiDAR packet
        {
            azimuth_block = 0;
            lidar_packet++;
        }
        if (lidar_packet >= 64) break; // all packets consumed
        j++;
    }
    fclose(document);
    // Read the beam_intrinsics file (per-channel calibration angles).
    document = fopen("beam_intrinsics.csv", "r");
    if (!document) {
        perror("File opening failed");
        return 0;
    }
    float *h_altitude = NULL;
    float *h_azimuth = NULL;
    size_t bytes_angles = 16 * sizeof(float);  // 16 channels
    h_altitude = (float*)malloc(bytes_angles);
    h_azimuth = (float*)malloc(bytes_angles);
    j = 1;
    while (fgets(line, N_LINE, document) != NULL)
    {
        // Altitude angles: every 4th line in [2, 65].
        if (j == 2) offset = 0;
        if (j >= 2 && j <= 65)
        {
            if (j % 4 == 0)
            {
                h_altitude[offset] = (float)atof(line);
                offset++;
            }
        }
        // Azimuth angles: every 4th line (offset by 66) in [68, 131].
        if (j == 68) offset = 0;
        if (j >= 68 && j <= 131)
        {
            if ((j - 66) % 4 == 0)
            {
                h_azimuth[offset] = (float)atof(line);
                offset++;
            }
        }
        j++;
    }
    fclose(document);
    /////// Block 2: conversion to Cartesian coordinates ///////
    // Host output buffer: interleaved x1 y1 z1 x2 y2 z2 ...
    float *h_point_cloud = NULL;
    h_point_cloud = (float*)malloc(3 * bytes_r);
    // Device buffers.
    // NOTE(review): CUDA API return codes are not checked here; a CUDA_CHECK
    // macro around each call would surface allocation/copy failures early.
    float *d_point_cloud = NULL;
    float *d_r = NULL;
    float *d_azimuth = NULL;
    float *d_altitude = NULL;
    unsigned long int* d_encoder_count;
    cudaMalloc(&d_point_cloud, 3 * bytes_r);
    cudaMalloc(&d_r, bytes_r);
    cudaMalloc(&d_azimuth, bytes_angles);
    cudaMalloc(&d_altitude, bytes_angles);
    cudaMalloc(&d_encoder_count, sizeof(unsigned long int));
    // Copy inputs to the GPU.
    cudaMemcpy(d_r, h_r, bytes_r, cudaMemcpyHostToDevice);
    cudaMemcpy(d_azimuth, h_azimuth, bytes_angles, cudaMemcpyHostToDevice);
    cudaMemcpy(d_altitude, h_altitude, bytes_angles, cudaMemcpyHostToDevice);
    cudaMemcpy(d_encoder_count, &h_encoder_count, sizeof(unsigned long int), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int BlockSize = NUM_POINTS / 16;
    int GridSize = 16;
    // Launch the kernel and time it with events.
    cudaEventRecord(start);
    Conversion<<<GridSize, BlockSize>>>(d_r, d_encoder_count, d_altitude, d_azimuth, d_point_cloud);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Kernel's elapsed time: %.3f ms\n", milliseconds);
    // Copy the result back to the host.
    cudaMemcpy(h_point_cloud, d_point_cloud, 3 * bytes_r, cudaMemcpyDeviceToHost);
    /////// Block 3: write the points to Output_file.csv ///////
    document = fopen("Output_file.csv", "w");
    if (!document) {
        perror("File opening failed");
        return 0;
    }
    for (i = 0; i < NUM_POINTS; i++)
    {
        for (j = 0; j < 2; j++) fprintf(document, "%.4f, ", h_point_cloud[j + i * 3]);
        fprintf(document, "%.4f\n ", h_point_cloud[2 + i * 3]);  // z component
    }
    fclose(document);
    printf("Success!\n");
    // Release resources. fclose() already disposed of the stream; calling
    // free() on it afterwards (as the original did) is undefined behavior.
    free(h_r), free(h_altitude), free(h_azimuth), free(h_point_cloud);
    cudaFree(d_r), cudaFree(d_altitude), cudaFree(d_azimuth), cudaFree(d_point_cloud), cudaFree(d_encoder_count);
    return 1;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_
.type _Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_, @function
_Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z10ConversionPfPmS_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_, .-_Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_
.globl _Z10ConversionPfPmS_S_S_
.type _Z10ConversionPfPmS_S_S_, @function
_Z10ConversionPfPmS_S_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10ConversionPfPmS_S_S_, .-_Z10ConversionPfPmS_S_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.LC1:
.string "Donut_1024x16.csv"
.LC2:
.string "File opening failed"
.LC3:
.string "beam_intrinsics.csv"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "Kernel's elapsed time: %.3f ms\n"
.section .rodata.str1.1
.LC6:
.string "w"
.LC7:
.string "Output_file.csv"
.LC8:
.string "%.4f, "
.LC9:
.string "%.4f\n "
.LC10:
.string "Success!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $280, %rsp
.cfi_def_cfa_offset 336
movq %fs:40, %rax
movq %rax, 264(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
leaq .LC1(%rip), %rdi
call fopen@PLT
movq %rax, 16(%rsp)
testq %rax, %rax
je .L44
movl $65536, %edi
call malloc@PLT
movq %rax, 24(%rsp)
movq $0, 40(%rsp)
movl $1, %ebp
movl $0, %r15d
movl $0, %r14d
movl $2, %r12d
movq $0, 8(%rsp)
movl $0, %r13d
leaq 128(%rsp), %rax
movq %rax, (%rsp)
jmp .L14
.L44:
leaq .LC2(%rip), %rdi
call perror@PLT
movl $0, %eax
jmp .L11
.L45:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
cltq
movq %rax, 40(%rsp)
leal (%r12,%r12,2), %eax
imull $788, %r14d, %edx
leal 17(%rdx,%rax,4), %ebx
imull $12608, %r15d, %eax
addl %eax, %ebx
cmpl %ebx, %ebp
jne .L17
.L16:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
cltq
movq %rax, 8(%rsp)
jmp .L37
.L46:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
sall $8, %eax
cltq
orq %rax, 40(%rsp)
leal (%r12,%r12,2), %eax
imull $788, %r14d, %edx
leal 17(%rdx,%rax,4), %ebx
imull $12608, %r15d, %eax
addl %eax, %ebx
jmp .L19
.L47:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
sall $8, %eax
cltq
orq %rax, 8(%rsp)
jmp .L17
.L48:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
sall $16, %eax
andl $983040, %eax
orq %rax, 8(%rsp)
jmp .L37
.L22:
movq 8(%rsp), %rcx
movq %rcx, %rdx
shrq %rdx
andl $1, %ecx
orq %rcx, %rdx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
addss %xmm0, %xmm0
.L23:
movq 24(%rsp), %rcx
movss %xmm0, (%rcx,%rax,4)
addl $1, %r13d
addl $4, %r12d
cmpl $63, %r12d
jle .L37
addl $1, %r14d
cmpl $15, %r14d
jle .L39
addl $1, %r15d
cmpl $63, %r15d
jg .L26
movl $0, %r14d
movl $2, %r12d
.L37:
addl $1, %ebp
.L14:
movq 16(%rsp), %rcx
movl $128, %edx
movl $128, %esi
movq (%rsp), %rdi
call __fgets_chk@PLT
testq %rax, %rax
je .L26
cmpl $13, %ebp
je .L45
cmpl $14, %ebp
je .L46
leal (%r12,%r12,2), %eax
imull $788, %r14d, %edx
leal 17(%rdx,%rax,4), %ebx
imull $12608, %r15d, %eax
addl %eax, %ebx
cmpl %ebx, %ebp
je .L16
.L19:
leal 1(%rbx), %eax
cmpl %ebp, %eax
je .L47
.L17:
addl $2, %ebx
cmpl %ebp, %ebx
je .L48
jge .L37
movslq %r13d, %rax
movq 8(%rsp), %rcx
testq %rcx, %rcx
js .L22
pxor %xmm0, %xmm0
cvtsi2ssq %rcx, %xmm0
jmp .L23
.L26:
movq 16(%rsp), %rdi
call fclose@PLT
leaq .LC0(%rip), %rsi
leaq .LC3(%rip), %rdi
call fopen@PLT
movq %rax, %rbp
testq %rax, %rax
je .L49
movl $64, %edi
call malloc@PLT
movq %rax, (%rsp)
movl $64, %edi
call malloc@PLT
movq %rax, 8(%rsp)
movl $1, %ebx
leaq 128(%rsp), %r12
jmp .L29
.L49:
leaq .LC2(%rip), %rdi
call perror@PLT
movl $0, %eax
jmp .L11
.L53:
testb $3, %bl
jne .L32
movl $0, %esi
movq %r12, %rdi
call strtod@PLT
movslq %r13d, %rax
cvtsd2ss %xmm0, %xmm0
movq (%rsp), %rcx
movss %xmm0, (%rcx,%rax,4)
addl $1, %r13d
jmp .L32
.L54:
leal -68(%rbx), %eax
cmpl $63, %eax
ja .L32
leal -66(%rbx), %eax
testb $3, %al
jne .L32
movl $0, %esi
movq %r12, %rdi
call strtod@PLT
movslq %r13d, %rax
cvtsd2ss %xmm0, %xmm0
movq 8(%rsp), %rcx
movss %xmm0, (%rcx,%rax,4)
addl $1, %r13d
jmp .L32
.L52:
movq %rbp, %rdi
call fclose@PLT
movl $196608, %edi
call malloc@PLT
movq %rax, %r15
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 72(%rsp)
leaq 48(%rsp), %rdi
movl $196608, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $65536, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movl $8, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $65536, %edx
movq 24(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $64, %edx
movq 8(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $64, %edx
movq (%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
leaq 40(%rsp), %rsi
movl $1, %ecx
movl $8, %edx
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
leaq 88(%rsp), %rdi
call cudaEventCreate@PLT
leaq 96(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 88(%rsp), %rdi
call cudaEventRecord@PLT
movl $1024, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $16, 104(%rsp)
movl $1, 108(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 116(%rsp), %rdx
movl $1, %ecx
movq 104(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L34:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 96(%rsp), %rdi
call cudaEventRecord@PLT
movq 96(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 116(%rsp)
leaq 116(%rsp), %rdi
movq 96(%rsp), %rdx
movq 88(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 116(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $196608, %edx
movq 48(%rsp), %rsi
movq %r15, %rdi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
leaq .LC7(%rip), %rdi
call fopen@PLT
movq %rax, %rbp
testq %rax, %rax
je .L35
movq %r15, %rbx
leaq 196608(%r15), %r14
leaq .LC8(%rip), %r12
leaq .LC9(%rip), %r13
.L36:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 4(%rbx), %xmm0
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 8(%rbx), %xmm0
movq %r13, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
addq $12, %rbx
cmpq %rbx, %r14
jne .L36
movq %rbp, %rdi
call fclose@PLT
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq (%rsp), %rdi
call free@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movl $1, %eax
.L11:
movq 264(%rsp), %rdx
subq %fs:40, %rdx
jne .L51
addq $280, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L50:
.cfi_restore_state
movq 48(%rsp), %r8
movq 64(%rsp), %rcx
movq 72(%rsp), %rdx
movq 80(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_
jmp .L34
.L35:
leaq .LC2(%rip), %rdi
call perror@PLT
movl $0, %eax
jmp .L11
.L40:
movl $0, %r13d
.L32:
addl $1, %ebx
.L29:
movq %rbp, %rcx
movl $128, %edx
movl $128, %esi
movq %r12, %rdi
call __fgets_chk@PLT
testq %rax, %rax
je .L52
cmpl $2, %ebx
je .L40
leal -2(%rbx), %eax
cmpl $63, %eax
jbe .L53
cmpl $68, %ebx
jne .L54
movl $0, %r13d
jmp .L32
.L39:
movl $2, %r12d
jmp .L37
.L51:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z10ConversionPfPmS_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z10ConversionPfPmS_S_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*Este programa recibe un archivo CSV con 64 LiDAR data packets y
devuelve un vector de 16384 valores en double con informacion de radios de los puntos escaneados*/
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#define NUM_POINTS 16384
// Converts one LiDAR sample per thread from (radius, encoder tick, beam
// angles) into Cartesian coordinates.
//   r:             per-point radii
//   encoder_count: single initial encoder tick value (read at index 0)
//   altitude:      per-channel vertical beam angles in degrees (16 entries)
//   azimuth:       per-channel horizontal offsets in degrees (16 entries)
//   point_cloud:   output, interleaved x,y,z per point
// NOTE(review): no bounds guard — assumes gridDim.x*blockDim.x equals the
// point count exactly (the host launches 16 x 1024 = 16384 threads).
__global__
void Conversion(float* r, unsigned long int* encoder_count, float* altitude, float* azimuth, float* point_cloud)
{
int azimuth_block, channel;
unsigned long int counter;
float theta, phi;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// 16 channels per azimuth block; the encoder advances 88 ticks per block
// and wraps every 90112 ticks (one full revolution).
azimuth_block = i / 16;
counter = (encoder_count[0] + azimuth_block * 88) % 90112;
channel = i % 16;
// Angles computed in double precision, then narrowed to float.
theta = (float)(2 * M_PI * (counter / 90112.0 + azimuth[channel] / 360.0));
phi = (float)(2 * M_PI * altitude[channel] / 360.0);
point_cloud[0 + 3 * i] = (float)(r[i] * cos(theta) * cos(phi));//x
point_cloud[1 + 3 * i] = (float)(-r[i] * sin(theta) * cos(phi));//y
point_cloud[2 + 3 * i] = (float)(r[i] * sin(phi));//z
}
// Reads a CSV with 64 LiDAR data packets, converts 16384 points to Cartesian
// coordinates on the GPU, and writes them to Output_file.csv.
// Fix: the original called free(document) on a FILE* that had already been
// passed to fclose() — undefined behavior (invalid free). The free is removed;
// fclose() alone releases the stream.
int main(void)
{
    /////// Block 1: open and parse the Donut and beam_intrinsics files ///////
    int i = 0;
    const int N_LINE = 128;            // maximum characters read per line
    char line[N_LINE];
    FILE* document;
    document = fopen("Donut_1024x16.csv", "r");
    if (!document) {                   // verify the file opened correctly
        perror("File opening failed");
        return 0;
    }
    float* h_r = NULL;                 // point radii
    size_t bytes_r = NUM_POINTS * sizeof(float);
    h_r = (float*)malloc(bytes_r);
    unsigned long int h_encoder_count = 0;  // initial encoder tick (grows by 88 per block)
    int offset = 0;
    unsigned long int word = 0;
    int channel = 2;
    int azimuth_block = 0;
    int lidar_packet = 0;
    int idx_line;                      // line index of the next word to read
    int j = 1;                         // current line number (1-based)
    while (fgets(line, N_LINE, document) != NULL)
    {
        // Lines 13/14 hold the low/high bytes of the initial encoder count.
        if (j == 13) h_encoder_count = atoi(line);
        if (j == 14) h_encoder_count = atoi(line) << 8 | h_encoder_count;
        // Each radius spans three consecutive lines (low byte, high byte,
        // 4-bit extension) at a position derived from channel/block/packet.
        idx_line = 17 + 12 * channel + 788 * azimuth_block + 12608 * lidar_packet;
        if (j == idx_line) word = (unsigned long int) atoi(line);
        if (j == idx_line + 1) word = (unsigned long int) (atoi(line) << 8) | word;
        if (j == idx_line + 2) word = (unsigned long int) ((atoi(line) & 0x0000000F) << 16) | word;
        if (j > (idx_line + 2))        // radius complete: store it, advance channel
        {
            h_r[offset] = (float)word;
            offset++;
            channel += 4;
        }
        if (channel >= 64)             // block exhausted: next azimuth block
        {
            channel = 2;
            azimuth_block++;
        }
        if (azimuth_block >= 16)       // packet exhausted: next LiDAR packet
        {
            azimuth_block = 0;
            lidar_packet++;
        }
        if (lidar_packet >= 64) break; // all packets consumed
        j++;
    }
    fclose(document);
    // Read the beam_intrinsics file (per-channel calibration angles).
    document = fopen("beam_intrinsics.csv", "r");
    if (!document) {
        perror("File opening failed");
        return 0;
    }
    float *h_altitude = NULL;
    float *h_azimuth = NULL;
    size_t bytes_angles = 16 * sizeof(float);  // 16 channels
    h_altitude = (float*)malloc(bytes_angles);
    h_azimuth = (float*)malloc(bytes_angles);
    j = 1;
    while (fgets(line, N_LINE, document) != NULL)
    {
        // Altitude angles: every 4th line in [2, 65].
        if (j == 2) offset = 0;
        if (j >= 2 && j <= 65)
        {
            if (j % 4 == 0)
            {
                h_altitude[offset] = (float)atof(line);
                offset++;
            }
        }
        // Azimuth angles: every 4th line (offset by 66) in [68, 131].
        if (j == 68) offset = 0;
        if (j >= 68 && j <= 131)
        {
            if ((j - 66) % 4 == 0)
            {
                h_azimuth[offset] = (float)atof(line);
                offset++;
            }
        }
        j++;
    }
    fclose(document);
    /////// Block 2: conversion to Cartesian coordinates ///////
    // Host output buffer: interleaved x1 y1 z1 x2 y2 z2 ...
    float *h_point_cloud = NULL;
    h_point_cloud = (float*)malloc(3 * bytes_r);
    // Device buffers.
    // NOTE(review): CUDA API return codes are not checked here; a CUDA_CHECK
    // macro around each call would surface allocation/copy failures early.
    float *d_point_cloud = NULL;
    float *d_r = NULL;
    float *d_azimuth = NULL;
    float *d_altitude = NULL;
    unsigned long int* d_encoder_count;
    cudaMalloc(&d_point_cloud, 3 * bytes_r);
    cudaMalloc(&d_r, bytes_r);
    cudaMalloc(&d_azimuth, bytes_angles);
    cudaMalloc(&d_altitude, bytes_angles);
    cudaMalloc(&d_encoder_count, sizeof(unsigned long int));
    // Copy inputs to the GPU.
    cudaMemcpy(d_r, h_r, bytes_r, cudaMemcpyHostToDevice);
    cudaMemcpy(d_azimuth, h_azimuth, bytes_angles, cudaMemcpyHostToDevice);
    cudaMemcpy(d_altitude, h_altitude, bytes_angles, cudaMemcpyHostToDevice);
    cudaMemcpy(d_encoder_count, &h_encoder_count, sizeof(unsigned long int), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int BlockSize = NUM_POINTS / 16;
    int GridSize = 16;
    // Launch the kernel and time it with events.
    cudaEventRecord(start);
    Conversion<<<GridSize, BlockSize>>>(d_r, d_encoder_count, d_altitude, d_azimuth, d_point_cloud);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Kernel's elapsed time: %.3f ms\n", milliseconds);
    // Copy the result back to the host.
    cudaMemcpy(h_point_cloud, d_point_cloud, 3 * bytes_r, cudaMemcpyDeviceToHost);
    /////// Block 3: write the points to Output_file.csv ///////
    document = fopen("Output_file.csv", "w");
    if (!document) {
        perror("File opening failed");
        return 0;
    }
    for (i = 0; i < NUM_POINTS; i++)
    {
        for (j = 0; j < 2; j++) fprintf(document, "%.4f, ", h_point_cloud[j + i * 3]);
        fprintf(document, "%.4f\n ", h_point_cloud[2 + i * 3]);  // z component
    }
    fclose(document);
    printf("Success!\n");
    // Release resources. fclose() already disposed of the stream; calling
    // free() on it afterwards (as the original did) is undefined behavior.
    free(h_r), free(h_altitude), free(h_azimuth), free(h_point_cloud);
    cudaFree(d_r), cudaFree(d_altitude), cudaFree(d_azimuth), cudaFree(d_point_cloud), cudaFree(d_encoder_count);
    return 1;
}
devuelve un vector de 16384 valores en double con informacion de radios de los puntos escaneados*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#define NUM_POINTS 16384
// Converts one LiDAR sample per thread from (radius, encoder tick, beam
// angles) into Cartesian coordinates.
//   r:             per-point radii
//   encoder_count: single initial encoder tick value (read at index 0)
//   altitude:      per-channel vertical beam angles in degrees (16 entries)
//   azimuth:       per-channel horizontal offsets in degrees (16 entries)
//   point_cloud:   output, interleaved x,y,z per point
// NOTE(review): no bounds guard — assumes gridDim.x*blockDim.x equals the
// point count exactly (the host launches 16 x 1024 = 16384 threads).
__global__
void Conversion(float* r, unsigned long int* encoder_count, float* altitude, float* azimuth, float* point_cloud)
{
int azimuth_block, channel;
unsigned long int counter;
float theta, phi;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// 16 channels per azimuth block; the encoder advances 88 ticks per block
// and wraps every 90112 ticks (one full revolution).
azimuth_block = i / 16;
counter = (encoder_count[0] + azimuth_block * 88) % 90112;
channel = i % 16;
// Angles computed in double precision, then narrowed to float.
theta = (float)(2 * M_PI * (counter / 90112.0 + azimuth[channel] / 360.0));
phi = (float)(2 * M_PI * altitude[channel] / 360.0);
point_cloud[0 + 3 * i] = (float)(r[i] * cos(theta) * cos(phi));//x
point_cloud[1 + 3 * i] = (float)(-r[i] * sin(theta) * cos(phi));//y
point_cloud[2 + 3 * i] = (float)(r[i] * sin(phi));//z
}
// Reads a CSV with 64 LiDAR data packets, converts 16384 points to Cartesian
// coordinates on the GPU (HIP runtime), and writes them to Output_file.csv.
// Fix: the original called free(document) on a FILE* that had already been
// passed to fclose() — undefined behavior (invalid free). The free is removed;
// fclose() alone releases the stream.
int main(void)
{
    /////// Block 1: open and parse the Donut and beam_intrinsics files ///////
    int i = 0;
    const int N_LINE = 128;            // maximum characters read per line
    char line[N_LINE];
    FILE* document;
    document = fopen("Donut_1024x16.csv", "r");
    if (!document) {                   // verify the file opened correctly
        perror("File opening failed");
        return 0;
    }
    float* h_r = NULL;                 // point radii
    size_t bytes_r = NUM_POINTS * sizeof(float);
    h_r = (float*)malloc(bytes_r);
    unsigned long int h_encoder_count = 0;  // initial encoder tick (grows by 88 per block)
    int offset = 0;
    unsigned long int word = 0;
    int channel = 2;
    int azimuth_block = 0;
    int lidar_packet = 0;
    int idx_line;                      // line index of the next word to read
    int j = 1;                         // current line number (1-based)
    while (fgets(line, N_LINE, document) != NULL)
    {
        // Lines 13/14 hold the low/high bytes of the initial encoder count.
        if (j == 13) h_encoder_count = atoi(line);
        if (j == 14) h_encoder_count = atoi(line) << 8 | h_encoder_count;
        // Each radius spans three consecutive lines (low byte, high byte,
        // 4-bit extension) at a position derived from channel/block/packet.
        idx_line = 17 + 12 * channel + 788 * azimuth_block + 12608 * lidar_packet;
        if (j == idx_line) word = (unsigned long int) atoi(line);
        if (j == idx_line + 1) word = (unsigned long int) (atoi(line) << 8) | word;
        if (j == idx_line + 2) word = (unsigned long int) ((atoi(line) & 0x0000000F) << 16) | word;
        if (j > (idx_line + 2))        // radius complete: store it, advance channel
        {
            h_r[offset] = (float)word;
            offset++;
            channel += 4;
        }
        if (channel >= 64)             // block exhausted: next azimuth block
        {
            channel = 2;
            azimuth_block++;
        }
        if (azimuth_block >= 16)       // packet exhausted: next LiDAR packet
        {
            azimuth_block = 0;
            lidar_packet++;
        }
        if (lidar_packet >= 64) break; // all packets consumed
        j++;
    }
    fclose(document);
    // Read the beam_intrinsics file (per-channel calibration angles).
    document = fopen("beam_intrinsics.csv", "r");
    if (!document) {
        perror("File opening failed");
        return 0;
    }
    float *h_altitude = NULL;
    float *h_azimuth = NULL;
    size_t bytes_angles = 16 * sizeof(float);  // 16 channels
    h_altitude = (float*)malloc(bytes_angles);
    h_azimuth = (float*)malloc(bytes_angles);
    j = 1;
    while (fgets(line, N_LINE, document) != NULL)
    {
        // Altitude angles: every 4th line in [2, 65].
        if (j == 2) offset = 0;
        if (j >= 2 && j <= 65)
        {
            if (j % 4 == 0)
            {
                h_altitude[offset] = (float)atof(line);
                offset++;
            }
        }
        // Azimuth angles: every 4th line (offset by 66) in [68, 131].
        if (j == 68) offset = 0;
        if (j >= 68 && j <= 131)
        {
            if ((j - 66) % 4 == 0)
            {
                h_azimuth[offset] = (float)atof(line);
                offset++;
            }
        }
        j++;
    }
    fclose(document);
    /////// Block 2: conversion to Cartesian coordinates ///////
    // Host output buffer: interleaved x1 y1 z1 x2 y2 z2 ...
    float *h_point_cloud = NULL;
    h_point_cloud = (float*)malloc(3 * bytes_r);
    // Device buffers.
    // NOTE(review): HIP API return codes are not checked here; a HIP_CHECK
    // macro around each call would surface allocation/copy failures early.
    float *d_point_cloud = NULL;
    float *d_r = NULL;
    float *d_azimuth = NULL;
    float *d_altitude = NULL;
    unsigned long int* d_encoder_count;
    hipMalloc(&d_point_cloud, 3 * bytes_r);
    hipMalloc(&d_r, bytes_r);
    hipMalloc(&d_azimuth, bytes_angles);
    hipMalloc(&d_altitude, bytes_angles);
    hipMalloc(&d_encoder_count, sizeof(unsigned long int));
    // Copy inputs to the GPU.
    hipMemcpy(d_r, h_r, bytes_r, hipMemcpyHostToDevice);
    hipMemcpy(d_azimuth, h_azimuth, bytes_angles, hipMemcpyHostToDevice);
    hipMemcpy(d_altitude, h_altitude, bytes_angles, hipMemcpyHostToDevice);
    hipMemcpy(d_encoder_count, &h_encoder_count, sizeof(unsigned long int), hipMemcpyHostToDevice);
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    int BlockSize = NUM_POINTS / 16;
    int GridSize = 16;
    // Launch the kernel and time it with events.
    hipEventRecord(start);
    Conversion<<<GridSize, BlockSize>>>(d_r, d_encoder_count, d_altitude, d_azimuth, d_point_cloud);
    hipDeviceSynchronize();
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    printf("Kernel's elapsed time: %.3f ms\n", milliseconds);
    // Copy the result back to the host.
    hipMemcpy(h_point_cloud, d_point_cloud, 3 * bytes_r, hipMemcpyDeviceToHost);
    /////// Block 3: write the points to Output_file.csv ///////
    document = fopen("Output_file.csv", "w");
    if (!document) {
        perror("File opening failed");
        return 0;
    }
    for (i = 0; i < NUM_POINTS; i++)
    {
        for (j = 0; j < 2; j++) fprintf(document, "%.4f, ", h_point_cloud[j + i * 3]);
        fprintf(document, "%.4f\n ", h_point_cloud[2 + i * 3]);  // z component
    }
    fclose(document);
    printf("Success!\n");
    // Release resources. fclose() already disposed of the stream; calling
    // free() on it afterwards (as the original did) is undefined behavior.
    free(h_r), free(h_altitude), free(h_azimuth), free(h_point_cloud);
    hipFree(d_r), hipFree(d_altitude), hipFree(d_azimuth), hipFree(d_point_cloud), hipFree(d_encoder_count);
    return 1;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*Este programa recibe un archivo CSV con 64 LiDAR data packets y
devuelve un vector de 16384 valores en double con informacion de radios de los puntos escaneados*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#define NUM_POINTS 16384
// Converts one LiDAR sample per thread from (radius, encoder tick, beam
// angles) into Cartesian coordinates.
//   r:             per-point radii
//   encoder_count: single initial encoder tick value (read at index 0)
//   altitude:      per-channel vertical beam angles in degrees (16 entries)
//   azimuth:       per-channel horizontal offsets in degrees (16 entries)
//   point_cloud:   output, interleaved x,y,z per point
// NOTE(review): no bounds guard — assumes gridDim.x*blockDim.x equals the
// point count exactly (the host launches 16 x 1024 = 16384 threads).
__global__
void Conversion(float* r, unsigned long int* encoder_count, float* altitude, float* azimuth, float* point_cloud)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // 16 channels per azimuth block; the encoder advances 88 ticks per block
    // and wraps every 90112 ticks (one full revolution).
    const int block_idx = gid / 16;
    const unsigned long int ticks = (encoder_count[0] + block_idx * 88) % 90112;
    const int chan = gid % 16;
    // Angles computed in double precision, then narrowed to float, exactly
    // as in the original formulation.
    const float theta = (float)(2 * M_PI * (ticks / 90112.0 + azimuth[chan] / 360.0));
    const float phi = (float)(2 * M_PI * altitude[chan] / 360.0);
    // Spherical -> Cartesian, laid out as x1 y1 z1 x2 y2 z2 ...
    point_cloud[0 + 3 * gid] = (float)(r[gid] * cos(theta) * cos(phi));  // x
    point_cloud[1 + 3 * gid] = (float)(-r[gid] * sin(theta) * cos(phi)); // y
    point_cloud[2 + 3 * gid] = (float)(r[gid] * sin(phi));               // z
}
/*
 * Host driver: parses LiDAR range data from "Donut_1024x16.csv" and beam
 * angles from "beam_intrinsics.csv", converts the ranges to Cartesian
 * coordinates on the GPU via the Conversion kernel, and writes the resulting
 * point cloud to "Output_file.csv".
 *
 * Returns 1 on success and 0 on any file-open failure.
 * NOTE(review): this is the inverse of the usual exit-code convention
 * (0 = success); kept as-is because callers/scripts may depend on it — confirm.
 */
int main(void)
{
	/////// Block 1: open and parse the Donut and beam_intrinsics files ///////
	int i = 0;
	const int N_LINE = 128;//maximum number of characters to read per line
	char line[N_LINE];
	FILE* document;
	document = fopen("Donut_1024x16.csv", "r");//open the file
	if (!document) {//check that it was opened correctly
		perror("File opening failed");
		return 0;
	}
	float* h_r = NULL;//ranges (radii), one per point
	size_t bytes_r = NUM_POINTS*sizeof(float);
	h_r = (float*)malloc(bytes_r);
	unsigned long int h_encoder_count = 0;//initial encoder count (grows by 88 ticks afterwards)
	int offset = 0;
	unsigned long int word = 0;
	int channel = 2;
	int azimuth_block = 0;
	int lidar_packet = 0;
	int idx_line;//file line index where the next range word starts
	int j = 1;//current line number (1-based)
	while (fgets(line, N_LINE, document) != NULL)
	{
		//assemble the first encoder_count value (two bytes, low byte first)
		if (j == 13) h_encoder_count = atoi(line);
		if (j == 14) h_encoder_count = atoi(line) << 8 | h_encoder_count;
		//read range words from the Donut file; each range is 20 bits spread
		//over three consecutive byte lines at idx_line, idx_line+1, idx_line+2
		idx_line = 17 + 12 * channel + 788 * azimuth_block + 12608 * lidar_packet;
		if (j == idx_line) word = (unsigned long int) atoi(line);
		if (j == idx_line + 1) word = (unsigned long int) (atoi(line) << 8) | word;
		if (j == idx_line + 2) word = (unsigned long int) ((atoi(line) & 0x0000000F)<<16) | word;
		if (j > (idx_line + 2))//range fully assembled: store it, advance to next channel
		{
			h_r[offset] = (float)word;
			offset++;
			channel += 4;//only every 4th channel is kept (16 of 64)
		}
		if (channel >= 64)//channels of this block exhausted: next azimuth block
		{
			channel = 2;
			azimuth_block++;
		}
		if (azimuth_block >= 16)//azimuth blocks exhausted: next lidar packet
		{
			azimuth_block = 0;
			lidar_packet++;
		}
		if (lidar_packet >= 64) break;//all lidar packets consumed: done
		j++;
	}
	fclose(document);
	//printf("%ld\n",h_encoder_count);
	//for(i=0;i<100;i++) printf("%.3f\n",h_r[i]);
	//parse the beam_intrinsics file
	document = fopen("beam_intrinsics.csv", "r");//open the file
	if (!document) {//check that it was opened correctly
		perror("File opening failed");
		return 0;
	}
	float *h_altitude = NULL;
	float *h_azimuth = NULL;
	size_t bytes_angles = 16 * sizeof(float);//16 channels
	h_altitude = (float*)malloc(bytes_angles);
	h_azimuth = (float*)malloc(bytes_angles);
	j = 1;
	while (fgets(line, N_LINE, document) != NULL)
	{
		//altitude angles live on every 4th line of lines 2..65
		if (j == 2) offset = 0;
		if (j >= 2 && j <= 65)
		{
			if (j % 4 == 0)
			{
				h_altitude[offset] = (float)atof(line);
				offset++;
			}
		}
		//azimuth angles live on every 4th line of lines 68..131
		if (j == 68) offset = 0;
		if (j >= 68 && j <= 131)
		{
			if ((j - 66) % 4 == 0)
			{
				h_azimuth[offset] = (float)atof(line);
				offset++;
			}
		}
		j++;
	}
	fclose(document);
	//for(i=0;i<16;i++) printf("%.3f\n",h_altitude[i]);
	//for(i=0;i<16;i++) printf("%.3f\n",h_azimuth[i]);
	///////End of Block 1///////
	///////Block 2: conversion to Cartesian coordinates///////
	//allocate memory for the host-side output buffer
	float *h_point_cloud = NULL;
	h_point_cloud = (float*)malloc(3 * bytes_r);
	//device pointers and GPU allocations
	float *d_point_cloud = NULL;//points in Cartesian coordinates,
	//layout: x1y1z1 x2y2z2 x3y3z3 ...
	float *d_r = NULL;
	float *d_azimuth = NULL;
	float *d_altitude = NULL;
	unsigned long int* d_encoder_count;
	hipMalloc(&d_point_cloud, 3*bytes_r);
	hipMalloc(&d_r, bytes_r);
	hipMalloc(&d_azimuth, bytes_angles);
	hipMalloc(&d_altitude, bytes_angles);
	hipMalloc(&d_encoder_count, sizeof(unsigned long int));
	//copy the input data to the GPU
	hipMemcpy(d_r,h_r,bytes_r,hipMemcpyHostToDevice);
	hipMemcpy(d_azimuth,h_azimuth,bytes_angles,hipMemcpyHostToDevice);
	hipMemcpy(d_altitude,h_altitude,bytes_angles,hipMemcpyHostToDevice);
	hipMemcpy(d_encoder_count,&h_encoder_count,sizeof(unsigned long int),hipMemcpyHostToDevice);
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	int BlockSize = NUM_POINTS/16;
	int GridSize = 16;
	//launch the kernel (one thread per point)
	hipEventRecord(start);
	Conversion<<<GridSize,BlockSize>>>(d_r, d_encoder_count, d_altitude, d_azimuth, d_point_cloud);
	hipDeviceSynchronize();
	hipEventRecord(stop);
	hipEventSynchronize(stop);
	float milliseconds = 0;
	hipEventElapsedTime(&milliseconds, start, stop);
	printf("Kernel's elapsed time: %.3f ms\n",milliseconds);
	//copy the output back to the CPU
	hipMemcpy(h_point_cloud, d_point_cloud, 3 * bytes_r, hipMemcpyDeviceToHost);
	///////End of Block 2///////
	///////Block 3: write the points to an output document (Output_file.csv)///////
	//open the document to fill
	document = fopen("Output_file.csv", "w");
	if (!document) {
		perror("File opening failed");
		return 0;
	}
	//write one "x, y, z" row per point (after the inner loop j == 2, so the
	//second fprintf emits the z coordinate)
	for (i = 0; i < NUM_POINTS; i++)
	{
		for (j = 0; j < 2; j++) fprintf(document, "%.4f, ", h_point_cloud[j + i * 3]);
		fprintf(document, "%.4f\n ", h_point_cloud[j + i * 3]);
	}
	fclose(document);
	printf("Success!\n");
	///////End of Block 3///////
	//release memory
	//BUGFIX: removed free(document) — the stream was already released by
	//fclose above; calling free() on a FILE* is undefined behavior.
	free(h_r), free(h_altitude), free(h_azimuth), free(h_point_cloud);
	hipFree(d_r), hipFree(d_altitude), hipFree(d_azimuth), hipFree(d_point_cloud), hipFree(d_encoder_count);
	return 1;
}
.file "Leer_LiDAR_data_paralelo.hip"
# Host-side launch stub for the Conversion kernel (HIP/clang-generated).
# Spills the five pointer arguments (rdi..r8, SysV ABI) to the stack, builds
# the kernel-argument pointer array at 96(%rsp), pops the queued launch
# configuration via __hipPopCallConfiguration, and forwards everything to
# hipLaunchKernel with the kernel handle _Z10ConversionPfPmS_S_S_.
	.globl	_Z25__device_stub__ConversionPfPmS_S_S_ # -- Begin function _Z25__device_stub__ConversionPfPmS_S_S_
	.p2align	4, 0x90
	.type	_Z25__device_stub__ConversionPfPmS_S_S_,@function
_Z25__device_stub__ConversionPfPmS_S_S_: # @_Z25__device_stub__ConversionPfPmS_S_S_
	.cfi_startproc
# %bb.0:
	subq	$136, %rsp
	.cfi_def_cfa_offset 144
# Spill the five incoming kernel arguments to known stack slots.
	movq	%rdi, 88(%rsp)
	movq	%rsi, 80(%rsp)
	movq	%rdx, 72(%rsp)
	movq	%rcx, 64(%rsp)
	movq	%r8, 56(%rsp)
# Build the void* kernel-argument array (each entry points at a spilled slot).
	leaq	88(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 128(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream pushed by <<<>>>.
	leaq	40(%rsp), %rdi
	leaq	24(%rsp), %rsi
	leaq	16(%rsp), %rdx
	leaq	8(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	40(%rsp), %rsi
	movl	48(%rsp), %edx
	movq	24(%rsp), %rcx
	movl	32(%rsp), %r8d
	leaq	96(%rsp), %r9
	movl	$_Z10ConversionPfPmS_S_S_, %edi
# Shared-mem size and stream are passed on the stack to hipLaunchKernel.
	pushq	8(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$152, %rsp
	.cfi_adjust_cfa_offset -152
	retq
.Lfunc_end0:
	.size	_Z25__device_stub__ConversionPfPmS_S_S_, .Lfunc_end0-_Z25__device_stub__ConversionPfPmS_S_S_
	.cfi_endproc
                                        # -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $360, %rsp # imm = 0x168
.cfi_def_cfa_offset 416
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $.L.str, %edi
movl $.L.str.1, %esi
callq fopen
testq %rax, %rax
je .LBB1_1
# %bb.2:
movq %rax, %r15
movl $65536, %edi # imm = 0x10000
callq malloc
movq %rax, 48(%rsp) # 8-byte Spill
movq $0, 72(%rsp)
leaq 224(%rsp), %rdi
movl $128, %esi
movq %r15, %rdx
callq fgets
testq %rax, %rax
je .LBB1_3
# %bb.4: # %.lr.ph.preheader
movl $1, %r13d
xorl %ebp, %ebp
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
movl $2, %r12d
xorl %ebx, %ebx
xorl %r14d, %r14d
movq %r15, 8(%rsp) # 8-byte Spill
.p2align 4, 0x90
.LBB1_5: # %.lr.ph
# =>This Inner Loop Header: Depth=1
cmpl $14, %r13d
je .LBB1_8
# %bb.6: # %.lr.ph
# in Loop: Header=BB1_5 Depth=1
cmpl $13, %r13d
jne .LBB1_9
# %bb.7: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cltq
movq %rax, 72(%rsp)
jmp .LBB1_9
.p2align 4, 0x90
.LBB1_8: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
shll $8, %eax
cltq
orq %rax, 72(%rsp)
.LBB1_9: # in Loop: Header=BB1_5 Depth=1
leal (%r12,%r12,2), %eax
imull $788, %r14d, %ecx # imm = 0x314
imull $12608, %ebx, %edx # imm = 0x3140
addl %ecx, %edx
leal (%rdx,%rax,4), %r15d
addl $17, %r15d
cmpl %r15d, %r13d
jne .LBB1_11
# %bb.10: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cltq
movq %rax, (%rsp) # 8-byte Spill
.LBB1_11: # in Loop: Header=BB1_5 Depth=1
leal 1(%r15), %eax
cmpl %eax, %r13d
jne .LBB1_12
# %bb.13: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
shll $8, %eax
cltq
movq (%rsp), %rcx # 8-byte Reload
orq %rax, %rcx
jmp .LBB1_14
.p2align 4, 0x90
.LBB1_12: # in Loop: Header=BB1_5 Depth=1
movq (%rsp), %rcx # 8-byte Reload
.LBB1_14: # in Loop: Header=BB1_5 Depth=1
orl $2, %r15d
cmpl %r15d, %r13d
jne .LBB1_16
# %bb.15: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
movl %ebx, (%rsp) # 4-byte Spill
movq %rcx, %rbx
callq __isoc23_strtol
andl $15, %eax
shll $16, %eax
orq %rax, %rbx
movq %rbx, %rcx
movl (%rsp), %ebx # 4-byte Reload
.LBB1_16: # in Loop: Header=BB1_5 Depth=1
cmpl %r15d, %r13d
movq %rcx, (%rsp) # 8-byte Spill
jle .LBB1_17
# %bb.18: # in Loop: Header=BB1_5 Depth=1
testq %rcx, %rcx
movq 8(%rsp), %r15 # 8-byte Reload
js .LBB1_19
# %bb.20: # in Loop: Header=BB1_5 Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %rcx, %xmm0
jmp .LBB1_21
.p2align 4, 0x90
.LBB1_17: # in Loop: Header=BB1_5 Depth=1
movq 8(%rsp), %r15 # 8-byte Reload
jmp .LBB1_22
.p2align 4, 0x90
.LBB1_19: # in Loop: Header=BB1_5 Depth=1
movq %rcx, %rax
shrq %rax
# kill: def $ecx killed $ecx killed $rcx def $rcx
andl $1, %ecx
orq %rax, %rcx
xorps %xmm0, %xmm0
cvtsi2ss %rcx, %xmm0
addss %xmm0, %xmm0
.LBB1_21: # in Loop: Header=BB1_5 Depth=1
movslq %ebp, %rbp
movq 48(%rsp), %rax # 8-byte Reload
movss %xmm0, (%rax,%rbp,4)
incl %ebp
addl $4, %r12d
.LBB1_22: # in Loop: Header=BB1_5 Depth=1
xorl %eax, %eax
cmpl $64, %r12d
setge %al
addl %eax, %r14d
xorl %eax, %eax
cmpl $16, %r14d
setge %al
addl %eax, %ebx
cmpl $63, %ebx
ja .LBB1_24
# %bb.23: # in Loop: Header=BB1_5 Depth=1
cmpl $16, %r14d
movl $0, %eax
cmovgel %eax, %r14d
cmpl $64, %r12d
movl $2, %eax
cmovgel %eax, %r12d
incl %r13d
leaq 224(%rsp), %rdi
movl $128, %esi
movq %r15, %rdx
callq fgets
testq %rax, %rax
jne .LBB1_5
jmp .LBB1_24
.LBB1_3:
xorl %ebp, %ebp
.LBB1_24: # %._crit_edge
movq %r15, %rdi
callq fclose
movl $.L.str.3, %edi
movl $.L.str.1, %esi
callq fopen
testq %rax, %rax
je .LBB1_1
# %bb.25:
movq %rax, %r12
movl $64, %edi
callq malloc
movq %rax, (%rsp) # 8-byte Spill
movl $64, %edi
callq malloc
movq %rax, 8(%rsp) # 8-byte Spill
leaq 224(%rsp), %rdi
movl $128, %esi
movq %r12, %rdx
callq fgets
testq %rax, %rax
je .LBB1_34
# %bb.26: # %.lr.ph141.preheader
movl $1, %r14d
xorl %ebx, %ebx
leaq 224(%rsp), %r13
jmp .LBB1_27
.p2align 4, 0x90
.LBB1_33: # in Loop: Header=BB1_27 Depth=1
movq %r13, %rdi
movl $128, %esi
movq %r12, %rdx
callq fgets
addl $69, %r14d
testq %rax, %rax
je .LBB1_34
.LBB1_27: # %.lr.ph141
# =>This Inner Loop Header: Depth=1
movl %r14d, %r15d
cmpl $2, %r14d
cmovel %ebx, %ebp
leal -2(%r15), %r14d
andl $3, %r15d
cmpl $63, %r14d
ja .LBB1_30
# %bb.28: # %.lr.ph141
# in Loop: Header=BB1_27 Depth=1
testl %r15d, %r15d
jne .LBB1_30
# %bb.29: # in Loop: Header=BB1_27 Depth=1
movq %r13, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movslq %ebp, %rbp
movq (%rsp), %rax # 8-byte Reload
movss %xmm0, (%rax,%rbp,4)
incl %ebp
.LBB1_30: # in Loop: Header=BB1_27 Depth=1
cmpl $66, %r14d
cmovel %ebx, %ebp
addl $-66, %r14d
cmpl $63, %r14d
ja .LBB1_33
# %bb.31: # in Loop: Header=BB1_27 Depth=1
cmpl $2, %r15d
jne .LBB1_33
# %bb.32: # in Loop: Header=BB1_27 Depth=1
movq %r13, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movslq %ebp, %rbp
movq 8(%rsp), %rax # 8-byte Reload
movss %xmm0, (%rax,%rbp,4)
incl %ebp
jmp .LBB1_33
.LBB1_34: # %._crit_edge142
movq %r12, %rdi
callq fclose
movl $196608, %edi # imm = 0x30000
callq malloc
movq %rax, %r12
movq $0, 40(%rsp)
movq $0, 32(%rsp)
movq $0, 24(%rsp)
movq $0, 16(%rsp)
leaq 40(%rsp), %rdi
movl $196608, %esi # imm = 0x30000
callq hipMalloc
leaq 32(%rsp), %rdi
movl $65536, %esi # imm = 0x10000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $64, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $64, %esi
callq hipMalloc
leaq 64(%rsp), %rdi
movl $8, %esi
callq hipMalloc
movq 32(%rsp), %rdi
movl $65536, %edx # imm = 0x10000
movq 48(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $64, %edx
movq 8(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $64, %edx
movq (%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 64(%rsp), %rdi
leaq 72(%rsp), %rsi
movl $8, %edx
movl $1, %ecx
callq hipMemcpy
leaq 80(%rsp), %rdi
callq hipEventCreate
leaq 56(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967312, %rdi # imm = 0x100000010
leaq 1008(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_36
# %bb.35:
movq 32(%rsp), %rax
movq 64(%rsp), %rcx
movq 16(%rsp), %rdx
movq 24(%rsp), %rsi
movq 40(%rsp), %rdi
movq %rax, 168(%rsp)
movq %rcx, 160(%rsp)
movq %rdx, 152(%rsp)
movq %rsi, 144(%rsp)
movq %rdi, 136(%rsp)
leaq 168(%rsp), %rax
movq %rax, 176(%rsp)
leaq 160(%rsp), %rax
movq %rax, 184(%rsp)
leaq 152(%rsp), %rax
movq %rax, 192(%rsp)
leaq 144(%rsp), %rax
movq %rax, 200(%rsp)
leaq 136(%rsp), %rax
movq %rax, 208(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 176(%rsp), %r9
movl $_Z10ConversionPfPmS_S_S_, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_36:
callq hipDeviceSynchronize
movq 56(%rsp), %rdi
xorl %r13d, %r13d
xorl %esi, %esi
callq hipEventRecord
movq 56(%rsp), %rdi
callq hipEventSynchronize
movl $0, 176(%rsp)
movq 80(%rsp), %rsi
movq 56(%rsp), %rdx
leaq 176(%rsp), %rdi
callq hipEventElapsedTime
movss 176(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rsi
movl $196608, %edx # imm = 0x30000
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.L.str.5, %edi
movl $.L.str.6, %esi
callq fopen
testq %rax, %rax
je .LBB1_43
# %bb.37: # %.preheader127
movq %rax, %rbp
xorl %ebx, %ebx
movq %r12, %r14
.p2align 4, 0x90
.LBB1_38: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_39 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_39: # Parent Loop BB1_38 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %esi
movq %rbp, %rdi
movb $1, %al
callq fprintf
incq %r15
cmpq $1, %r15
je .LBB1_39
# %bb.40: # in Loop: Header=BB1_38 Depth=1
leaq (%rbx,%rbx,2), %rax
movss 8(%r12,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %esi
movq %rbp, %rdi
movb $1, %al
callq fprintf
incq %rbx
addq $12, %r14
cmpq $16384, %rbx # imm = 0x4000
jne .LBB1_38
# %bb.41:
movq %rbp, %rdi
callq fclose
movl $.Lstr, %edi
callq puts@PLT
movq %rbp, %rdi
callq free
movq 48(%rsp), %rdi # 8-byte Reload
callq free
movq (%rsp), %rdi # 8-byte Reload
callq free
movq 8(%rsp), %rdi # 8-byte Reload
callq free
movq %r12, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
movl $1, %r13d
.LBB1_42:
movl %r13d, %eax
addq $360, %rsp # imm = 0x168
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_1:
.cfi_def_cfa_offset 416
movl $.L.str.2, %edi
callq perror
xorl %r13d, %r13d
jmp .LBB1_42
.LBB1_43:
movl $.L.str.2, %edi
callq perror
jmp .LBB1_42
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
# Module constructor (runs from .init_array before main): registers the HIP
# fat binary once (guarded by __hip_gpubin_handle) and registers the
# Conversion kernel with the runtime, then arranges __hip_module_dtor to run
# at exit via atexit (tail call).
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
# Register the fat binary only on the first call.
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB2_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
# Register the kernel under its mangled name (.L__unnamed_1).
	movl	$_Z10ConversionPfPmS_S_S_, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
# Module destructor (registered via atexit by __hip_module_ctor): unregisters
# the fat binary if it was registered and clears the handle so the teardown
# is idempotent.
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB3_2
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z10ConversionPfPmS_S_S_,@object # @_Z10ConversionPfPmS_S_S_
.section .rodata,"a",@progbits
.globl _Z10ConversionPfPmS_S_S_
.p2align 3, 0x0
_Z10ConversionPfPmS_S_S_:
.quad _Z25__device_stub__ConversionPfPmS_S_S_
.size _Z10ConversionPfPmS_S_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Donut_1024x16.csv"
.size .L.str, 18
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "File opening failed"
.size .L.str.2, 20
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "beam_intrinsics.csv"
.size .L.str.3, 20
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Kernel's elapsed time: %.3f ms\n"
.size .L.str.4, 32
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Output_file.csv"
.size .L.str.5, 16
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "w"
.size .L.str.6, 2
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%.4f, "
.size .L.str.7, 7
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%.4f\n "
.size .L.str.8, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10ConversionPfPmS_S_S_"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Success!"
.size .Lstr, 9
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__ConversionPfPmS_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10ConversionPfPmS_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012abb3_00000000-6_Leer_LiDAR_data_paralelo.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit hook (GCC/nvcc-generated): unregisters the CUDA fat binary using the
# handle stored by _ZL24__sti____cudaRegisterAllv at startup.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side launch stub for the Conversion kernel (CUDA/GCC-generated).
# Spills the five pointer arguments, builds the kernel-argument array,
# pops the <<<>>> launch configuration, and calls cudaLaunchKernel.
# Uses a stack-protector canary (%fs:40) around the argument buffers.
	.globl	_Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_
	.type	_Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_, @function
_Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_:
.LFB2082:
	.cfi_startproc
	endbr64
	subq	$168, %rsp
	.cfi_def_cfa_offset 176
# Spill the five incoming kernel arguments (SysV ABI: rdi..r8).
	movq	%rdi, 40(%rsp)
	movq	%rsi, 32(%rsp)
	movq	%rdx, 24(%rsp)
	movq	%rcx, 16(%rsp)
	movq	%r8, 8(%rsp)
# Install the stack-protector canary.
	movq	%fs:40, %rax
	movq	%rax, 152(%rsp)
	xorl	%eax, %eax
# Build the void* kernel-argument array at 112(%rsp).
	leaq	40(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	32(%rsp), %rax
	movq	%rax, 120(%rsp)
	leaq	24(%rsp), %rax
	movq	%rax, 128(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 136(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 144(%rsp)
# Default grid/block dims (all 1s) before popping the real configuration.
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	movl	$1, 72(%rsp)
	movl	$1, 76(%rsp)
	movl	$1, 80(%rsp)
	movl	$1, 84(%rsp)
	leaq	56(%rsp), %rcx
	leaq	48(%rsp), %rdx
	leaq	76(%rsp), %rsi
	leaq	64(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7
.L3:
# Verify the canary before returning; mismatch aborts via __stack_chk_fail.
	movq	152(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L8
	addq	$168, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
# Launch path: pass shared-mem size and stream on the stack.
	pushq	56(%rsp)
	.cfi_def_cfa_offset 184
	pushq	56(%rsp)
	.cfi_def_cfa_offset 192
	leaq	128(%rsp), %r9
	movq	92(%rsp), %rcx
	movl	100(%rsp), %r8d
	movq	80(%rsp), %rsi
	movl	88(%rsp), %edx
	leaq	_Z10ConversionPfPmS_S_S_(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 176
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2082:
	.size	_Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_, .-_Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_
# Host trampoline bearing the kernel's mangled name: simply forwards to the
# device stub above. Its address also serves as the kernel handle passed to
# cudaLaunchKernel / __cudaRegisterFunction.
	.globl	_Z10ConversionPfPmS_S_S_
	.type	_Z10ConversionPfPmS_S_S_, @function
_Z10ConversionPfPmS_S_S_:
.LFB2083:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2083:
	.size	_Z10ConversionPfPmS_S_S_, .-_Z10ConversionPfPmS_S_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.LC1:
.string "Donut_1024x16.csv"
.LC2:
.string "File opening failed"
.LC3:
.string "beam_intrinsics.csv"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "Kernel's elapsed time: %.3f ms\n"
.section .rodata.str1.1
.LC6:
.string "w"
.LC7:
.string "Output_file.csv"
.LC8:
.string "%.4f, "
.LC9:
.string "%.4f\n "
.LC10:
.string "Success!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $280, %rsp
.cfi_def_cfa_offset 336
movq %fs:40, %rax
movq %rax, 264(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
leaq .LC1(%rip), %rdi
call fopen@PLT
movq %rax, 16(%rsp)
testq %rax, %rax
je .L44
movl $65536, %edi
call malloc@PLT
movq %rax, 24(%rsp)
movq $0, 40(%rsp)
movl $1, %ebp
movl $0, %r15d
movl $0, %r14d
movl $2, %r12d
movq $0, 8(%rsp)
movl $0, %r13d
leaq 128(%rsp), %rax
movq %rax, (%rsp)
jmp .L14
.L44:
leaq .LC2(%rip), %rdi
call perror@PLT
movl $0, %eax
jmp .L11
.L45:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
cltq
movq %rax, 40(%rsp)
leal (%r12,%r12,2), %eax
imull $788, %r14d, %edx
leal 17(%rdx,%rax,4), %ebx
imull $12608, %r15d, %eax
addl %eax, %ebx
cmpl %ebx, %ebp
jne .L17
.L16:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
cltq
movq %rax, 8(%rsp)
jmp .L37
.L46:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
sall $8, %eax
cltq
orq %rax, 40(%rsp)
leal (%r12,%r12,2), %eax
imull $788, %r14d, %edx
leal 17(%rdx,%rax,4), %ebx
imull $12608, %r15d, %eax
addl %eax, %ebx
jmp .L19
.L47:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
sall $8, %eax
cltq
orq %rax, 8(%rsp)
jmp .L17
.L48:
movl $10, %edx
movl $0, %esi
movq (%rsp), %rdi
call __isoc23_strtol@PLT
sall $16, %eax
andl $983040, %eax
orq %rax, 8(%rsp)
jmp .L37
.L22:
movq 8(%rsp), %rcx
movq %rcx, %rdx
shrq %rdx
andl $1, %ecx
orq %rcx, %rdx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
addss %xmm0, %xmm0
.L23:
movq 24(%rsp), %rcx
movss %xmm0, (%rcx,%rax,4)
addl $1, %r13d
addl $4, %r12d
cmpl $63, %r12d
jle .L37
addl $1, %r14d
cmpl $15, %r14d
jle .L39
addl $1, %r15d
cmpl $63, %r15d
jg .L26
movl $0, %r14d
movl $2, %r12d
.L37:
addl $1, %ebp
.L14:
movq 16(%rsp), %rcx
movl $128, %edx
movl $128, %esi
movq (%rsp), %rdi
call __fgets_chk@PLT
testq %rax, %rax
je .L26
cmpl $13, %ebp
je .L45
cmpl $14, %ebp
je .L46
leal (%r12,%r12,2), %eax
imull $788, %r14d, %edx
leal 17(%rdx,%rax,4), %ebx
imull $12608, %r15d, %eax
addl %eax, %ebx
cmpl %ebx, %ebp
je .L16
.L19:
leal 1(%rbx), %eax
cmpl %ebp, %eax
je .L47
.L17:
addl $2, %ebx
cmpl %ebp, %ebx
je .L48
jge .L37
movslq %r13d, %rax
movq 8(%rsp), %rcx
testq %rcx, %rcx
js .L22
pxor %xmm0, %xmm0
cvtsi2ssq %rcx, %xmm0
jmp .L23
.L26:
movq 16(%rsp), %rdi
call fclose@PLT
leaq .LC0(%rip), %rsi
leaq .LC3(%rip), %rdi
call fopen@PLT
movq %rax, %rbp
testq %rax, %rax
je .L49
movl $64, %edi
call malloc@PLT
movq %rax, (%rsp)
movl $64, %edi
call malloc@PLT
movq %rax, 8(%rsp)
movl $1, %ebx
leaq 128(%rsp), %r12
jmp .L29
.L49:
leaq .LC2(%rip), %rdi
call perror@PLT
movl $0, %eax
jmp .L11
.L53:
testb $3, %bl
jne .L32
movl $0, %esi
movq %r12, %rdi
call strtod@PLT
movslq %r13d, %rax
cvtsd2ss %xmm0, %xmm0
movq (%rsp), %rcx
movss %xmm0, (%rcx,%rax,4)
addl $1, %r13d
jmp .L32
.L54:
leal -68(%rbx), %eax
cmpl $63, %eax
ja .L32
leal -66(%rbx), %eax
testb $3, %al
jne .L32
movl $0, %esi
movq %r12, %rdi
call strtod@PLT
movslq %r13d, %rax
cvtsd2ss %xmm0, %xmm0
movq 8(%rsp), %rcx
movss %xmm0, (%rcx,%rax,4)
addl $1, %r13d
jmp .L32
.L52:
movq %rbp, %rdi
call fclose@PLT
movl $196608, %edi
call malloc@PLT
movq %rax, %r15
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 72(%rsp)
leaq 48(%rsp), %rdi
movl $196608, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $65536, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movl $8, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $65536, %edx
movq 24(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $64, %edx
movq 8(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $64, %edx
movq (%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
leaq 40(%rsp), %rsi
movl $1, %ecx
movl $8, %edx
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
leaq 88(%rsp), %rdi
call cudaEventCreate@PLT
leaq 96(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 88(%rsp), %rdi
call cudaEventRecord@PLT
movl $1024, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $16, 104(%rsp)
movl $1, 108(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 116(%rsp), %rdx
movl $1, %ecx
movq 104(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L50
.L34:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 96(%rsp), %rdi
call cudaEventRecord@PLT
movq 96(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 116(%rsp)
leaq 116(%rsp), %rdi
movq 96(%rsp), %rdx
movq 88(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 116(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $196608, %edx
movq 48(%rsp), %rsi
movq %r15, %rdi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
leaq .LC7(%rip), %rdi
call fopen@PLT
movq %rax, %rbp
testq %rax, %rax
je .L35
movq %r15, %rbx
leaq 196608(%r15), %r14
leaq .LC8(%rip), %r12
leaq .LC9(%rip), %r13
.L36:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 4(%rbx), %xmm0
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 8(%rbx), %xmm0
movq %r13, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
addq $12, %rbx
cmpq %rbx, %r14
jne .L36
movq %rbp, %rdi
call fclose@PLT
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq (%rsp), %rdi
call free@PLT
movq 8(%rsp), %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movl $1, %eax
.L11:
movq 264(%rsp), %rdx
subq %fs:40, %rdx
jne .L51
addq $280, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L50:
.cfi_restore_state
movq 48(%rsp), %r8
movq 64(%rsp), %rcx
movq 72(%rsp), %rdx
movq 80(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z38__device_stub__Z10ConversionPfPmS_S_S_PfPmS_S_S_
jmp .L34
.L35:
leaq .LC2(%rip), %rdi
call perror@PLT
movl $0, %eax
jmp .L11
.L40:
movl $0, %r13d
.L32:
addl $1, %ebx
.L29:
movq %rbp, %rcx
movl $128, %edx
movl $128, %esi
movq %r12, %rdi
call __fgets_chk@PLT
testq %rax, %rax
je .L52
cmpl $2, %ebx
je .L40
leal -2(%rbx), %eax
cmpl $63, %eax
jbe .L53
cmpl $68, %ebx
jne .L54
movl $0, %r13d
jmp .L32
.L39:
movl $2, %r12d
jmp .L37
.L51:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z10ConversionPfPmS_S_S_"
.text
# Static initializer (runs from .init_array before main): registers the CUDA
# fat binary, registers the Conversion kernel under its mangled name (.LC11),
# finalizes registration, and schedules the unregister hook via atexit.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
# Four zero stack arguments for __cudaRegisterFunction's trailing parameters.
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC11(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z10ConversionPfPmS_S_S_(%rip), %rsi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2085:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Leer_LiDAR_data_paralelo.hip"
.globl _Z25__device_stub__ConversionPfPmS_S_S_ # -- Begin function _Z25__device_stub__ConversionPfPmS_S_S_
.p2align 4, 0x90
.type _Z25__device_stub__ConversionPfPmS_S_S_,@function
_Z25__device_stub__ConversionPfPmS_S_S_: # @_Z25__device_stub__ConversionPfPmS_S_S_
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10ConversionPfPmS_S_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z25__device_stub__ConversionPfPmS_S_S_, .Lfunc_end0-_Z25__device_stub__ConversionPfPmS_S_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $360, %rsp # imm = 0x168
.cfi_def_cfa_offset 416
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $.L.str, %edi
movl $.L.str.1, %esi
callq fopen
testq %rax, %rax
je .LBB1_1
# %bb.2:
movq %rax, %r15
movl $65536, %edi # imm = 0x10000
callq malloc
movq %rax, 48(%rsp) # 8-byte Spill
movq $0, 72(%rsp)
leaq 224(%rsp), %rdi
movl $128, %esi
movq %r15, %rdx
callq fgets
testq %rax, %rax
je .LBB1_3
# %bb.4: # %.lr.ph.preheader
movl $1, %r13d
xorl %ebp, %ebp
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
movl $2, %r12d
xorl %ebx, %ebx
xorl %r14d, %r14d
movq %r15, 8(%rsp) # 8-byte Spill
.p2align 4, 0x90
.LBB1_5: # %.lr.ph
# =>This Inner Loop Header: Depth=1
cmpl $14, %r13d
je .LBB1_8
# %bb.6: # %.lr.ph
# in Loop: Header=BB1_5 Depth=1
cmpl $13, %r13d
jne .LBB1_9
# %bb.7: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cltq
movq %rax, 72(%rsp)
jmp .LBB1_9
.p2align 4, 0x90
.LBB1_8: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
shll $8, %eax
cltq
orq %rax, 72(%rsp)
.LBB1_9: # in Loop: Header=BB1_5 Depth=1
leal (%r12,%r12,2), %eax
imull $788, %r14d, %ecx # imm = 0x314
imull $12608, %ebx, %edx # imm = 0x3140
addl %ecx, %edx
leal (%rdx,%rax,4), %r15d
addl $17, %r15d
cmpl %r15d, %r13d
jne .LBB1_11
# %bb.10: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cltq
movq %rax, (%rsp) # 8-byte Spill
.LBB1_11: # in Loop: Header=BB1_5 Depth=1
leal 1(%r15), %eax
cmpl %eax, %r13d
jne .LBB1_12
# %bb.13: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
shll $8, %eax
cltq
movq (%rsp), %rcx # 8-byte Reload
orq %rax, %rcx
jmp .LBB1_14
.p2align 4, 0x90
.LBB1_12: # in Loop: Header=BB1_5 Depth=1
movq (%rsp), %rcx # 8-byte Reload
.LBB1_14: # in Loop: Header=BB1_5 Depth=1
orl $2, %r15d
cmpl %r15d, %r13d
jne .LBB1_16
# %bb.15: # in Loop: Header=BB1_5 Depth=1
leaq 224(%rsp), %rdi
xorl %esi, %esi
movl $10, %edx
movl %ebx, (%rsp) # 4-byte Spill
movq %rcx, %rbx
callq __isoc23_strtol
andl $15, %eax
shll $16, %eax
orq %rax, %rbx
movq %rbx, %rcx
movl (%rsp), %ebx # 4-byte Reload
.LBB1_16: # in Loop: Header=BB1_5 Depth=1
cmpl %r15d, %r13d
movq %rcx, (%rsp) # 8-byte Spill
jle .LBB1_17
# %bb.18: # in Loop: Header=BB1_5 Depth=1
testq %rcx, %rcx
movq 8(%rsp), %r15 # 8-byte Reload
js .LBB1_19
# %bb.20: # in Loop: Header=BB1_5 Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %rcx, %xmm0
jmp .LBB1_21
.p2align 4, 0x90
.LBB1_17: # in Loop: Header=BB1_5 Depth=1
movq 8(%rsp), %r15 # 8-byte Reload
jmp .LBB1_22
.p2align 4, 0x90
.LBB1_19: # in Loop: Header=BB1_5 Depth=1
movq %rcx, %rax
shrq %rax
# kill: def $ecx killed $ecx killed $rcx def $rcx
andl $1, %ecx
orq %rax, %rcx
xorps %xmm0, %xmm0
cvtsi2ss %rcx, %xmm0
addss %xmm0, %xmm0
.LBB1_21: # in Loop: Header=BB1_5 Depth=1
movslq %ebp, %rbp
movq 48(%rsp), %rax # 8-byte Reload
movss %xmm0, (%rax,%rbp,4)
incl %ebp
addl $4, %r12d
.LBB1_22: # in Loop: Header=BB1_5 Depth=1
xorl %eax, %eax
cmpl $64, %r12d
setge %al
addl %eax, %r14d
xorl %eax, %eax
cmpl $16, %r14d
setge %al
addl %eax, %ebx
cmpl $63, %ebx
ja .LBB1_24
# %bb.23: # in Loop: Header=BB1_5 Depth=1
cmpl $16, %r14d
movl $0, %eax
cmovgel %eax, %r14d
cmpl $64, %r12d
movl $2, %eax
cmovgel %eax, %r12d
incl %r13d
leaq 224(%rsp), %rdi
movl $128, %esi
movq %r15, %rdx
callq fgets
testq %rax, %rax
jne .LBB1_5
jmp .LBB1_24
.LBB1_3:
xorl %ebp, %ebp
.LBB1_24: # %._crit_edge
movq %r15, %rdi
callq fclose
movl $.L.str.3, %edi
movl $.L.str.1, %esi
callq fopen
testq %rax, %rax
je .LBB1_1
# %bb.25:
movq %rax, %r12
movl $64, %edi
callq malloc
movq %rax, (%rsp) # 8-byte Spill
movl $64, %edi
callq malloc
movq %rax, 8(%rsp) # 8-byte Spill
leaq 224(%rsp), %rdi
movl $128, %esi
movq %r12, %rdx
callq fgets
testq %rax, %rax
je .LBB1_34
# %bb.26: # %.lr.ph141.preheader
movl $1, %r14d
xorl %ebx, %ebx
leaq 224(%rsp), %r13
jmp .LBB1_27
.p2align 4, 0x90
.LBB1_33: # in Loop: Header=BB1_27 Depth=1
movq %r13, %rdi
movl $128, %esi
movq %r12, %rdx
callq fgets
addl $69, %r14d
testq %rax, %rax
je .LBB1_34
.LBB1_27: # %.lr.ph141
# =>This Inner Loop Header: Depth=1
movl %r14d, %r15d
cmpl $2, %r14d
cmovel %ebx, %ebp
leal -2(%r15), %r14d
andl $3, %r15d
cmpl $63, %r14d
ja .LBB1_30
# %bb.28: # %.lr.ph141
# in Loop: Header=BB1_27 Depth=1
testl %r15d, %r15d
jne .LBB1_30
# %bb.29: # in Loop: Header=BB1_27 Depth=1
movq %r13, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movslq %ebp, %rbp
movq (%rsp), %rax # 8-byte Reload
movss %xmm0, (%rax,%rbp,4)
incl %ebp
.LBB1_30: # in Loop: Header=BB1_27 Depth=1
cmpl $66, %r14d
cmovel %ebx, %ebp
addl $-66, %r14d
cmpl $63, %r14d
ja .LBB1_33
# %bb.31: # in Loop: Header=BB1_27 Depth=1
cmpl $2, %r15d
jne .LBB1_33
# %bb.32: # in Loop: Header=BB1_27 Depth=1
movq %r13, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movslq %ebp, %rbp
movq 8(%rsp), %rax # 8-byte Reload
movss %xmm0, (%rax,%rbp,4)
incl %ebp
jmp .LBB1_33
.LBB1_34: # %._crit_edge142
movq %r12, %rdi
callq fclose
movl $196608, %edi # imm = 0x30000
callq malloc
movq %rax, %r12
movq $0, 40(%rsp)
movq $0, 32(%rsp)
movq $0, 24(%rsp)
movq $0, 16(%rsp)
leaq 40(%rsp), %rdi
movl $196608, %esi # imm = 0x30000
callq hipMalloc
leaq 32(%rsp), %rdi
movl $65536, %esi # imm = 0x10000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $64, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $64, %esi
callq hipMalloc
leaq 64(%rsp), %rdi
movl $8, %esi
callq hipMalloc
movq 32(%rsp), %rdi
movl $65536, %edx # imm = 0x10000
movq 48(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $64, %edx
movq 8(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $64, %edx
movq (%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movq 64(%rsp), %rdi
leaq 72(%rsp), %rsi
movl $8, %edx
movl $1, %ecx
callq hipMemcpy
leaq 80(%rsp), %rdi
callq hipEventCreate
leaq 56(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967312, %rdi # imm = 0x100000010
leaq 1008(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_36
# %bb.35:
movq 32(%rsp), %rax
movq 64(%rsp), %rcx
movq 16(%rsp), %rdx
movq 24(%rsp), %rsi
movq 40(%rsp), %rdi
movq %rax, 168(%rsp)
movq %rcx, 160(%rsp)
movq %rdx, 152(%rsp)
movq %rsi, 144(%rsp)
movq %rdi, 136(%rsp)
leaq 168(%rsp), %rax
movq %rax, 176(%rsp)
leaq 160(%rsp), %rax
movq %rax, 184(%rsp)
leaq 152(%rsp), %rax
movq %rax, 192(%rsp)
leaq 144(%rsp), %rax
movq %rax, 200(%rsp)
leaq 136(%rsp), %rax
movq %rax, 208(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 176(%rsp), %r9
movl $_Z10ConversionPfPmS_S_S_, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_36:
callq hipDeviceSynchronize
movq 56(%rsp), %rdi
xorl %r13d, %r13d
xorl %esi, %esi
callq hipEventRecord
movq 56(%rsp), %rdi
callq hipEventSynchronize
movl $0, 176(%rsp)
movq 80(%rsp), %rsi
movq 56(%rsp), %rdx
leaq 176(%rsp), %rdi
callq hipEventElapsedTime
movss 176(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rsi
movl $196608, %edx # imm = 0x30000
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.L.str.5, %edi
movl $.L.str.6, %esi
callq fopen
testq %rax, %rax
je .LBB1_43
# %bb.37: # %.preheader127
movq %rax, %rbp
xorl %ebx, %ebx
movq %r12, %r14
.p2align 4, 0x90
.LBB1_38: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_39 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_39: # Parent Loop BB1_38 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %esi
movq %rbp, %rdi
movb $1, %al
callq fprintf
incq %r15
cmpq $1, %r15
je .LBB1_39
# %bb.40: # in Loop: Header=BB1_38 Depth=1
leaq (%rbx,%rbx,2), %rax
movss 8(%r12,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %esi
movq %rbp, %rdi
movb $1, %al
callq fprintf
incq %rbx
addq $12, %r14
cmpq $16384, %rbx # imm = 0x4000
jne .LBB1_38
# %bb.41:
movq %rbp, %rdi
callq fclose
movl $.Lstr, %edi
callq puts@PLT
movq %rbp, %rdi
callq free
movq 48(%rsp), %rdi # 8-byte Reload
callq free
movq (%rsp), %rdi # 8-byte Reload
callq free
movq 8(%rsp), %rdi # 8-byte Reload
callq free
movq %r12, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
movl $1, %r13d
.LBB1_42:
movl %r13d, %eax
addq $360, %rsp # imm = 0x168
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_1:
.cfi_def_cfa_offset 416
movl $.L.str.2, %edi
callq perror
xorl %r13d, %r13d
jmp .LBB1_42
.LBB1_43:
movl $.L.str.2, %edi
callq perror
jmp .LBB1_42
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10ConversionPfPmS_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10ConversionPfPmS_S_S_,@object # @_Z10ConversionPfPmS_S_S_
.section .rodata,"a",@progbits
.globl _Z10ConversionPfPmS_S_S_
.p2align 3, 0x0
_Z10ConversionPfPmS_S_S_:
.quad _Z25__device_stub__ConversionPfPmS_S_S_
.size _Z10ConversionPfPmS_S_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Donut_1024x16.csv"
.size .L.str, 18
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "File opening failed"
.size .L.str.2, 20
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "beam_intrinsics.csv"
.size .L.str.3, 20
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Kernel's elapsed time: %.3f ms\n"
.size .L.str.4, 32
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Output_file.csv"
.size .L.str.5, 16
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "w"
.size .L.str.6, 2
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%.4f, "
.size .L.str.7, 7
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%.4f\n "
.size .L.str.8, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10ConversionPfPmS_S_S_"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Success!"
.size .Lstr, 9
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__ConversionPfPmS_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10ConversionPfPmS_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <vector>
#include <set>
using namespace std;
vector<string> splitpath( const string& str, const set<char> delimiters)
{
vector<string> result;
char const* pch = str.c_str();
char const* start = pch;
for(; *pch; ++pch)
{
if (delimiters.find(*pch) != delimiters.end())
{
if (start != pch)
{
string str(start, pch);
result.push_back(str);
}
else
{
result.push_back("");
}
start = pch + 1;
}
}
result.push_back(start);
return result;
}
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
cout << cudaGetErrorString(error) << endl; \
} \
} while (0)
// Sumar cada columna(pixel) de las imagenes en paralelo
__global__ void kernel_colSum(float *r_in, float *g_in, float *b_in,
float *r_result, float *g_result, float *b_result , int nrow, int ncol) {
int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (colIdx < ncol) {
float sum_r=0;
float sum_g=0;
float sum_b=0;
for (int k = 0 ; k < nrow ; k++) {
sum_r+=r_in[colIdx+ncol*k];
sum_g+=g_in[colIdx+ncol*k];
sum_b+=b_in[colIdx+ncol*k];
}
r_result[colIdx] = sum_r;
g_result[colIdx] = sum_g;
b_result[colIdx] = sum_b;
}
}
// Dividir cada canal por la cantidad de imagenes L
__global__ void kernel_colDiv(float *r_out, float *g_out, float *b_out, int nImg, int size) {
int Idx = threadIdx.x + blockIdx.x * blockDim.x;
if (Idx < size) {
r_out[Idx] = r_out[Idx]/nImg;
g_out[Idx] = g_out[Idx]/nImg;
b_out[Idx] = b_out[Idx]/nImg;
}
}
int main(int argc, char *argv[]){
string input_file_name;
if (argc > 1) {
input_file_name = argv[1];
}
ifstream infile;
infile.open(input_file_name.c_str());
int L,M,N, rows, cols, total_pixels;
float *r_host, *g_host, *b_host, *r_out_host, *g_out_host, *b_out_host;
float *r_dev, *g_dev, *b_dev, *r_out_dev, *g_out_dev, *b_out_dev;
infile >> L >> M >> N;
rows = L;
cols = M*N;
total_pixels = rows*cols;
// Allocating matrix
r_host = (float *)malloc(total_pixels * sizeof(float));
g_host = (float *)malloc(total_pixels * sizeof(float));
b_host = (float *)malloc(total_pixels * sizeof(float));
r_out_host = (float *)malloc(cols * sizeof(float));
g_out_host = (float *)malloc(cols * sizeof(float));
b_out_host = (float *)malloc(cols * sizeof(float));
// Initialize with zeros
// I didn't use Calloc because it doesn't work with floats
for (int j = 0; j < cols; j++)
{
r_out_host[j] = 0.5;
g_out_host[j] = 0.5;
b_out_host[j] = 0.5;
}
// Reading matrix
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
infile >> r_host[i*cols+j];
}
for (int j = 0; j < cols; j++)
{
infile >> g_host[i*cols+j];
}
for (int j = 0; j < cols; j++)
{
infile >> b_host[i*cols+j];
}
}
cudaEvent_t ct1, ct2;
float dt;
CUDA_CHECK(cudaMalloc((void**)&r_dev, total_pixels * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&g_dev, total_pixels * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&b_dev, total_pixels * sizeof(float)));
// Input matrix of images
CUDA_CHECK(cudaMemcpy(r_dev, r_host, total_pixels * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(g_dev, g_host, total_pixels * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(b_dev, b_host, total_pixels * sizeof(float), cudaMemcpyHostToDevice));
// Output image
CUDA_CHECK(cudaMalloc((void**)&r_out_dev, cols * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&g_out_dev, cols * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&b_out_dev, cols * sizeof(float)));
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
cudaEventRecord(ct1);
int grid_size, block_size = 256;
grid_size = (int)ceil((float) L * M * N / block_size);
kernel_colSum<<<grid_size, block_size>>>(r_dev, g_dev, b_dev, r_out_dev, g_out_dev, b_out_dev, rows, cols);
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
float aux_dt = dt;
float duration;
cudaEventRecord(ct1);
kernel_colDiv<<<grid_size, block_size>>>(r_out_dev, g_out_dev, b_out_dev, L, cols);
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
cudaDeviceSynchronize();
duration = dt+aux_dt;
std::cout << "Tiempo GPU: " << duration << "[ms]" << std::endl;
CUDA_CHECK(cudaMemcpy(r_out_host, r_out_dev, cols * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(g_out_host, g_out_dev, cols * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(b_out_host, b_out_dev, cols * sizeof(float), cudaMemcpyDeviceToHost));
// Dividing by L de R, G and B Channels
/*
for (int j = 0; j < cols; j++)
{
r_out_host[j] /= L;
g_out_host[j] /= L;
b_out_host[j] /= L;
}
*/
set<char> delims{'/'};
vector<string> path = splitpath(input_file_name, delims);
// Escribiendo resultado en archivo
ofstream times_file;
times_file.open("results_cuda.txt", ios_base::app);
times_file << path.back() << " " << duration << "[ms]" << endl;
// Printing the result file
ofstream result_file;
result_file.open("result_cuda_"+path.back());
result_file << M << " " << N << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << r_out_host[j] << " ";
}
result_file << r_out_host[cols-1] << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << g_out_host[j] << " ";
}
result_file << g_out_host[cols-1] << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << b_out_host[j] << " ";
}
result_file << b_out_host[cols-1];
CUDA_CHECK(cudaFree(r_dev));
CUDA_CHECK(cudaFree(g_dev));
CUDA_CHECK(cudaFree(b_dev));
CUDA_CHECK(cudaFree(r_out_dev));
CUDA_CHECK(cudaFree(g_out_dev));
CUDA_CHECK(cudaFree(b_out_dev));
free(r_host);
free(g_host);
free(b_host);
free(r_out_host);
free(g_out_host);
free(b_out_host);
times_file.close();
infile.close();
result_file.close();
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <vector>
#include <set>
using namespace std;
vector<string> splitpath( const string& str, const set<char> delimiters)
{
vector<string> result;
char const* pch = str.c_str();
char const* start = pch;
for(; *pch; ++pch)
{
if (delimiters.find(*pch) != delimiters.end())
{
if (start != pch)
{
string str(start, pch);
result.push_back(str);
}
else
{
result.push_back("");
}
start = pch + 1;
}
}
result.push_back(start);
return result;
}
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
cout << hipGetErrorString(error) << endl; \
} \
} while (0)
// Sumar cada columna(pixel) de las imagenes en paralelo
__global__ void kernel_colSum(float *r_in, float *g_in, float *b_in,
float *r_result, float *g_result, float *b_result , int nrow, int ncol) {
int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (colIdx < ncol) {
float sum_r=0;
float sum_g=0;
float sum_b=0;
for (int k = 0 ; k < nrow ; k++) {
sum_r+=r_in[colIdx+ncol*k];
sum_g+=g_in[colIdx+ncol*k];
sum_b+=b_in[colIdx+ncol*k];
}
r_result[colIdx] = sum_r;
g_result[colIdx] = sum_g;
b_result[colIdx] = sum_b;
}
}
// Dividir cada canal por la cantidad de imagenes L
__global__ void kernel_colDiv(float *r_out, float *g_out, float *b_out, int nImg, int size) {
int Idx = threadIdx.x + blockIdx.x * blockDim.x;
if (Idx < size) {
r_out[Idx] = r_out[Idx]/nImg;
g_out[Idx] = g_out[Idx]/nImg;
b_out[Idx] = b_out[Idx]/nImg;
}
}
int main(int argc, char *argv[]){
string input_file_name;
if (argc > 1) {
input_file_name = argv[1];
}
ifstream infile;
infile.open(input_file_name.c_str());
int L,M,N, rows, cols, total_pixels;
float *r_host, *g_host, *b_host, *r_out_host, *g_out_host, *b_out_host;
float *r_dev, *g_dev, *b_dev, *r_out_dev, *g_out_dev, *b_out_dev;
infile >> L >> M >> N;
rows = L;
cols = M*N;
total_pixels = rows*cols;
// Allocating matrix
r_host = (float *)malloc(total_pixels * sizeof(float));
g_host = (float *)malloc(total_pixels * sizeof(float));
b_host = (float *)malloc(total_pixels * sizeof(float));
r_out_host = (float *)malloc(cols * sizeof(float));
g_out_host = (float *)malloc(cols * sizeof(float));
b_out_host = (float *)malloc(cols * sizeof(float));
// Initialize with zeros
// I didn't use Calloc because it doesn't work with floats
for (int j = 0; j < cols; j++)
{
r_out_host[j] = 0.5;
g_out_host[j] = 0.5;
b_out_host[j] = 0.5;
}
// Reading matrix
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
infile >> r_host[i*cols+j];
}
for (int j = 0; j < cols; j++)
{
infile >> g_host[i*cols+j];
}
for (int j = 0; j < cols; j++)
{
infile >> b_host[i*cols+j];
}
}
hipEvent_t ct1, ct2;
float dt;
CUDA_CHECK(hipMalloc((void**)&r_dev, total_pixels * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&g_dev, total_pixels * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&b_dev, total_pixels * sizeof(float)));
// Input matrix of images
CUDA_CHECK(hipMemcpy(r_dev, r_host, total_pixels * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(g_dev, g_host, total_pixels * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(b_dev, b_host, total_pixels * sizeof(float), hipMemcpyHostToDevice));
// Output image
CUDA_CHECK(hipMalloc((void**)&r_out_dev, cols * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&g_out_dev, cols * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&b_out_dev, cols * sizeof(float)));
hipEventCreate(&ct1);
hipEventCreate(&ct2);
hipEventRecord(ct1);
int grid_size, block_size = 256;
grid_size = (int)ceil((float) L * M * N / block_size);
kernel_colSum<<<grid_size, block_size>>>(r_dev, g_dev, b_dev, r_out_dev, g_out_dev, b_out_dev, rows, cols);
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
float aux_dt = dt;
float duration;
hipEventRecord(ct1);
kernel_colDiv<<<grid_size, block_size>>>(r_out_dev, g_out_dev, b_out_dev, L, cols);
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
hipDeviceSynchronize();
duration = dt+aux_dt;
std::cout << "Tiempo GPU: " << duration << "[ms]" << std::endl;
CUDA_CHECK(hipMemcpy(r_out_host, r_out_dev, cols * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(g_out_host, g_out_dev, cols * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(b_out_host, b_out_dev, cols * sizeof(float), hipMemcpyDeviceToHost));
// Dividing by L de R, G and B Channels
/*
for (int j = 0; j < cols; j++)
{
r_out_host[j] /= L;
g_out_host[j] /= L;
b_out_host[j] /= L;
}
*/
set<char> delims{'/'};
vector<string> path = splitpath(input_file_name, delims);
// Escribiendo resultado en archivo
ofstream times_file;
times_file.open("results_cuda.txt", ios_base::app);
times_file << path.back() << " " << duration << "[ms]" << endl;
// Printing the result file
ofstream result_file;
result_file.open("result_cuda_"+path.back());
result_file << M << " " << N << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << r_out_host[j] << " ";
}
result_file << r_out_host[cols-1] << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << g_out_host[j] << " ";
}
result_file << g_out_host[cols-1] << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << b_out_host[j] << " ";
}
result_file << b_out_host[cols-1];
CUDA_CHECK(hipFree(r_dev));
CUDA_CHECK(hipFree(g_dev));
CUDA_CHECK(hipFree(b_dev));
CUDA_CHECK(hipFree(r_out_dev));
CUDA_CHECK(hipFree(g_out_dev));
CUDA_CHECK(hipFree(b_out_dev));
free(r_host);
free(g_host);
free(b_host);
free(r_out_host);
free(g_out_host);
free(b_out_host);
times_file.close();
infile.close();
result_file.close();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <vector>
#include <set>
using namespace std;
vector<string> splitpath( const string& str, const set<char> delimiters)
{
vector<string> result;
char const* pch = str.c_str();
char const* start = pch;
for(; *pch; ++pch)
{
if (delimiters.find(*pch) != delimiters.end())
{
if (start != pch)
{
string str(start, pch);
result.push_back(str);
}
else
{
result.push_back("");
}
start = pch + 1;
}
}
result.push_back(start);
return result;
}
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
cout << hipGetErrorString(error) << endl; \
} \
} while (0)
// Sumar cada columna(pixel) de las imagenes en paralelo
__global__ void kernel_colSum(float *r_in, float *g_in, float *b_in,
float *r_result, float *g_result, float *b_result , int nrow, int ncol) {
int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (colIdx < ncol) {
float sum_r=0;
float sum_g=0;
float sum_b=0;
for (int k = 0 ; k < nrow ; k++) {
sum_r+=r_in[colIdx+ncol*k];
sum_g+=g_in[colIdx+ncol*k];
sum_b+=b_in[colIdx+ncol*k];
}
r_result[colIdx] = sum_r;
g_result[colIdx] = sum_g;
b_result[colIdx] = sum_b;
}
}
// Dividir cada canal por la cantidad de imagenes L
__global__ void kernel_colDiv(float *r_out, float *g_out, float *b_out, int nImg, int size) {
int Idx = threadIdx.x + blockIdx.x * blockDim.x;
if (Idx < size) {
r_out[Idx] = r_out[Idx]/nImg;
g_out[Idx] = g_out[Idx]/nImg;
b_out[Idx] = b_out[Idx]/nImg;
}
}
int main(int argc, char *argv[]){
string input_file_name;
if (argc > 1) {
input_file_name = argv[1];
}
ifstream infile;
infile.open(input_file_name.c_str());
int L,M,N, rows, cols, total_pixels;
float *r_host, *g_host, *b_host, *r_out_host, *g_out_host, *b_out_host;
float *r_dev, *g_dev, *b_dev, *r_out_dev, *g_out_dev, *b_out_dev;
infile >> L >> M >> N;
rows = L;
cols = M*N;
total_pixels = rows*cols;
// Allocating matrix
r_host = (float *)malloc(total_pixels * sizeof(float));
g_host = (float *)malloc(total_pixels * sizeof(float));
b_host = (float *)malloc(total_pixels * sizeof(float));
r_out_host = (float *)malloc(cols * sizeof(float));
g_out_host = (float *)malloc(cols * sizeof(float));
b_out_host = (float *)malloc(cols * sizeof(float));
// Initialize with zeros
// I didn't use Calloc because it doesn't work with floats
for (int j = 0; j < cols; j++)
{
r_out_host[j] = 0.5;
g_out_host[j] = 0.5;
b_out_host[j] = 0.5;
}
// Reading matrix
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
infile >> r_host[i*cols+j];
}
for (int j = 0; j < cols; j++)
{
infile >> g_host[i*cols+j];
}
for (int j = 0; j < cols; j++)
{
infile >> b_host[i*cols+j];
}
}
hipEvent_t ct1, ct2;
float dt;
CUDA_CHECK(hipMalloc((void**)&r_dev, total_pixels * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&g_dev, total_pixels * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&b_dev, total_pixels * sizeof(float)));
// Input matrix of images
CUDA_CHECK(hipMemcpy(r_dev, r_host, total_pixels * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(g_dev, g_host, total_pixels * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(b_dev, b_host, total_pixels * sizeof(float), hipMemcpyHostToDevice));
// Output image
CUDA_CHECK(hipMalloc((void**)&r_out_dev, cols * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&g_out_dev, cols * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&b_out_dev, cols * sizeof(float)));
hipEventCreate(&ct1);
hipEventCreate(&ct2);
hipEventRecord(ct1);
int grid_size, block_size = 256;
grid_size = (int)ceil((float) L * M * N / block_size);
kernel_colSum<<<grid_size, block_size>>>(r_dev, g_dev, b_dev, r_out_dev, g_out_dev, b_out_dev, rows, cols);
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
float aux_dt = dt;
float duration;
hipEventRecord(ct1);
kernel_colDiv<<<grid_size, block_size>>>(r_out_dev, g_out_dev, b_out_dev, L, cols);
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
hipDeviceSynchronize();
duration = dt+aux_dt;
std::cout << "Tiempo GPU: " << duration << "[ms]" << std::endl;
CUDA_CHECK(hipMemcpy(r_out_host, r_out_dev, cols * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(g_out_host, g_out_dev, cols * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(b_out_host, b_out_dev, cols * sizeof(float), hipMemcpyDeviceToHost));
// Dividing by L de R, G and B Channels
/*
for (int j = 0; j < cols; j++)
{
r_out_host[j] /= L;
g_out_host[j] /= L;
b_out_host[j] /= L;
}
*/
set<char> delims{'/'};
vector<string> path = splitpath(input_file_name, delims);
// Escribiendo resultado en archivo
ofstream times_file;
times_file.open("results_cuda.txt", ios_base::app);
times_file << path.back() << " " << duration << "[ms]" << endl;
// Printing the result file
ofstream result_file;
result_file.open("result_cuda_"+path.back());
result_file << M << " " << N << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << r_out_host[j] << " ";
}
result_file << r_out_host[cols-1] << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << g_out_host[j] << " ";
}
result_file << g_out_host[cols-1] << endl;
for (int j = 0; j < cols-1; j++)
{
result_file << b_out_host[j] << " ";
}
result_file << b_out_host[cols-1];
CUDA_CHECK(hipFree(r_dev));
CUDA_CHECK(hipFree(g_dev));
CUDA_CHECK(hipFree(b_dev));
CUDA_CHECK(hipFree(r_out_dev));
CUDA_CHECK(hipFree(g_out_dev));
CUDA_CHECK(hipFree(b_out_dev));
free(r_host);
free(g_host);
free(b_host);
free(r_out_host);
free(g_out_host);
free(b_out_host);
times_file.close();
infile.close();
result_file.close();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13kernel_colSumPfS_S_S_S_S_ii
.globl _Z13kernel_colSumPfS_S_S_S_S_ii
.p2align 8
.type _Z13kernel_colSumPfS_S_S_S_S_ii,@function
_Z13kernel_colSumPfS_S_S_S_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x44
s_load_b32 s8, s[0:1], 0x34
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_5
s_load_b32 s9, s[0:1], 0x30
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v5, 0
v_mov_b32_e32 v4, 0
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s9, 1
s_cbranch_scc1 .LBB0_4
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v5, 0
v_mov_b32_e32 v2, v1
v_mov_b32_e32 v4, 0
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s9, s9, -1
s_cmp_eq_u32 s9, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 2, v[2:3]
v_add_nc_u32_e32 v2, s8, v2
s_waitcnt lgkmcnt(0)
v_add_co_u32 v8, vcc_lo, s4, v6
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v7, vcc_lo
v_add_co_u32 v10, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s2, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b32 v3, v[8:9], off
global_load_b32 v8, v[10:11], off
global_load_b32 v6, v[6:7], off
s_waitcnt vmcnt(1)
v_dual_add_f32 v5, v5, v3 :: v_dual_add_f32 v4, v4, v8
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v6
s_cbranch_scc0 .LBB0_3
.LBB0_4:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x18
s_load_b64 s[0:1], s[0:1], 0x28
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v2, vcc_lo
v_add_co_u32 v8, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[6:7], v5, off
global_store_b32 v[8:9], v4, off
global_store_b32 v[1:2], v0, off
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13kernel_colSumPfS_S_S_S_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13kernel_colSumPfS_S_S_S_S_ii, .Lfunc_end0-_Z13kernel_colSumPfS_S_S_S_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13kernel_colDivPfS_S_ii
.globl _Z13kernel_colDivPfS_S_ii
.p2align 8
.type _Z13kernel_colDivPfS_S_ii,@function
_Z13kernel_colDivPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB1_2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_load_b32 s0, s[0:1], 0x18
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_cvt_f32_i32_e32 v6, s0
global_load_b32 v4, v[2:3], off
s_waitcnt vmcnt(0)
v_div_scale_f32 v5, null, v6, v6, v4
v_div_scale_f32 v9, vcc_lo, v4, v6, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v7, v5
s_waitcnt_depctr 0xfff
v_fma_f32 v8, -v5, v7, 1.0
v_fmac_f32_e32 v7, v8, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v8, v9, v7
v_fma_f32 v10, -v5, v8, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v8, v10, v7
v_fma_f32 v5, -v5, v8, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v5, v5, v7, v8
v_div_fixup_f32 v7, v5, v6, v4
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
global_store_b32 v[2:3], v7, off
global_load_b32 v2, v[4:5], off
s_waitcnt vmcnt(0)
v_div_scale_f32 v3, null, v6, v6, v2
v_div_scale_f32 v9, vcc_lo, v2, v6, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v7, v3
s_waitcnt_depctr 0xfff
v_fma_f32 v8, -v3, v7, 1.0
v_fmac_f32_e32 v7, v8, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v8, v9, v7
v_fma_f32 v10, -v3, v8, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v8, v10, v7
v_fma_f32 v3, -v3, v8, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_div_fmas_f32 v3, v3, v7, v8
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_div_fixup_f32 v2, v3, v6, v2
global_store_b32 v[4:5], v2, off
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_div_scale_f32 v3, null, v6, v6, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v4, v3
s_waitcnt_depctr 0xfff
v_fma_f32 v5, -v3, v4, 1.0
v_fmac_f32_e32 v4, v5, v4
v_div_scale_f32 v5, vcc_lo, v2, v6, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v7, v5, v4
v_fma_f32 v8, -v3, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v7, v8, v4
v_fma_f32 v3, -v3, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v3, v3, v4, v7
v_div_fixup_f32 v2, v3, v6, v2
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13kernel_colDivPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z13kernel_colDivPfS_S_ii, .Lfunc_end1-_Z13kernel_colDivPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 52
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13kernel_colSumPfS_S_S_S_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13kernel_colSumPfS_S_S_S_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13kernel_colDivPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13kernel_colDivPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// Sums, for each grid point x, the values grid[x + d*nrGridElements] over all
// dimensions d in [0, nrDimensions), writing the result to kernel[x].
//
// Layout assumption (from the indexing below): `grid` holds nrDimensions
// contiguous planes of nrGridElements floats each; `kernel` holds one plane.
// Launch: 1-D grid/block covering at least nrGridElements threads; threads
// past the end exit early. If nrDimensions <= 0, kernel[] is left untouched
// (matches the original behavior).
__global__ void createLaplacianKernel(float *grid, float *kernel, int nrDimensions, int nrGridElements) {
    // Widen before multiplying so huge launches don't overflow 32-bit math.
    size_t x = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= (size_t)nrGridElements)
        return;
    // Accumulate in a register and store once, instead of a global-memory
    // read-modify-write of kernel[x] on every iteration. Addition order is
    // unchanged (d = 0, 1, ...), so float results match the original.
    float sum = 0.0f;
    for (int d = 0; d < nrDimensions; ++d) {
        // size_t arithmetic: d*nrGridElements as int could overflow when
        // nrDimensions * nrGridElements exceeds INT_MAX.
        sum += grid[x + (size_t)d * (size_t)nrGridElements];
    }
    if (nrDimensions > 0)
        kernel[x] = sum;
}
Function : _Z21createLaplacianKernelPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ SHF.R.S32.HI R4, RZ, 0x1f, R5 ; /* 0x0000001fff047819 */
/* 0x000fe20000011405 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x174], PT ; /* 0x00005d0006007a0c */
/* 0x000fc80003f06070 */
/*0070*/ ISETP.GE.U32.AND.EX P0, PT, RZ, R4, PT, P0 ; /* 0x00000004ff00720c */
/* 0x000fda0003f06100 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff077624 */
/* 0x000fe200078e00ff */
/*00a0*/ SHF.R.U32.HI R3, RZ, 0x1e, R6 ; /* 0x0000001eff037819 */
/* 0x000fe20000011606 */
/*00b0*/ IMAD.SHL.U32 R2, R6, 0x4, RZ ; /* 0x0000000406027824 */
/* 0x000fe200078e00ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00d0*/ ISETP.GE.AND P0, PT, R7, 0x1, PT ; /* 0x000000010700780c */
/* 0x000fda0003f06270 */
/*00e0*/ @P0 IADD3 R8, P1, R2, c[0x0][0x160], RZ ; /* 0x0000580002080a10 */
/* 0x000fc80007f3e0ff */
/*00f0*/ @P0 IADD3.X R9, R3, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590003090a10 */
/* 0x000fcc0000ffe4ff */
/*0100*/ @P0 LDG.E R9, [R8.64] ; /* 0x0000000408090981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0120*/ IADD3 R2, P2, R2, c[0x0][0x168], RZ ; /* 0x00005a0002027a10 */
/* 0x000fe20007f5e0ff */
/*0130*/ @P0 IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff000424 */
/* 0x000fc600078e00ff */
/*0140*/ IADD3.X R3, R3, c[0x0][0x16c], RZ, P2, !PT ; /* 0x00005b0003037a10 */
/* 0x000fe400017fe4ff */
/*0150*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc60003f26270 */
/*0160*/ @P0 STG.E [R2.64], R9 ; /* 0x0000000902000986 */
/* 0x0041f4000c101904 */
/*0170*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0180*/ IADD3 R8, -R0, c[0x0][0x170], RZ ; /* 0x00005c0000087a10 */
/* 0x000fe40007ffe1ff */
/*0190*/ LOP3.LUT R9, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff097212 */
/* 0x001fc400078e33ff */
/*01a0*/ LOP3.LUT P1, R8, R8, 0x3, RZ, 0xc0, !PT ; /* 0x0000000308087812 */
/* 0x000fe4000782c0ff */
/*01b0*/ IADD3 R9, R9, c[0x0][0x170], RZ ; /* 0x00005c0009097a10 */
/* 0x000fc80007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P0, PT, R9, 0x3, PT ; /* 0x000000030900780c */
/* 0x000fce0003f06070 */
/*01d0*/ @!P1 BRA 0x310 ; /* 0x0000013000009947 */
/* 0x000fec0003800000 */
/*01e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */
/* 0x000162000c1e1900 */
/*01f0*/ IMAD R9, R0, c[0x0][0x174], RZ ; /* 0x00005d0000097a24 */
/* 0x000fe400078e02ff */
/*0200*/ IMAD.MOV.U32 R10, RZ, RZ, R8 ; /* 0x000000ffff0a7224 */
/* 0x000fc600078e0008 */
/*0210*/ IADD3 R13, P1, R6, R9, RZ ; /* 0x00000009060d7210 */
/* 0x000fc80007f3e0ff */
/*0220*/ LEA.HI.X.SX32 R14, R9, RZ, 0x1, P1 ; /* 0x000000ff090e7211 */
/* 0x000fe400008f0eff */
/*0230*/ LEA R12, P1, R13, c[0x0][0x160], 0x2 ; /* 0x000058000d0c7a11 */
/* 0x000fc800078210ff */
/*0240*/ LEA.HI.X R13, R13, c[0x0][0x164], R14, 0x2, P1 ; /* 0x000059000d0d7a11 */
/* 0x000fe400008f140e */
/*0250*/ SHF.L.U64.HI R14, R5, 0x2, R4 ; /* 0x00000002050e7819 */
/* 0x001fe40000010204 */
/*0260*/ IMAD.MOV.U32 R8, RZ, RZ, R12 ; /* 0x000000ffff087224 */
/* 0x000fe200078e000c */
/*0270*/ MOV R9, R13 ; /* 0x0000000d00097202 */
/* 0x000fca0000000f00 */
/*0280*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea2000c1e1900 */
/*0290*/ IADD3 R10, R10, -0x1, RZ ; /* 0xffffffff0a0a7810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ LEA R12, P2, R5, R12, 0x2 ; /* 0x0000000c050c7211 */
/* 0x000fe400078410ff */
/*02b0*/ ISETP.NE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f25270 */
/*02c0*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x000fe20007ffe0ff */
/*02d0*/ IMAD.X R13, R13, 0x1, R14, P2 ; /* 0x000000010d0d7824 */
/* 0x000fe400010e060e */
/*02e0*/ FADD R11, R8, R11 ; /* 0x0000000b080b7221 */
/* 0x025fca0000000000 */
/*02f0*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0001e6000c101904 */
/*0300*/ @P1 BRA 0x260 ; /* 0xffffff5000001947 */
/* 0x000fea000383ffff */
/*0310*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0320*/ IADD3 R8, -R0.reuse, c[0x0][0x170], RZ ; /* 0x00005c0000087a10 */
/* 0x040fe20007ffe1ff */
/*0330*/ IMAD R9, R0, c[0x0][0x174], RZ ; /* 0x00005d0000097a24 */
/* 0x000fe200078e02ff */
/*0340*/ SHF.L.U64.HI R4, R5.reuse, 0x2, R4 ; /* 0x0000000205047819 */
/* 0x040fe20000010204 */
/*0350*/ IMAD.SHL.U32 R5, R5, 0x4, RZ ; /* 0x0000000405057824 */
/* 0x000fe200078e00ff */
/*0360*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*0370*/ IADD3 R6, P0, R6, R9, RZ ; /* 0x0000000906067210 */
/* 0x000fc80007f1e0ff */
/*0380*/ LEA.HI.X.SX32 R9, R9, RZ, 0x1, P0 ; /* 0x000000ff09097211 */
/* 0x000fe400000f0eff */
/*0390*/ LEA R12, P0, R6, c[0x0][0x160], 0x2 ; /* 0x00005800060c7a11 */
/* 0x000fc800078010ff */
/*03a0*/ LEA.HI.X R13, R6, c[0x0][0x164], R9, 0x2, P0 ; /* 0x00005900060d7a11 */
/* 0x000fe400000f1409 */
/*03b0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000362000c1e1900 */
/*03c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0f070 */
/*03d0*/ @!P1 BRA 0x950 ; /* 0x0000057000009947 */
/* 0x000fea0003800000 */
/*03e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*03f0*/ IADD3 R7, R7, -0xc, RZ ; /* 0xfffffff407077810 */
/* 0x000fca0007ffe0ff */
/*0400*/ IMAD.MOV.U32 R10, RZ, RZ, R12 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e000c */
/*0410*/ IMAD.MOV.U32 R11, RZ, RZ, R13 ; /* 0x000000ffff0b7224 */
/* 0x001fca00078e000d */
/*0420*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000ea2000c1e1900 */
/*0430*/ IADD3 R12, P1, R12, R5, RZ ; /* 0x000000050c0c7210 */
/* 0x000fca0007f3e0ff */
/*0440*/ IMAD.X R13, R13, 0x1, R4, P1 ; /* 0x000000010d0d7824 */
/* 0x000fe400008e0604 */
/*0450*/ FADD R15, R6, R9 ; /* 0x00000009060f7221 */
/* 0x024fca0000000000 */
/*0460*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e8000c101904 */
/*0470*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0480*/ IADD3 R8, P1, R12, R5, RZ ; /* 0x000000050c087210 */
/* 0x000fca0007f3e0ff */
/*0490*/ IMAD.X R9, R13, 0x1, R4, P1 ; /* 0x000000010d097824 */
/* 0x010fe400008e0604 */
/*04a0*/ FADD R17, R15, R6 ; /* 0x000000060f117221 */
/* 0x004fca0000000000 */
/*04b0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0005e8000c101904 */
/*04c0*/ LDG.E R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee2000c1e1900 */
/*04d0*/ IADD3 R10, P1, R8, R5, RZ ; /* 0x00000005080a7210 */
/* 0x000fca0007f3e0ff */
/*04e0*/ IMAD.X R11, R9, 0x1, R4, P1 ; /* 0x00000001090b7824 */
/* 0x000fe400008e0604 */
/*04f0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*0500*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0007e8000c101904 */
/*0510*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000e22000c1e1900 */
/*0520*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fc80007f3e0ff */
/*0530*/ IADD3.X R13, R11, R4, RZ, P1, !PT ; /* 0x000000040b0d7210 */
/* 0x000fe20000ffe4ff */
/*0540*/ FADD R15, R19, R6 ; /* 0x00000006130f7221 */
/* 0x001fca0000000000 */
/*0550*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e8000c101904 */
/*0560*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0570*/ IADD3 R8, P1, R12, R5, RZ ; /* 0x000000050c087210 */
/* 0x000fca0007f3e0ff */
/*0580*/ IMAD.X R9, R13, 0x1, R4, P1 ; /* 0x000000010d097824 */
/* 0x000fe400008e0604 */
/*0590*/ FADD R17, R15, R6 ; /* 0x000000060f117221 */
/* 0x004fca0000000000 */
/*05a0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0005e8000c101904 */
/*05b0*/ LDG.E R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee2000c1e1900 */
/*05c0*/ IADD3 R10, P1, R8, R5, RZ ; /* 0x00000005080a7210 */
/* 0x000fca0007f3e0ff */
/*05d0*/ IMAD.X R11, R9, 0x1, R4, P1 ; /* 0x00000001090b7824 */
/* 0x000fe400008e0604 */
/*05e0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*05f0*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0007e8000c101904 */
/*0600*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000e22000c1e1900 */
/*0610*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fca0007f3e0ff */
/*0620*/ IMAD.X R13, R11, 0x1, R4, P1 ; /* 0x000000010b0d7824 */
/* 0x000fe400008e0604 */
/*0630*/ FADD R15, R19, R6 ; /* 0x00000006130f7221 */
/* 0x001fca0000000000 */
/*0640*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e8000c101904 */
/*0650*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0660*/ IADD3 R8, P1, R12, R5, RZ ; /* 0x000000050c087210 */
/* 0x000fca0007f3e0ff */
/*0670*/ IMAD.X R9, R13, 0x1, R4, P1 ; /* 0x000000010d097824 */
/* 0x000fe400008e0604 */
/*0680*/ FADD R17, R15, R6 ; /* 0x000000060f117221 */
/* 0x004fca0000000000 */
/*0690*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0005e8000c101904 */
/*06a0*/ LDG.E R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee2000c1e1900 */
/*06b0*/ IADD3 R10, P1, R8, R5, RZ ; /* 0x00000005080a7210 */
/* 0x000fca0007f3e0ff */
/*06c0*/ IMAD.X R11, R9, 0x1, R4, P1 ; /* 0x00000001090b7824 */
/* 0x000fe400008e0604 */
/*06d0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*06e0*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0007e8000c101904 */
/*06f0*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000e22000c1e1900 */
/*0700*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fca0007f3e0ff */
/*0710*/ IMAD.X R13, R11, 0x1, R4, P1 ; /* 0x000000010b0d7824 */
/* 0x000fe400008e0604 */
/*0720*/ FADD R15, R19, R6 ; /* 0x00000006130f7221 */
/* 0x001fca0000000000 */
/*0730*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e8000c101904 */
/*0740*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0750*/ IADD3 R8, P1, R12, R5, RZ ; /* 0x000000050c087210 */
/* 0x000fca0007f3e0ff */
/*0760*/ IMAD.X R9, R13, 0x1, R4, P1 ; /* 0x000000010d097824 */
/* 0x000fe400008e0604 */
/*0770*/ FADD R17, R15, R6 ; /* 0x000000060f117221 */
/* 0x004fca0000000000 */
/*0780*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0005e8000c101904 */
/*0790*/ LDG.E R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee2000c1e1900 */
/*07a0*/ IADD3 R10, P1, R8, R5, RZ ; /* 0x00000005080a7210 */
/* 0x000fc80007f3e0ff */
/*07b0*/ IADD3.X R11, R9, R4, RZ, P1, !PT ; /* 0x00000004090b7210 */
/* 0x000fe20000ffe4ff */
/*07c0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*07d0*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0007e8000c101904 */
/*07e0*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000f22000c1e1900 */
/*07f0*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fca0007f3e0ff */
/*0800*/ IMAD.X R13, R11, 0x1, R4, P1 ; /* 0x000000010b0d7824 */
/* 0x000fe400008e0604 */
/*0810*/ FADD R21, R19, R6 ; /* 0x0000000613157221 */
/* 0x010fca0000000000 */
/*0820*/ STG.E [R2.64], R21 ; /* 0x0000001502007986 */
/* 0x0009e8000c101904 */
/*0830*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0840*/ IADD3 R14, P1, R12, R5, RZ ; /* 0x000000050c0e7210 */
/* 0x000fca0007f3e0ff */
/*0850*/ IMAD.X R15, R13, 0x1, R4, P1 ; /* 0x000000010d0f7824 */
/* 0x001fe400008e0604 */
/*0860*/ FADD R17, R21, R6 ; /* 0x0000000615117221 */
/* 0x004fca0000000000 */
/*0870*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0009e8000c101904 */
/*0880*/ LDG.E R6, [R14.64] ; /* 0x000000040e067981 */
/* 0x000ee2000c1e1900 */
/*0890*/ IADD3 R10, P1, R14, R5, RZ ; /* 0x000000050e0a7210 */
/* 0x000fca0007f3e0ff */
/*08a0*/ IMAD.X R11, R15, 0x1, R4, P1 ; /* 0x000000010f0b7824 */
/* 0x000fe400008e0604 */
/*08b0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*08c0*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0009e8000c101904 */
/*08d0*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000ea2000c1e1900 */
/*08e0*/ IADD3 R0, R0, 0x10, RZ ; /* 0x0000001000007810 */
/* 0x000fe40007ffe0ff */
/*08f0*/ IADD3 R12, P2, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fe40007f5e0ff */
/*0900*/ ISETP.GE.AND P1, PT, R0, R7, PT ; /* 0x000000070000720c */
/* 0x000fc60003f26270 */
/*0910*/ IMAD.X R13, R11, 0x1, R4, P2 ; /* 0x000000010b0d7824 */
/* 0x000fe400010e0604 */
/*0920*/ FADD R9, R19, R6 ; /* 0x0000000613097221 */
/* 0x004fca0000000000 */
/*0930*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0009e6000c101904 */
/*0940*/ @!P1 BRA 0x400 ; /* 0xfffffab000009947 */
/* 0x000fea000383ffff */
/*0950*/ IADD3 R6, -R0, c[0x0][0x170], RZ ; /* 0x00005c0000067a10 */
/* 0x000fc80007ffe1ff */
/*0960*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*0970*/ @!P1 BRA 0xc40 ; /* 0x000002c000009947 */
/* 0x000fea0003800000 */
/*0980*/ IMAD.MOV.U32 R6, RZ, RZ, R12 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000c */
/*0990*/ IMAD.MOV.U32 R7, RZ, RZ, R13 ; /* 0x000000ffff077224 */
/* 0x000fca00078e000d */
/*09a0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea2000c1e1900 */
/*09b0*/ IADD3 R10, P0, R12, R5, RZ ; /* 0x000000050c0a7210 */
/* 0x000fc80007f1e0ff */
/*09c0*/ IADD3.X R11, R13, R4, RZ, P0, !PT ; /* 0x000000040d0b7210 */
/* 0x001fe200007fe4ff */
/*09d0*/ FADD R13, R9, R6 ; /* 0x00000006090d7221 */
/* 0x024fca0000000000 */
/*09e0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0001e8000c101904 */
/*09f0*/ LDG.E R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1900 */
/*0a00*/ IADD3 R8, P0, R10, R5, RZ ; /* 0x000000050a087210 */
/* 0x000fca0007f1e0ff */
/*0a10*/ IMAD.X R9, R11, 0x1, R4, P0 ; /* 0x000000010b097824 */
/* 0x010fe400000e0604 */
/*0a20*/ FADD R15, R13, R12 ; /* 0x0000000c0d0f7221 */
/* 0x004fca0000000000 */
/*0a30*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0005e8000c101904 */
/*0a40*/ LDG.E R12, [R8.64] ; /* 0x00000004080c7981 */
/* 0x000ee2000c1e1900 */
/*0a50*/ IADD3 R6, P0, R8, R5, RZ ; /* 0x0000000508067210 */
/* 0x000fca0007f1e0ff */
/*0a60*/ IMAD.X R7, R9, 0x1, R4, P0 ; /* 0x0000000109077824 */
/* 0x000fe400000e0604 */
/*0a70*/ FADD R17, R15, R12 ; /* 0x0000000c0f117221 */
/* 0x008fca0000000000 */
/*0a80*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0007e8000c101904 */
/*0a90*/ LDG.E R12, [R6.64] ; /* 0x00000004060c7981 */
/* 0x000e22000c1e1900 */
/*0aa0*/ IADD3 R10, P0, R6, R5, RZ ; /* 0x00000005060a7210 */
/* 0x000fca0007f1e0ff */
/*0ab0*/ IMAD.X R11, R7, 0x1, R4, P0 ; /* 0x00000001070b7824 */
/* 0x000fe400000e0604 */
/*0ac0*/ FADD R13, R17, R12 ; /* 0x0000000c110d7221 */
/* 0x001fca0000000000 */
/*0ad0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0001e8000c101904 */
/*0ae0*/ LDG.E R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1900 */
/*0af0*/ IADD3 R8, P0, R10, R5, RZ ; /* 0x000000050a087210 */
/* 0x000fca0007f1e0ff */
/*0b00*/ IMAD.X R9, R11, 0x1, R4, P0 ; /* 0x000000010b097824 */
/* 0x000fe400000e0604 */
/*0b10*/ FADD R15, R13, R12 ; /* 0x0000000c0d0f7221 */
/* 0x004fca0000000000 */
/*0b20*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe8000c101904 */
/*0b30*/ LDG.E R12, [R8.64] ; /* 0x00000004080c7981 */
/* 0x000ee2000c1e1900 */
/*0b40*/ IADD3 R6, P0, R8, R5, RZ ; /* 0x0000000508067210 */
/* 0x000fca0007f1e0ff */
/*0b50*/ IMAD.X R7, R9, 0x1, R4, P0 ; /* 0x0000000109077824 */
/* 0x000fe400000e0604 */
/*0b60*/ FADD R17, R15, R12 ; /* 0x0000000c0f117221 */
/* 0x008fca0000000000 */
/*0b70*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x000fe8000c101904 */
/*0b80*/ LDG.E R12, [R6.64] ; /* 0x00000004060c7981 */
/* 0x000e22000c1e1900 */
/*0b90*/ IADD3 R10, P0, R6, R5, RZ ; /* 0x00000005060a7210 */
/* 0x000fca0007f1e0ff */
/*0ba0*/ IMAD.X R11, R7, 0x1, R4, P0 ; /* 0x00000001070b7824 */
/* 0x000fe400000e0604 */
/*0bb0*/ FADD R13, R17, R12 ; /* 0x0000000c110d7221 */
/* 0x001fca0000000000 */
/*0bc0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0001e8000c101904 */
/*0bd0*/ LDG.E R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1900 */
/*0be0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0bf0*/ IADD3 R0, R0, 0x8, RZ ; /* 0x0000000800007810 */
/* 0x000fe20007ffe0ff */
/*0c00*/ FADD R9, R13, R12 ; /* 0x0000000c0d097221 */
/* 0x004fe20000000000 */
/*0c10*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fc80007f3e0ff */
/*0c20*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0005e2000c101904 */
/*0c30*/ IADD3.X R13, R11, R4, RZ, P1, !PT ; /* 0x000000040b0d7210 */
/* 0x001fc60000ffe4ff */
/*0c40*/ ISETP.LT.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000701670 */
/*0c50*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0c60*/ IMAD.MOV.U32 R6, RZ, RZ, R12 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000c */
/*0c70*/ IMAD.MOV.U32 R7, RZ, RZ, R13 ; /* 0x000000ffff077224 */
/* 0x000fca00078e000d */
/*0c80*/ LDG.E R0, [R6.64] ; /* 0x0000000406007981 */
/* 0x000ee2000c1e1900 */
/*0c90*/ IADD3 R10, P0, R12, R5, RZ ; /* 0x000000050c0a7210 */
/* 0x000fca0007f1e0ff */
/*0ca0*/ IMAD.X R11, R13, 0x1, R4, P0 ; /* 0x000000010d0b7824 */
/* 0x001fe400000e0604 */
/*0cb0*/ FADD R13, R0, R9 ; /* 0x00000009000d7221 */
/* 0x028fca0000000000 */
/*0cc0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x000fe8000c101904 */
/*0cd0*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x000ee2000c1e1900 */
/*0ce0*/ IADD3 R8, P0, R10, R5, RZ ; /* 0x000000050a087210 */
/* 0x000fca0007f1e0ff */
/*0cf0*/ IMAD.X R9, R11, 0x1, R4, P0 ; /* 0x000000010b097824 */
/* 0x014fe400000e0604 */
/*0d00*/ FADD R15, R13, R0 ; /* 0x000000000d0f7221 */
/* 0x008fca0000000000 */
/*0d10*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe8000c101904 */
/*0d20*/ LDG.E R0, [R8.64] ; /* 0x0000000408007981 */
/* 0x000ea2000c1e1900 */
/*0d30*/ IADD3 R6, P0, R8, R5, RZ ; /* 0x0000000508067210 */
/* 0x000fca0007f1e0ff */
/*0d40*/ IMAD.X R7, R9, 0x1, R4, P0 ; /* 0x0000000109077824 */
/* 0x000fe400000e0604 */
/*0d50*/ FADD R5, R15, R0 ; /* 0x000000000f057221 */
/* 0x004fca0000000000 */
/*0d60*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*0d70*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea4000c1e1900 */
/*0d80*/ FADD R11, R5, R6 ; /* 0x00000006050b7221 */
/* 0x004fca0000000000 */
/*0d90*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x000fe2000c101904 */
/*0da0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0db0*/ BRA 0xdb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0dc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0dd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0de0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0df0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// For every grid element, accumulate the per-dimension slices of `grid`
// (one slice of nrGridElements floats per dimension) into `kernel`.
// One thread handles one output element; surplus threads bail out.
__global__ void createLaplacianKernel(float *grid, float *kernel, int nrDimensions, int nrGridElements) {
    // Flat 1-D global index of this thread.
    size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= nrGridElements)
        return;
    for (int dim = 0; dim < nrDimensions; ++dim) {
        const float slice_val = grid[idx + dim * nrGridElements];
        // First dimension initializes the output; the rest accumulate.
        kernel[idx] = (dim == 0) ? slice_val : kernel[idx] + slice_val;
    }
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii
.type _Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii, @function
_Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z21createLaplacianKernelPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii, .-_Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii
.globl _Z21createLaplacianKernelPfS_ii
.type _Z21createLaplacianKernelPfS_ii, @function
_Z21createLaplacianKernelPfS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z21createLaplacianKernelPfS_ii, .-_Z21createLaplacianKernelPfS_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z21createLaplacianKernelPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z21createLaplacianKernelPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void createLaplacianKernel(float *grid, float *kernel, int nrDimensions, int nrGridElements) {
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
if (x >= nrGridElements)
return;
for(int d = 0; d < nrDimensions; ++d) {
if (d == 0)
kernel[x] = grid[x];
else
kernel[x] += grid[x + d*nrGridElements];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void createLaplacianKernel(float *grid, float *kernel, int nrDimensions, int nrGridElements) {
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
if (x >= nrGridElements)
return;
for(int d = 0; d < nrDimensions; ++d) {
if (d == 0)
kernel[x] = grid[x];
else
kernel[x] += grid[x + d*nrGridElements];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void createLaplacianKernel(float *grid, float *kernel, int nrDimensions, int nrGridElements) {
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
if (x >= nrGridElements)
return;
for(int d = 0; d < nrDimensions; ++d) {
if (d == 0)
kernel[x] = grid[x];
else
kernel[x] += grid[x + d*nrGridElements];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z21createLaplacianKernelPfS_ii
.globl _Z21createLaplacianKernelPfS_ii
.p2align 8
.type _Z21createLaplacianKernelPfS_ii,@function
_Z21createLaplacianKernelPfS_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_ashr_i32 s5, s3, 31
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
s_mov_b32 s4, s3
s_cmp_gt_i32 s2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[1:2]
s_cselect_b32 s5, -1, 0
s_mov_b32 s4, 0
s_and_b32 s5, vcc_lo, s5
s_and_saveexec_b32 s6, s5
s_cbranch_execz .LBB0_7
s_load_b128 s[8:11], s[0:1], 0x0
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_mov_b32 s0, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s8, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s10, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v3, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_add_i32 s0, s0, 1
s_add_i32 s4, s4, s3
s_cmp_lg_u32 s2, s0
s_cbranch_scc0 .LBB0_7
.LBB0_3:
s_cmp_lg_u32 s0, 0
s_cbranch_scc0 .LBB0_5
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[6:7], s[4:5], 2
v_add_co_u32 v4, vcc_lo, v0, s6
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[2:3], off
s_waitcnt vmcnt(0)
v_add_f32_e32 v4, v4, v5
global_store_b32 v[2:3], v4, off
s_cbranch_execnz .LBB0_2
s_branch .LBB0_6
.p2align 6
.LBB0_5:
.LBB0_6:
global_load_b32 v4, v[0:1], off
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v4, off
s_branch .LBB0_2
.LBB0_7:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21createLaplacianKernelPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z21createLaplacianKernelPfS_ii, .Lfunc_end0-_Z21createLaplacianKernelPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21createLaplacianKernelPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z21createLaplacianKernelPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void createLaplacianKernel(float *grid, float *kernel, int nrDimensions, int nrGridElements) {
size_t x = threadIdx.x + blockDim.x * blockIdx.x;
if (x >= nrGridElements)
return;
for(int d = 0; d < nrDimensions; ++d) {
if (d == 0)
kernel[x] = grid[x];
else
kernel[x] += grid[x + d*nrGridElements];
}
} | .text
.file "createLaplacianKernel.hip"
.globl _Z36__device_stub__createLaplacianKernelPfS_ii # -- Begin function _Z36__device_stub__createLaplacianKernelPfS_ii
.p2align 4, 0x90
.type _Z36__device_stub__createLaplacianKernelPfS_ii,@function
_Z36__device_stub__createLaplacianKernelPfS_ii: # @_Z36__device_stub__createLaplacianKernelPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z21createLaplacianKernelPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z36__device_stub__createLaplacianKernelPfS_ii, .Lfunc_end0-_Z36__device_stub__createLaplacianKernelPfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21createLaplacianKernelPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z21createLaplacianKernelPfS_ii,@object # @_Z21createLaplacianKernelPfS_ii
.section .rodata,"a",@progbits
.globl _Z21createLaplacianKernelPfS_ii
.p2align 3, 0x0
_Z21createLaplacianKernelPfS_ii:
.quad _Z36__device_stub__createLaplacianKernelPfS_ii
.size _Z21createLaplacianKernelPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z21createLaplacianKernelPfS_ii"
.size .L__unnamed_1, 32
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z36__device_stub__createLaplacianKernelPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z21createLaplacianKernelPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z21createLaplacianKernelPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ SHF.R.S32.HI R4, RZ, 0x1f, R5 ; /* 0x0000001fff047819 */
/* 0x000fe20000011405 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x174], PT ; /* 0x00005d0006007a0c */
/* 0x000fc80003f06070 */
/*0070*/ ISETP.GE.U32.AND.EX P0, PT, RZ, R4, PT, P0 ; /* 0x00000004ff00720c */
/* 0x000fda0003f06100 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff077624 */
/* 0x000fe200078e00ff */
/*00a0*/ SHF.R.U32.HI R3, RZ, 0x1e, R6 ; /* 0x0000001eff037819 */
/* 0x000fe20000011606 */
/*00b0*/ IMAD.SHL.U32 R2, R6, 0x4, RZ ; /* 0x0000000406027824 */
/* 0x000fe200078e00ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00d0*/ ISETP.GE.AND P0, PT, R7, 0x1, PT ; /* 0x000000010700780c */
/* 0x000fda0003f06270 */
/*00e0*/ @P0 IADD3 R8, P1, R2, c[0x0][0x160], RZ ; /* 0x0000580002080a10 */
/* 0x000fc80007f3e0ff */
/*00f0*/ @P0 IADD3.X R9, R3, c[0x0][0x164], RZ, P1, !PT ; /* 0x0000590003090a10 */
/* 0x000fcc0000ffe4ff */
/*0100*/ @P0 LDG.E R9, [R8.64] ; /* 0x0000000408090981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0120*/ IADD3 R2, P2, R2, c[0x0][0x168], RZ ; /* 0x00005a0002027a10 */
/* 0x000fe20007f5e0ff */
/*0130*/ @P0 IMAD.MOV.U32 R0, RZ, RZ, 0x1 ; /* 0x00000001ff000424 */
/* 0x000fc600078e00ff */
/*0140*/ IADD3.X R3, R3, c[0x0][0x16c], RZ, P2, !PT ; /* 0x00005b0003037a10 */
/* 0x000fe400017fe4ff */
/*0150*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc60003f26270 */
/*0160*/ @P0 STG.E [R2.64], R9 ; /* 0x0000000902000986 */
/* 0x0041f4000c101904 */
/*0170*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*0180*/ IADD3 R8, -R0, c[0x0][0x170], RZ ; /* 0x00005c0000087a10 */
/* 0x000fe40007ffe1ff */
/*0190*/ LOP3.LUT R9, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff097212 */
/* 0x001fc400078e33ff */
/*01a0*/ LOP3.LUT P1, R8, R8, 0x3, RZ, 0xc0, !PT ; /* 0x0000000308087812 */
/* 0x000fe4000782c0ff */
/*01b0*/ IADD3 R9, R9, c[0x0][0x170], RZ ; /* 0x00005c0009097a10 */
/* 0x000fc80007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P0, PT, R9, 0x3, PT ; /* 0x000000030900780c */
/* 0x000fce0003f06070 */
/*01d0*/ @!P1 BRA 0x310 ; /* 0x0000013000009947 */
/* 0x000fec0003800000 */
/*01e0*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */
/* 0x000162000c1e1900 */
/*01f0*/ IMAD R9, R0, c[0x0][0x174], RZ ; /* 0x00005d0000097a24 */
/* 0x000fe400078e02ff */
/*0200*/ IMAD.MOV.U32 R10, RZ, RZ, R8 ; /* 0x000000ffff0a7224 */
/* 0x000fc600078e0008 */
/*0210*/ IADD3 R13, P1, R6, R9, RZ ; /* 0x00000009060d7210 */
/* 0x000fc80007f3e0ff */
/*0220*/ LEA.HI.X.SX32 R14, R9, RZ, 0x1, P1 ; /* 0x000000ff090e7211 */
/* 0x000fe400008f0eff */
/*0230*/ LEA R12, P1, R13, c[0x0][0x160], 0x2 ; /* 0x000058000d0c7a11 */
/* 0x000fc800078210ff */
/*0240*/ LEA.HI.X R13, R13, c[0x0][0x164], R14, 0x2, P1 ; /* 0x000059000d0d7a11 */
/* 0x000fe400008f140e */
/*0250*/ SHF.L.U64.HI R14, R5, 0x2, R4 ; /* 0x00000002050e7819 */
/* 0x001fe40000010204 */
/*0260*/ IMAD.MOV.U32 R8, RZ, RZ, R12 ; /* 0x000000ffff087224 */
/* 0x000fe200078e000c */
/*0270*/ MOV R9, R13 ; /* 0x0000000d00097202 */
/* 0x000fca0000000f00 */
/*0280*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea2000c1e1900 */
/*0290*/ IADD3 R10, R10, -0x1, RZ ; /* 0xffffffff0a0a7810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ LEA R12, P2, R5, R12, 0x2 ; /* 0x0000000c050c7211 */
/* 0x000fe400078410ff */
/*02b0*/ ISETP.NE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f25270 */
/*02c0*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x000fe20007ffe0ff */
/*02d0*/ IMAD.X R13, R13, 0x1, R14, P2 ; /* 0x000000010d0d7824 */
/* 0x000fe400010e060e */
/*02e0*/ FADD R11, R8, R11 ; /* 0x0000000b080b7221 */
/* 0x025fca0000000000 */
/*02f0*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0001e6000c101904 */
/*0300*/ @P1 BRA 0x260 ; /* 0xffffff5000001947 */
/* 0x000fea000383ffff */
/*0310*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0320*/ IADD3 R8, -R0.reuse, c[0x0][0x170], RZ ; /* 0x00005c0000087a10 */
/* 0x040fe20007ffe1ff */
/*0330*/ IMAD R9, R0, c[0x0][0x174], RZ ; /* 0x00005d0000097a24 */
/* 0x000fe200078e02ff */
/*0340*/ SHF.L.U64.HI R4, R5.reuse, 0x2, R4 ; /* 0x0000000205047819 */
/* 0x040fe20000010204 */
/*0350*/ IMAD.SHL.U32 R5, R5, 0x4, RZ ; /* 0x0000000405057824 */
/* 0x000fe200078e00ff */
/*0360*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*0370*/ IADD3 R6, P0, R6, R9, RZ ; /* 0x0000000906067210 */
/* 0x000fc80007f1e0ff */
/*0380*/ LEA.HI.X.SX32 R9, R9, RZ, 0x1, P0 ; /* 0x000000ff09097211 */
/* 0x000fe400000f0eff */
/*0390*/ LEA R12, P0, R6, c[0x0][0x160], 0x2 ; /* 0x00005800060c7a11 */
/* 0x000fc800078010ff */
/*03a0*/ LEA.HI.X R13, R6, c[0x0][0x164], R9, 0x2, P0 ; /* 0x00005900060d7a11 */
/* 0x000fe400000f1409 */
/*03b0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000362000c1e1900 */
/*03c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0f070 */
/*03d0*/ @!P1 BRA 0x950 ; /* 0x0000057000009947 */
/* 0x000fea0003800000 */
/*03e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*03f0*/ IADD3 R7, R7, -0xc, RZ ; /* 0xfffffff407077810 */
/* 0x000fca0007ffe0ff */
/*0400*/ IMAD.MOV.U32 R10, RZ, RZ, R12 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e000c */
/*0410*/ IMAD.MOV.U32 R11, RZ, RZ, R13 ; /* 0x000000ffff0b7224 */
/* 0x001fca00078e000d */
/*0420*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000ea2000c1e1900 */
/*0430*/ IADD3 R12, P1, R12, R5, RZ ; /* 0x000000050c0c7210 */
/* 0x000fca0007f3e0ff */
/*0440*/ IMAD.X R13, R13, 0x1, R4, P1 ; /* 0x000000010d0d7824 */
/* 0x000fe400008e0604 */
/*0450*/ FADD R15, R6, R9 ; /* 0x00000009060f7221 */
/* 0x024fca0000000000 */
/*0460*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e8000c101904 */
/*0470*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0480*/ IADD3 R8, P1, R12, R5, RZ ; /* 0x000000050c087210 */
/* 0x000fca0007f3e0ff */
/*0490*/ IMAD.X R9, R13, 0x1, R4, P1 ; /* 0x000000010d097824 */
/* 0x010fe400008e0604 */
/*04a0*/ FADD R17, R15, R6 ; /* 0x000000060f117221 */
/* 0x004fca0000000000 */
/*04b0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0005e8000c101904 */
/*04c0*/ LDG.E R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee2000c1e1900 */
/*04d0*/ IADD3 R10, P1, R8, R5, RZ ; /* 0x00000005080a7210 */
/* 0x000fca0007f3e0ff */
/*04e0*/ IMAD.X R11, R9, 0x1, R4, P1 ; /* 0x00000001090b7824 */
/* 0x000fe400008e0604 */
/*04f0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*0500*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0007e8000c101904 */
/*0510*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000e22000c1e1900 */
/*0520*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fc80007f3e0ff */
/*0530*/ IADD3.X R13, R11, R4, RZ, P1, !PT ; /* 0x000000040b0d7210 */
/* 0x000fe20000ffe4ff */
/*0540*/ FADD R15, R19, R6 ; /* 0x00000006130f7221 */
/* 0x001fca0000000000 */
/*0550*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e8000c101904 */
/*0560*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0570*/ IADD3 R8, P1, R12, R5, RZ ; /* 0x000000050c087210 */
/* 0x000fca0007f3e0ff */
/*0580*/ IMAD.X R9, R13, 0x1, R4, P1 ; /* 0x000000010d097824 */
/* 0x000fe400008e0604 */
/*0590*/ FADD R17, R15, R6 ; /* 0x000000060f117221 */
/* 0x004fca0000000000 */
/*05a0*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0005e8000c101904 */
/*05b0*/ LDG.E R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee2000c1e1900 */
/*05c0*/ IADD3 R10, P1, R8, R5, RZ ; /* 0x00000005080a7210 */
/* 0x000fca0007f3e0ff */
/*05d0*/ IMAD.X R11, R9, 0x1, R4, P1 ; /* 0x00000001090b7824 */
/* 0x000fe400008e0604 */
/*05e0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*05f0*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0007e8000c101904 */
/*0600*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000e22000c1e1900 */
/*0610*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fca0007f3e0ff */
/*0620*/ IMAD.X R13, R11, 0x1, R4, P1 ; /* 0x000000010b0d7824 */
/* 0x000fe400008e0604 */
/*0630*/ FADD R15, R19, R6 ; /* 0x00000006130f7221 */
/* 0x001fca0000000000 */
/*0640*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e8000c101904 */
/*0650*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0660*/ IADD3 R8, P1, R12, R5, RZ ; /* 0x000000050c087210 */
/* 0x000fca0007f3e0ff */
/*0670*/ IMAD.X R9, R13, 0x1, R4, P1 ; /* 0x000000010d097824 */
/* 0x000fe400008e0604 */
/*0680*/ FADD R17, R15, R6 ; /* 0x000000060f117221 */
/* 0x004fca0000000000 */
/*0690*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0005e8000c101904 */
/*06a0*/ LDG.E R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee2000c1e1900 */
/*06b0*/ IADD3 R10, P1, R8, R5, RZ ; /* 0x00000005080a7210 */
/* 0x000fca0007f3e0ff */
/*06c0*/ IMAD.X R11, R9, 0x1, R4, P1 ; /* 0x00000001090b7824 */
/* 0x000fe400008e0604 */
/*06d0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*06e0*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0007e8000c101904 */
/*06f0*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000e22000c1e1900 */
/*0700*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fca0007f3e0ff */
/*0710*/ IMAD.X R13, R11, 0x1, R4, P1 ; /* 0x000000010b0d7824 */
/* 0x000fe400008e0604 */
/*0720*/ FADD R15, R19, R6 ; /* 0x00000006130f7221 */
/* 0x001fca0000000000 */
/*0730*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0001e8000c101904 */
/*0740*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0750*/ IADD3 R8, P1, R12, R5, RZ ; /* 0x000000050c087210 */
/* 0x000fca0007f3e0ff */
/*0760*/ IMAD.X R9, R13, 0x1, R4, P1 ; /* 0x000000010d097824 */
/* 0x000fe400008e0604 */
/*0770*/ FADD R17, R15, R6 ; /* 0x000000060f117221 */
/* 0x004fca0000000000 */
/*0780*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0005e8000c101904 */
/*0790*/ LDG.E R6, [R8.64] ; /* 0x0000000408067981 */
/* 0x000ee2000c1e1900 */
/*07a0*/ IADD3 R10, P1, R8, R5, RZ ; /* 0x00000005080a7210 */
/* 0x000fc80007f3e0ff */
/*07b0*/ IADD3.X R11, R9, R4, RZ, P1, !PT ; /* 0x00000004090b7210 */
/* 0x000fe20000ffe4ff */
/*07c0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*07d0*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0007e8000c101904 */
/*07e0*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000f22000c1e1900 */
/*07f0*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fca0007f3e0ff */
/*0800*/ IMAD.X R13, R11, 0x1, R4, P1 ; /* 0x000000010b0d7824 */
/* 0x000fe400008e0604 */
/*0810*/ FADD R21, R19, R6 ; /* 0x0000000613157221 */
/* 0x010fca0000000000 */
/*0820*/ STG.E [R2.64], R21 ; /* 0x0000001502007986 */
/* 0x0009e8000c101904 */
/*0830*/ LDG.E R6, [R12.64] ; /* 0x000000040c067981 */
/* 0x000ea2000c1e1900 */
/*0840*/ IADD3 R14, P1, R12, R5, RZ ; /* 0x000000050c0e7210 */
/* 0x000fca0007f3e0ff */
/*0850*/ IMAD.X R15, R13, 0x1, R4, P1 ; /* 0x000000010d0f7824 */
/* 0x001fe400008e0604 */
/*0860*/ FADD R17, R21, R6 ; /* 0x0000000615117221 */
/* 0x004fca0000000000 */
/*0870*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0009e8000c101904 */
/*0880*/ LDG.E R6, [R14.64] ; /* 0x000000040e067981 */
/* 0x000ee2000c1e1900 */
/*0890*/ IADD3 R10, P1, R14, R5, RZ ; /* 0x000000050e0a7210 */
/* 0x000fca0007f3e0ff */
/*08a0*/ IMAD.X R11, R15, 0x1, R4, P1 ; /* 0x000000010f0b7824 */
/* 0x000fe400008e0604 */
/*08b0*/ FADD R19, R17, R6 ; /* 0x0000000611137221 */
/* 0x008fca0000000000 */
/*08c0*/ STG.E [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0009e8000c101904 */
/*08d0*/ LDG.E R6, [R10.64] ; /* 0x000000040a067981 */
/* 0x000ea2000c1e1900 */
/*08e0*/ IADD3 R0, R0, 0x10, RZ ; /* 0x0000001000007810 */
/* 0x000fe40007ffe0ff */
/*08f0*/ IADD3 R12, P2, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fe40007f5e0ff */
/*0900*/ ISETP.GE.AND P1, PT, R0, R7, PT ; /* 0x000000070000720c */
/* 0x000fc60003f26270 */
/*0910*/ IMAD.X R13, R11, 0x1, R4, P2 ; /* 0x000000010b0d7824 */
/* 0x000fe400010e0604 */
/*0920*/ FADD R9, R19, R6 ; /* 0x0000000613097221 */
/* 0x004fca0000000000 */
/*0930*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0009e6000c101904 */
/*0940*/ @!P1 BRA 0x400 ; /* 0xfffffab000009947 */
/* 0x000fea000383ffff */
/*0950*/ IADD3 R6, -R0, c[0x0][0x170], RZ ; /* 0x00005c0000067a10 */
/* 0x000fc80007ffe1ff */
/*0960*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*0970*/ @!P1 BRA 0xc40 ; /* 0x000002c000009947 */
/* 0x000fea0003800000 */
/*0980*/ IMAD.MOV.U32 R6, RZ, RZ, R12 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000c */
/*0990*/ IMAD.MOV.U32 R7, RZ, RZ, R13 ; /* 0x000000ffff077224 */
/* 0x000fca00078e000d */
/*09a0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea2000c1e1900 */
/*09b0*/ IADD3 R10, P0, R12, R5, RZ ; /* 0x000000050c0a7210 */
/* 0x000fc80007f1e0ff */
/*09c0*/ IADD3.X R11, R13, R4, RZ, P0, !PT ; /* 0x000000040d0b7210 */
/* 0x001fe200007fe4ff */
/*09d0*/ FADD R13, R9, R6 ; /* 0x00000006090d7221 */
/* 0x024fca0000000000 */
/*09e0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0001e8000c101904 */
/*09f0*/ LDG.E R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1900 */
/*0a00*/ IADD3 R8, P0, R10, R5, RZ ; /* 0x000000050a087210 */
/* 0x000fca0007f1e0ff */
/*0a10*/ IMAD.X R9, R11, 0x1, R4, P0 ; /* 0x000000010b097824 */
/* 0x010fe400000e0604 */
/*0a20*/ FADD R15, R13, R12 ; /* 0x0000000c0d0f7221 */
/* 0x004fca0000000000 */
/*0a30*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0005e8000c101904 */
/*0a40*/ LDG.E R12, [R8.64] ; /* 0x00000004080c7981 */
/* 0x000ee2000c1e1900 */
/*0a50*/ IADD3 R6, P0, R8, R5, RZ ; /* 0x0000000508067210 */
/* 0x000fca0007f1e0ff */
/*0a60*/ IMAD.X R7, R9, 0x1, R4, P0 ; /* 0x0000000109077824 */
/* 0x000fe400000e0604 */
/*0a70*/ FADD R17, R15, R12 ; /* 0x0000000c0f117221 */
/* 0x008fca0000000000 */
/*0a80*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x0007e8000c101904 */
/*0a90*/ LDG.E R12, [R6.64] ; /* 0x00000004060c7981 */
/* 0x000e22000c1e1900 */
/*0aa0*/ IADD3 R10, P0, R6, R5, RZ ; /* 0x00000005060a7210 */
/* 0x000fca0007f1e0ff */
/*0ab0*/ IMAD.X R11, R7, 0x1, R4, P0 ; /* 0x00000001070b7824 */
/* 0x000fe400000e0604 */
/*0ac0*/ FADD R13, R17, R12 ; /* 0x0000000c110d7221 */
/* 0x001fca0000000000 */
/*0ad0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0001e8000c101904 */
/*0ae0*/ LDG.E R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1900 */
/*0af0*/ IADD3 R8, P0, R10, R5, RZ ; /* 0x000000050a087210 */
/* 0x000fca0007f1e0ff */
/*0b00*/ IMAD.X R9, R11, 0x1, R4, P0 ; /* 0x000000010b097824 */
/* 0x000fe400000e0604 */
/*0b10*/ FADD R15, R13, R12 ; /* 0x0000000c0d0f7221 */
/* 0x004fca0000000000 */
/*0b20*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe8000c101904 */
/*0b30*/ LDG.E R12, [R8.64] ; /* 0x00000004080c7981 */
/* 0x000ee2000c1e1900 */
/*0b40*/ IADD3 R6, P0, R8, R5, RZ ; /* 0x0000000508067210 */
/* 0x000fca0007f1e0ff */
/*0b50*/ IMAD.X R7, R9, 0x1, R4, P0 ; /* 0x0000000109077824 */
/* 0x000fe400000e0604 */
/*0b60*/ FADD R17, R15, R12 ; /* 0x0000000c0f117221 */
/* 0x008fca0000000000 */
/*0b70*/ STG.E [R2.64], R17 ; /* 0x0000001102007986 */
/* 0x000fe8000c101904 */
/*0b80*/ LDG.E R12, [R6.64] ; /* 0x00000004060c7981 */
/* 0x000e22000c1e1900 */
/*0b90*/ IADD3 R10, P0, R6, R5, RZ ; /* 0x00000005060a7210 */
/* 0x000fca0007f1e0ff */
/*0ba0*/ IMAD.X R11, R7, 0x1, R4, P0 ; /* 0x00000001070b7824 */
/* 0x000fe400000e0604 */
/*0bb0*/ FADD R13, R17, R12 ; /* 0x0000000c110d7221 */
/* 0x001fca0000000000 */
/*0bc0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0001e8000c101904 */
/*0bd0*/ LDG.E R12, [R10.64] ; /* 0x000000040a0c7981 */
/* 0x000ea2000c1e1900 */
/*0be0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0bf0*/ IADD3 R0, R0, 0x8, RZ ; /* 0x0000000800007810 */
/* 0x000fe20007ffe0ff */
/*0c00*/ FADD R9, R13, R12 ; /* 0x0000000c0d097221 */
/* 0x004fe20000000000 */
/*0c10*/ IADD3 R12, P1, R10, R5, RZ ; /* 0x000000050a0c7210 */
/* 0x000fc80007f3e0ff */
/*0c20*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0005e2000c101904 */
/*0c30*/ IADD3.X R13, R11, R4, RZ, P1, !PT ; /* 0x000000040b0d7210 */
/* 0x001fc60000ffe4ff */
/*0c40*/ ISETP.LT.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000701670 */
/*0c50*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0c60*/ IMAD.MOV.U32 R6, RZ, RZ, R12 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000c */
/*0c70*/ IMAD.MOV.U32 R7, RZ, RZ, R13 ; /* 0x000000ffff077224 */
/* 0x000fca00078e000d */
/*0c80*/ LDG.E R0, [R6.64] ; /* 0x0000000406007981 */
/* 0x000ee2000c1e1900 */
/*0c90*/ IADD3 R10, P0, R12, R5, RZ ; /* 0x000000050c0a7210 */
/* 0x000fca0007f1e0ff */
/*0ca0*/ IMAD.X R11, R13, 0x1, R4, P0 ; /* 0x000000010d0b7824 */
/* 0x001fe400000e0604 */
/*0cb0*/ FADD R13, R0, R9 ; /* 0x00000009000d7221 */
/* 0x028fca0000000000 */
/*0cc0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x000fe8000c101904 */
/*0cd0*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x000ee2000c1e1900 */
/*0ce0*/ IADD3 R8, P0, R10, R5, RZ ; /* 0x000000050a087210 */
/* 0x000fca0007f1e0ff */
/*0cf0*/ IMAD.X R9, R11, 0x1, R4, P0 ; /* 0x000000010b097824 */
/* 0x014fe400000e0604 */
/*0d00*/ FADD R15, R13, R0 ; /* 0x000000000d0f7221 */
/* 0x008fca0000000000 */
/*0d10*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x000fe8000c101904 */
/*0d20*/ LDG.E R0, [R8.64] ; /* 0x0000000408007981 */
/* 0x000ea2000c1e1900 */
/*0d30*/ IADD3 R6, P0, R8, R5, RZ ; /* 0x0000000508067210 */
/* 0x000fca0007f1e0ff */
/*0d40*/ IMAD.X R7, R9, 0x1, R4, P0 ; /* 0x0000000109077824 */
/* 0x000fe400000e0604 */
/*0d50*/ FADD R5, R15, R0 ; /* 0x000000000f057221 */
/* 0x004fca0000000000 */
/*0d60*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*0d70*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea4000c1e1900 */
/*0d80*/ FADD R11, R5, R6 ; /* 0x00000006050b7221 */
/* 0x004fca0000000000 */
/*0d90*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x000fe2000c101904 */
/*0da0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0db0*/ BRA 0xdb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0dc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0dd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0de0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0df0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z21createLaplacianKernelPfS_ii
.globl _Z21createLaplacianKernelPfS_ii
.p2align 8
.type _Z21createLaplacianKernelPfS_ii,@function
_Z21createLaplacianKernelPfS_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_ashr_i32 s5, s3, 31
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
s_mov_b32 s4, s3
s_cmp_gt_i32 s2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[1:2]
s_cselect_b32 s5, -1, 0
s_mov_b32 s4, 0
s_and_b32 s5, vcc_lo, s5
s_and_saveexec_b32 s6, s5
s_cbranch_execz .LBB0_7
s_load_b128 s[8:11], s[0:1], 0x0
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_mov_b32 s0, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s8, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s10, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v3, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_add_i32 s0, s0, 1
s_add_i32 s4, s4, s3
s_cmp_lg_u32 s2, s0
s_cbranch_scc0 .LBB0_7
.LBB0_3:
s_cmp_lg_u32 s0, 0
s_cbranch_scc0 .LBB0_5
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[6:7], s[4:5], 2
v_add_co_u32 v4, vcc_lo, v0, s6
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
global_load_b32 v4, v[4:5], off
global_load_b32 v5, v[2:3], off
s_waitcnt vmcnt(0)
v_add_f32_e32 v4, v4, v5
global_store_b32 v[2:3], v4, off
s_cbranch_execnz .LBB0_2
s_branch .LBB0_6
.p2align 6
.LBB0_5:
.LBB0_6:
global_load_b32 v4, v[0:1], off
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v4, off
s_branch .LBB0_2
.LBB0_7:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21createLaplacianKernelPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z21createLaplacianKernelPfS_ii, .Lfunc_end0-_Z21createLaplacianKernelPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21createLaplacianKernelPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z21createLaplacianKernelPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000dcd8c_00000000-6_createLaplacianKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii
.type _Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii, @function
_Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z21createLaplacianKernelPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii, .-_Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii
.globl _Z21createLaplacianKernelPfS_ii
.type _Z21createLaplacianKernelPfS_ii, @function
_Z21createLaplacianKernelPfS_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z21createLaplacianKernelPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z21createLaplacianKernelPfS_ii, .-_Z21createLaplacianKernelPfS_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z21createLaplacianKernelPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z21createLaplacianKernelPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "createLaplacianKernel.hip"
.globl _Z36__device_stub__createLaplacianKernelPfS_ii # -- Begin function _Z36__device_stub__createLaplacianKernelPfS_ii
.p2align 4, 0x90
.type _Z36__device_stub__createLaplacianKernelPfS_ii,@function
_Z36__device_stub__createLaplacianKernelPfS_ii: # @_Z36__device_stub__createLaplacianKernelPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z21createLaplacianKernelPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z36__device_stub__createLaplacianKernelPfS_ii, .Lfunc_end0-_Z36__device_stub__createLaplacianKernelPfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21createLaplacianKernelPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z21createLaplacianKernelPfS_ii,@object # @_Z21createLaplacianKernelPfS_ii
.section .rodata,"a",@progbits
.globl _Z21createLaplacianKernelPfS_ii
.p2align 3, 0x0
_Z21createLaplacianKernelPfS_ii:
.quad _Z36__device_stub__createLaplacianKernelPfS_ii
.size _Z21createLaplacianKernelPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z21createLaplacianKernelPfS_ii"
.size .L__unnamed_1, 32
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z36__device_stub__createLaplacianKernelPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z21createLaplacianKernelPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
void host_dgemm( const int m, const int n, const int k,
double const * const a, double const * const b, double *c )
{
/*
* naive matrix multiplication loops go here. triply nested for loop
* C = A * B where A and B are matrices
* C(i,j) = SUM( A(i,k) * B(k,j), over the index "k", where 0 <= k < (SIZE-1) )
*/
/* insert code here */
for( int j = 0; j < n; j++ )
{
for( int i = 0; i < m; i++ )
{
for( int koff = 0; koff < k; koff++ )
{
c[INDX(i, j, m)] += a[INDX( i, koff, m )] * b[INDX( koff, j, n )];
} /* end for koff */
} /* end for i */
} /* end for j */
} /* end host_dgemm */
int main( int argc, char *argv[] )
{
int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* declare host pointers */
double *h_a, *h_b, *h_cdef;
size_t numbytes = size * size * sizeof( double );
/* allocate host pointers */
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_cdef = (double *) malloc( numbytes );
if( h_cdef == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* set C to zero */
memset( h_cdef, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize A and B on the host */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* start timers */
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
/* call host dgemm */
host_dgemm( size, size, size, h_a, h_b, h_cdef );
/* stop the timers */
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
/* print the results */
fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* cleanup */
free( h_a );
free( h_b );
free( h_cdef );
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
void host_dgemm( const int m, const int n, const int k,
double const * const a, double const * const b, double *c )
{
/*
* naive matrix multiplication loops go here. triply nested for loop
* C = A * B where A and B are matrices
* C(i,j) = SUM( A(i,k) * B(k,j), over the index "k", where 0 <= k < (SIZE-1) )
*/
/* insert code here */
for( int j = 0; j < n; j++ )
{
for( int i = 0; i < m; i++ )
{
for( int koff = 0; koff < k; koff++ )
{
c[INDX(i, j, m)] += a[INDX( i, koff, m )] * b[INDX( koff, j, n )];
} /* end for koff */
} /* end for i */
} /* end for j */
} /* end host_dgemm */
int main( int argc, char *argv[] )
{
int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* declare host pointers */
double *h_a, *h_b, *h_cdef;
size_t numbytes = size * size * sizeof( double );
/* allocate host pointers */
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_cdef = (double *) malloc( numbytes );
if( h_cdef == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* set C to zero */
memset( h_cdef, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize A and B on the host */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* start timers */
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
/* call host dgemm */
host_dgemm( size, size, size, h_a, h_b, h_cdef );
/* stop the timers */
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
/* print the results */
fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* cleanup */
free( h_a );
free( h_b );
free( h_cdef );
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
} | .file "tmpxft_0009af6f_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10host_dgemmiiiPKdS0_Pd
.type _Z10host_dgemmiiiPKdS0_Pd, @function
_Z10host_dgemmiiiPKdS0_Pd:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rcx, -8(%rsp)
testl %esi, %esi
jle .L3
movl %edi, %r10d
movl %esi, %eax
movl %edx, %ebx
movq %r8, %r14
movq %r9, %r12
movslq %edi, %rdi
salq $3, %rdi
movl $0, %r11d
movl $0, %r13d
movl $0, %ecx
movslq %edx, %r15
movq %r15, %rsi
jmp .L5
.L8:
leal (%r9,%r11), %edx
movslq %edx, %rdx
leaq (%r12,%rdx,8), %r13
movq %r15, %rdx
movq %rbp, %rcx
.L6:
movsd (%rcx), %xmm0
mulsd (%rdx), %xmm0
addsd 0(%r13), %xmm0
movsd %xmm0, 0(%r13)
addq %rdi, %rcx
addq $8, %rdx
cmpq %r8, %rdx
jne .L6
.L9:
addl $1, %r9d
addq $8, %rbp
cmpl %r9d, %r10d
je .L12
.L10:
testl %ebx, %ebx
jg .L8
jmp .L9
.L12:
movl -16(%rsp), %ecx
movl -12(%rsp), %r13d
.L7:
addl $1, %ecx
addl %eax, %r13d
addl %r10d, %r11d
cmpl %ecx, %eax
je .L3
.L5:
testl %r10d, %r10d
jle .L7
movq -8(%rsp), %rbp
movslq %r13d, %rdx
leaq (%r14,%rdx,8), %r15
addq %rsi, %rdx
leaq (%r14,%rdx,8), %r8
movl $0, %r9d
movl %ecx, -16(%rsp)
movl %r13d, -12(%rsp)
jmp .L10
.L3:
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z10host_dgemmiiiPKdS0_Pd, .-_Z10host_dgemmiiiPKdS0_Pd
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Matrix size is %d\n"
.LC1:
.string "Error in host malloc\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "Total memory required is %lf MB\n"
.section .rodata.str1.1
.LC6:
.string "Total time CPU is %f sec\n"
.LC10:
.string "Performance is %f GFlop/s\n"
.LC11:
.string "cudaDeviceReset failed!"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movl $1024, %ecx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
call __fprintf_chk@PLT
movl $8388608, %edi
call malloc@PLT
testq %rax, %rax
je .L24
movq %rax, %r12
movl $8388608, %edi
call malloc@PLT
movq %rax, %rbp
testq %rax, %rax
je .L25
movl $8388608, %edi
call malloc@PLT
movq %rax, %r13
testq %rax, %rax
je .L26
movl $8388608, %edx
movl $0, %esi
movq %rax, %rdi
call memset@PLT
movsd .LC2(%rip), %xmm0
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movl $0, %ebx
.L19:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
mulsd .LC4(%rip), %xmm0
movsd %xmm0, (%r12,%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
mulsd .LC4(%rip), %xmm0
movsd %xmm0, 0(%rbp,%rbx)
addq $8, %rbx
cmpq $8388608, %rbx
jne .L19
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movq %r13, %r9
movq %rbp, %r8
movq %r12, %rcx
movl $1024, %edx
movl $1024, %esi
movl $1024, %edi
call _Z10host_dgemmiiiPKdS0_Pd
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
movss 4(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
pxor %xmm1, %xmm1
cvtss2sd 4(%rsp), %xmm1
divsd .LC7(%rip), %xmm1
movsd .LC8(%rip), %xmm0
divsd %xmm1, %xmm0
mulsd .LC9(%rip), %xmm0
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
call cudaDeviceReset@PLT
movl %eax, %edx
movl $0, %eax
testl %edx, %edx
jne .L27
.L14:
movq 24(%rsp), %rdx
subq %fs:40, %rdx
jne .L28
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $911, %eax
jmp .L14
.L25:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $911, %eax
jmp .L14
.L26:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $911, %eax
jmp .L14
.L27:
leaq .LC11(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $1, %eax
jmp .L14
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 1896932436
.long 1077488243
.align 8
.LC4:
.long 0
.long 1040187392
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC5:
.long 1148846080
.section .rodata.cst8
.align 8
.LC7:
.long 0
.long 1083129856
.align 8
.LC8:
.long 0
.long 1105199104
.align 8
.LC9:
.long -400107883
.long 1041313291
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
void host_dgemm( const int m, const int n, const int k,
double const * const a, double const * const b, double *c )
{
/*
* naive matrix multiplication loops go here. triply nested for loop
* C = A * B where A and B are matrices
* C(i,j) = SUM( A(i,k) * B(k,j), over the index "k", where 0 <= k < (SIZE-1) )
*/
/* insert code here */
for( int j = 0; j < n; j++ )
{
for( int i = 0; i < m; i++ )
{
for( int koff = 0; koff < k; koff++ )
{
c[INDX(i, j, m)] += a[INDX( i, koff, m )] * b[INDX( koff, j, n )];
} /* end for koff */
} /* end for i */
} /* end for j */
} /* end host_dgemm */
/* Host-side driver: allocates three SIZE x SIZE matrices, fills A and B
 * with uniform random values in [0,1), times the naive CPU dgemm with
 * CUDA events, and reports elapsed time and achieved GFlop/s.
 * Returns 0 on success, 911 if any malloc fails, 1 if cudaDeviceReset fails. */
int main( int argc, char *argv[] )
{
    int size = SIZE;
    fprintf(stdout, "Matrix size is %d\n",size);

    /* declare host pointers */
    double *h_a, *h_b, *h_cdef;

    /* bytes per matrix: size*size doubles (8 MB for SIZE = 1024) */
    size_t numbytes = size * size * sizeof( double );

    /* allocate host pointers.
       NOTE(review): earlier allocations are not freed on a later error
       path; the OS reclaims them at process exit. */
    h_a = (double *) malloc( numbytes );
    if( h_a == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    h_b = (double *) malloc( numbytes );
    if( h_b == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    h_cdef = (double *) malloc( numbytes );
    if( h_cdef == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    /* set C to zero: host_dgemm accumulates into C, so it must start at 0 */
    memset( h_cdef, 0, numbytes );

    fprintf( stdout, "Total memory required is %lf MB\n",
             3.0 * (double) numbytes / 1000000.0 );

    /* initialize A and B on the host with uniform random values in [0,1) */
    for( int i = 0; i < size * size; i++ )
    {
        h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
        h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
    }

    /* start timers.  NOTE(review): CUDA events are used here as a wall
       clock around a purely host-side call; no device work is launched
       between the two records. */
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0 );

    /* call host dgemm */
    host_dgemm( size, size, size, h_a, h_b, h_cdef );

    /* stop the timers */
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );

    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, start, stop ); /* milliseconds */

    /* print the results: 2*size^3 flops (one multiply and one add per
       innermost iteration) divided by elapsed seconds, scaled to GFlop/s */
    fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
    fprintf(stdout, "Performance is %f GFlop/s\n",
            2.0 * (double) size * (double) size * (double) size /
            ( (double) elapsedTime / 1000.0 ) * 1.e-9 );

    /* cleanup */
    free( h_a );
    free( h_b );
    free( h_cdef );

    cudaError_t cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
void host_dgemm( const int m, const int n, const int k,
                 double const * const a, double const * const b, double *c )
{
    /*
     * Naive CPU matrix multiply: C += A * B, all matrices column-major.
     * Element (row, col) of a matrix with leading dimension ld is stored
     * at index col*ld + row, so A and C use leading dimension m and B
     * uses leading dimension n.  The caller is responsible for zeroing c
     * first, since each product term is accumulated into it.
     */
    for( int col = 0; col < n; col++ )
    {
        for( int row = 0; row < m; row++ )
        {
            /* destination element C(row, col), fixed for the inner loop */
            const int cIdx = ( col * m ) + row;
            for( int inner = 0; inner < k; inner++ )
            {
                c[cIdx] += a[ ( inner * m ) + row ] * b[ ( col * n ) + inner ];
            }
        }
    }
}
/* Host-side driver: allocates three SIZE x SIZE matrices, fills A and B
 * with uniform random values in [0,1), times the naive CPU dgemm with
 * HIP events, and reports elapsed time and achieved GFlop/s.
 * Returns 0 on success, 911 if any malloc fails, 1 if hipDeviceReset fails. */
int main( int argc, char *argv[] )
{
    int size = SIZE;
    fprintf(stdout, "Matrix size is %d\n",size);

    /* declare host pointers */
    double *h_a, *h_b, *h_cdef;

    /* bytes per matrix: size*size doubles (8 MB for SIZE = 1024) */
    size_t numbytes = size * size * sizeof( double );

    /* allocate host pointers.
       NOTE(review): earlier allocations are not freed on a later error
       path; the OS reclaims them at process exit. */
    h_a = (double *) malloc( numbytes );
    if( h_a == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    h_b = (double *) malloc( numbytes );
    if( h_b == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    h_cdef = (double *) malloc( numbytes );
    if( h_cdef == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    /* set C to zero: host_dgemm accumulates into C, so it must start at 0 */
    memset( h_cdef, 0, numbytes );

    fprintf( stdout, "Total memory required is %lf MB\n",
             3.0 * (double) numbytes / 1000000.0 );

    /* initialize A and B on the host with uniform random values in [0,1) */
    for( int i = 0; i < size * size; i++ )
    {
        h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
        h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
    }

    /* start timers.  NOTE(review): HIP events are used here as a wall
       clock around a purely host-side call; no device work is launched
       between the two records. */
    hipEvent_t start, stop;
    hipEventCreate( &start );
    hipEventCreate( &stop );
    hipEventRecord( start, 0 );

    /* call host dgemm */
    host_dgemm( size, size, size, h_a, h_b, h_cdef );

    /* stop the timers */
    hipEventRecord( stop, 0 );
    hipEventSynchronize( stop );

    float elapsedTime;
    hipEventElapsedTime( &elapsedTime, start, stop ); /* milliseconds */

    /* print the results: 2*size^3 flops (one multiply and one add per
       innermost iteration) divided by elapsed seconds, scaled to GFlop/s */
    fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
    fprintf(stdout, "Performance is %f GFlop/s\n",
            2.0 * (double) size * (double) size * (double) size /
            ( (double) elapsedTime / 1000.0 ) * 1.e-9 );

    /* cleanup */
    free( h_a );
    free( h_b );
    free( h_cdef );

    hipError_t cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
void host_dgemm( const int m, const int n, const int k,
                 double const * const a, double const * const b, double *c )
{
/*
 * Naive triply nested matrix multiply, C += A * B, column-major storage
 * (see the INDX macro: element (row,col) of a matrix with leading
 * dimension ld lives at col*ld + row).
 *   m : rows of A and C    n : columns of B and C    k : inner dimension
 * The caller must initialize c (main() zeroes it with memset), because
 * the inner loop accumulates into the existing contents of C.
 */
    for( int j = 0; j < n; j++ )           /* column of C and B */
    {
        for( int i = 0; i < m; i++ )       /* row of C and A */
        {
            for( int koff = 0; koff < k; koff++ )  /* dot-product index */
            {
                c[INDX(i, j, m)] += a[INDX( i, koff, m )] * b[INDX( koff, j, n )];
            } /* end for koff */
        } /* end for i */
    } /* end for j */
} /* end host_dgemm */
/* Host-side driver: allocates three SIZE x SIZE matrices, fills A and B
 * with uniform random values in [0,1), times the naive CPU dgemm with
 * HIP events, and reports elapsed time and achieved GFlop/s.
 * Returns 0 on success, 911 if any malloc fails, 1 if hipDeviceReset fails. */
int main( int argc, char *argv[] )
{
    int size = SIZE;
    fprintf(stdout, "Matrix size is %d\n",size);

    /* declare host pointers */
    double *h_a, *h_b, *h_cdef;

    /* bytes per matrix: size*size doubles (8 MB for SIZE = 1024) */
    size_t numbytes = size * size * sizeof( double );

    /* allocate host pointers.
       NOTE(review): earlier allocations are not freed on a later error
       path; the OS reclaims them at process exit. */
    h_a = (double *) malloc( numbytes );
    if( h_a == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    h_b = (double *) malloc( numbytes );
    if( h_b == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    h_cdef = (double *) malloc( numbytes );
    if( h_cdef == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    /* set C to zero: host_dgemm accumulates into C, so it must start at 0 */
    memset( h_cdef, 0, numbytes );

    fprintf( stdout, "Total memory required is %lf MB\n",
             3.0 * (double) numbytes / 1000000.0 );

    /* initialize A and B on the host with uniform random values in [0,1) */
    for( int i = 0; i < size * size; i++ )
    {
        h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
        h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
    }

    /* start timers.  NOTE(review): HIP events are used here as a wall
       clock around a purely host-side call; no device work is launched
       between the two records. */
    hipEvent_t start, stop;
    hipEventCreate( &start );
    hipEventCreate( &stop );
    hipEventRecord( start, 0 );

    /* call host dgemm */
    host_dgemm( size, size, size, h_a, h_b, h_cdef );

    /* stop the timers */
    hipEventRecord( stop, 0 );
    hipEventSynchronize( stop );

    float elapsedTime;
    hipEventElapsedTime( &elapsedTime, start, stop ); /* milliseconds */

    /* print the results: 2*size^3 flops (one multiply and one add per
       innermost iteration) divided by elapsed seconds, scaled to GFlop/s */
    fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
    fprintf(stdout, "Performance is %f GFlop/s\n",
            2.0 * (double) size * (double) size * (double) size /
            ( (double) elapsedTime / 1000.0 ) * 1.e-9 );

    /* cleanup */
    free( h_a );
    free( h_b );
    free( h_cdef );

    hipError_t cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
void host_dgemm( const int m, const int n, const int k,
                 double const * const a, double const * const b, double *c )
{
/*
 * Naive triply nested matrix multiply, C += A * B, column-major storage
 * (see the INDX macro: element (row,col) of a matrix with leading
 * dimension ld lives at col*ld + row).
 *   m : rows of A and C    n : columns of B and C    k : inner dimension
 * The caller must initialize c (main() zeroes it with memset), because
 * the inner loop accumulates into the existing contents of C.
 */
    for( int j = 0; j < n; j++ )           /* column of C and B */
    {
        for( int i = 0; i < m; i++ )       /* row of C and A */
        {
            for( int koff = 0; koff < k; koff++ )  /* dot-product index */
            {
                c[INDX(i, j, m)] += a[INDX( i, koff, m )] * b[INDX( koff, j, n )];
            } /* end for koff */
        } /* end for i */
    } /* end for j */
} /* end host_dgemm */
/* Host-side driver: allocates three SIZE x SIZE matrices, fills A and B
 * with uniform random values in [0,1), times the naive CPU dgemm with
 * HIP events, and reports elapsed time and achieved GFlop/s.
 * Returns 0 on success, 911 if any malloc fails, 1 if hipDeviceReset fails. */
int main( int argc, char *argv[] )
{
    int size = SIZE;
    fprintf(stdout, "Matrix size is %d\n",size);

    /* declare host pointers */
    double *h_a, *h_b, *h_cdef;

    /* bytes per matrix: size*size doubles (8 MB for SIZE = 1024) */
    size_t numbytes = size * size * sizeof( double );

    /* allocate host pointers.
       NOTE(review): earlier allocations are not freed on a later error
       path; the OS reclaims them at process exit. */
    h_a = (double *) malloc( numbytes );
    if( h_a == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    h_b = (double *) malloc( numbytes );
    if( h_b == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    h_cdef = (double *) malloc( numbytes );
    if( h_cdef == NULL )
    {
        fprintf(stderr,"Error in host malloc\n");
        return 911;
    }

    /* set C to zero: host_dgemm accumulates into C, so it must start at 0 */
    memset( h_cdef, 0, numbytes );

    fprintf( stdout, "Total memory required is %lf MB\n",
             3.0 * (double) numbytes / 1000000.0 );

    /* initialize A and B on the host with uniform random values in [0,1) */
    for( int i = 0; i < size * size; i++ )
    {
        h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
        h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
    }

    /* start timers.  NOTE(review): HIP events are used here as a wall
       clock around a purely host-side call; no device work is launched
       between the two records. */
    hipEvent_t start, stop;
    hipEventCreate( &start );
    hipEventCreate( &stop );
    hipEventRecord( start, 0 );

    /* call host dgemm */
    host_dgemm( size, size, size, h_a, h_b, h_cdef );

    /* stop the timers */
    hipEventRecord( stop, 0 );
    hipEventSynchronize( stop );

    float elapsedTime;
    hipEventElapsedTime( &elapsedTime, start, stop ); /* milliseconds */

    /* print the results: 2*size^3 flops (one multiply and one add per
       innermost iteration) divided by elapsed seconds, scaled to GFlop/s */
    fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
    fprintf(stdout, "Performance is %f GFlop/s\n",
            2.0 * (double) size * (double) size * (double) size /
            ( (double) elapsedTime / 1000.0 ) * 1.e-9 );

    /* cleanup */
    free( h_a );
    free( h_b );
    free( h_cdef );

    hipError_t cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}
.file "kernel.hip"
.globl _Z10host_dgemmiiiPKdS0_Pd # -- Begin function _Z10host_dgemmiiiPKdS0_Pd
.p2align 4, 0x90
.type _Z10host_dgemmiiiPKdS0_Pd,@function
_Z10host_dgemmiiiPKdS0_Pd: # @_Z10host_dgemmiiiPKdS0_Pd
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, -8(%rsp) # 8-byte Spill
movq %r8, -16(%rsp) # 8-byte Spill
movq %rcx, -24(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB0_9
# %bb.1: # %.preheader24.lr.ph
movslq %edi, %rax
movl %esi, %r10d
movl %eax, %r11d
movl %edx, %ebx
movq %rax, -32(%rsp) # 8-byte Spill
leaq (,%rax,8), %r14
xorl %r15d, %r15d
xorl %r12d, %r12d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_8: # %._crit_edge27
# in Loop: Header=BB0_2 Depth=1
incq %r12
addl %esi, %r15d
cmpq %r10, %r12
je .LBB0_9
.LBB0_2: # %.preheader24
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
# Child Loop BB0_6 Depth 3
testl %edi, %edi
jle .LBB0_8
# %bb.3: # %.preheader.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r15d, %eax
movq -16(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,8), %r13
movq %r12, %rax
imulq -32(%rsp), %rax # 8-byte Folded Reload
movq -8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,8), %rbp
movq -24(%rsp), %r8 # 8-byte Reload
xorl %r9d, %r9d
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_7: # %._crit_edge
# in Loop: Header=BB0_4 Depth=2
incq %r9
addq $8, %r8
cmpq %r11, %r9
je .LBB0_8
.LBB0_4: # %.preheader
# Parent Loop BB0_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_6 Depth 3
testl %edx, %edx
jle .LBB0_7
# %bb.5: # %.lr.ph
# in Loop: Header=BB0_4 Depth=2
movsd (%rbp,%r9,8), %xmm0 # xmm0 = mem[0],zero
movq %r8, %rcx
xorl %eax, %eax
.p2align 4, 0x90
.LBB0_6: # Parent Loop BB0_2 Depth=1
# Parent Loop BB0_4 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%rcx), %xmm1 # xmm1 = mem[0],zero
mulsd (%r13,%rax,8), %xmm1
addsd %xmm1, %xmm0
movsd %xmm0, (%rbp,%r9,8)
incq %rax
addq %r14, %rcx
cmpq %rax, %rbx
jne .LBB0_6
jmp .LBB0_7
.LBB0_9: # %._crit_edge29
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z10host_dgemmiiiPKdS0_Pd, .Lfunc_end0-_Z10host_dgemmiiiPKdS0_Pd
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x40392a737110e454 # double 25.165824000000001
.LCPI1_1:
.quad 0x3e00000000000000 # double 4.6566128730773926E-10
.LCPI1_3:
.quad 0x408f400000000000 # double 1000
.LCPI1_4:
.quad 0x41e0000000000000 # double 2147483648
.LCPI1_5:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI1_2:
.long 0x447a0000 # float 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $24, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq stdout(%rip), %rdi
movl $.L.str, %esi
movl $1024, %edx # imm = 0x400
xorl %eax, %eax
callq fprintf
movl $8388608, %edi # imm = 0x800000
callq malloc
testq %rax, %rax
je .LBB1_1
# %bb.2:
movq %rax, %rbx
movl $8388608, %edi # imm = 0x800000
callq malloc
testq %rax, %rax
je .LBB1_1
# %bb.3:
movq %rax, %r14
movl $8388608, %edi # imm = 0x800000
callq malloc
testq %rax, %rax
je .LBB1_1
# %bb.4:
movq %rax, %r15
xorl %r12d, %r12d
movl $8388608, %edx # imm = 0x800000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
movq stdout(%rip), %rdi
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.2, %esi
movb $1, %al
callq fprintf
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movsd %xmm0, (%rbx,%r12,8)
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
mulsd .LCPI1_1(%rip), %xmm0
movsd %xmm0, (%r14,%r12,8)
incq %r12
cmpq $1048576, %r12 # imm = 0x100000
jne .LBB1_5
# %bb.6:
leaq 16(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
xorl %r12d, %r12d
xorl %esi, %esi
callq hipEventRecord
movq %r14, %rax
.p2align 4, 0x90
.LBB1_7: # %.preheader24.i
# =>This Loop Header: Depth=1
# Child Loop BB1_8 Depth 2
# Child Loop BB1_9 Depth 3
movq %r12, %rcx
shlq $13, %rcx
addq %r15, %rcx
movq %rbx, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB1_8: # %.preheader.i
# Parent Loop BB1_7 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_9 Depth 3
movsd (%rcx,%rsi,8), %xmm0 # xmm0 = mem[0],zero
movq %rdx, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB1_9: # Parent Loop BB1_7 Depth=1
# Parent Loop BB1_8 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%rdi), %xmm1 # xmm1 = mem[0],zero
mulsd (%rax,%r8,8), %xmm1
addsd %xmm1, %xmm0
incq %r8
addq $8192, %rdi # imm = 0x2000
cmpq $1024, %r8 # imm = 0x400
jne .LBB1_9
# %bb.10: # %._crit_edge.i
# in Loop: Header=BB1_8 Depth=2
movsd %xmm0, (%rcx,%rsi,8)
incq %rsi
addq $8, %rdx
cmpq $1024, %rsi # imm = 0x400
jne .LBB1_8
# %bb.11: # %._crit_edge27.i
# in Loop: Header=BB1_7 Depth=1
incq %r12
addq $8192, %rax # imm = 0x2000
cmpq $1024, %r12 # imm = 0x400
jne .LBB1_7
# %bb.12: # %_Z10host_dgemmiiiPKdS0_Pd.exit
movq 8(%rsp), %rdi
xorl %r12d, %r12d
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI1_2(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %esi
movb $1, %al
callq fprintf
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm1, %xmm1
cvtss2sd %xmm0, %xmm1
divsd .LCPI1_3(%rip), %xmm1
movsd .LCPI1_4(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm1, %xmm0
mulsd .LCPI1_5(%rip), %xmm0
movl $.L.str.4, %esi
movb $1, %al
callq fprintf
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
callq hipDeviceReset
testl %eax, %eax
jne .LBB1_13
.LBB1_14:
movl %r12d, %eax
addq $24, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_1:
.cfi_def_cfa_offset 64
movq stderr(%rip), %rcx
movl $.L.str.1, %edi
movl $21, %esi
movl $1, %edx
callq fwrite@PLT
movl $911, %r12d # imm = 0x38F
jmp .LBB1_14
.LBB1_13:
movq stderr(%rip), %rcx
movl $.L.str.5, %edi
movl $22, %esi
movl $1, %edx
callq fwrite@PLT
movl $1, %r12d
jmp .LBB1_14
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Matrix size is %d\n"
.size .L.str, 19
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Error in host malloc\n"
.size .L.str.1, 22
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total memory required is %lf MB\n"
.size .L.str.2, 33
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Total time CPU is %f sec\n"
.size .L.str.3, 26
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Performance is %f GFlop/s\n"
.size .L.str.4, 27
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "hipDeviceReset failed!"
.size .L.str.5, 23
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0009af6f_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10host_dgemmiiiPKdS0_Pd
.type _Z10host_dgemmiiiPKdS0_Pd, @function
_Z10host_dgemmiiiPKdS0_Pd:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rcx, -8(%rsp)
testl %esi, %esi
jle .L3
movl %edi, %r10d
movl %esi, %eax
movl %edx, %ebx
movq %r8, %r14
movq %r9, %r12
movslq %edi, %rdi
salq $3, %rdi
movl $0, %r11d
movl $0, %r13d
movl $0, %ecx
movslq %edx, %r15
movq %r15, %rsi
jmp .L5
.L8:
leal (%r9,%r11), %edx
movslq %edx, %rdx
leaq (%r12,%rdx,8), %r13
movq %r15, %rdx
movq %rbp, %rcx
.L6:
movsd (%rcx), %xmm0
mulsd (%rdx), %xmm0
addsd 0(%r13), %xmm0
movsd %xmm0, 0(%r13)
addq %rdi, %rcx
addq $8, %rdx
cmpq %r8, %rdx
jne .L6
.L9:
addl $1, %r9d
addq $8, %rbp
cmpl %r9d, %r10d
je .L12
.L10:
testl %ebx, %ebx
jg .L8
jmp .L9
.L12:
movl -16(%rsp), %ecx
movl -12(%rsp), %r13d
.L7:
addl $1, %ecx
addl %eax, %r13d
addl %r10d, %r11d
cmpl %ecx, %eax
je .L3
.L5:
testl %r10d, %r10d
jle .L7
movq -8(%rsp), %rbp
movslq %r13d, %rdx
leaq (%r14,%rdx,8), %r15
addq %rsi, %rdx
leaq (%r14,%rdx,8), %r8
movl $0, %r9d
movl %ecx, -16(%rsp)
movl %r13d, -12(%rsp)
jmp .L10
.L3:
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z10host_dgemmiiiPKdS0_Pd, .-_Z10host_dgemmiiiPKdS0_Pd
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Matrix size is %d\n"
.LC1:
.string "Error in host malloc\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "Total memory required is %lf MB\n"
.section .rodata.str1.1
.LC6:
.string "Total time CPU is %f sec\n"
.LC10:
.string "Performance is %f GFlop/s\n"
.LC11:
.string "cudaDeviceReset failed!"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movl $1024, %ecx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
call __fprintf_chk@PLT
movl $8388608, %edi
call malloc@PLT
testq %rax, %rax
je .L24
movq %rax, %r12
movl $8388608, %edi
call malloc@PLT
movq %rax, %rbp
testq %rax, %rax
je .L25
movl $8388608, %edi
call malloc@PLT
movq %rax, %r13
testq %rax, %rax
je .L26
movl $8388608, %edx
movl $0, %esi
movq %rax, %rdi
call memset@PLT
movsd .LC2(%rip), %xmm0
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movl $0, %ebx
.L19:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
mulsd .LC4(%rip), %xmm0
movsd %xmm0, (%r12,%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
mulsd .LC4(%rip), %xmm0
movsd %xmm0, 0(%rbp,%rbx)
addq $8, %rbx
cmpq $8388608, %rbx
jne .L19
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movq %r13, %r9
movq %rbp, %r8
movq %r12, %rcx
movl $1024, %edx
movl $1024, %esi
movl $1024, %edi
call _Z10host_dgemmiiiPKdS0_Pd
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
movss 4(%rsp), %xmm0
divss .LC5(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
pxor %xmm1, %xmm1
cvtss2sd 4(%rsp), %xmm1
divsd .LC7(%rip), %xmm1
movsd .LC8(%rip), %xmm0
divsd %xmm1, %xmm0
mulsd .LC9(%rip), %xmm0
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stdout(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
call cudaDeviceReset@PLT
movl %eax, %edx
movl $0, %eax
testl %edx, %edx
jne .L27
.L14:
movq 24(%rsp), %rdx
subq %fs:40, %rdx
jne .L28
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $911, %eax
jmp .L14
.L25:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $911, %eax
jmp .L14
.L26:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $911, %eax
jmp .L14
.L27:
leaq .LC11(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $1, %eax
jmp .L14
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 1896932436
.long 1077488243
.align 8
.LC4:
.long 0
.long 1040187392
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC5:
.long 1148846080
.section .rodata.cst8
.align 8
.LC7:
.long 0
.long 1083129856
.align 8
.LC8:
.long 0
.long 1105199104
.align 8
.LC9:
.long -400107883
.long 1041313291
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z10host_dgemmiiiPKdS0_Pd # -- Begin function _Z10host_dgemmiiiPKdS0_Pd
.p2align 4, 0x90
.type _Z10host_dgemmiiiPKdS0_Pd,@function
_Z10host_dgemmiiiPKdS0_Pd: # @_Z10host_dgemmiiiPKdS0_Pd
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, -8(%rsp) # 8-byte Spill
movq %r8, -16(%rsp) # 8-byte Spill
movq %rcx, -24(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB0_9
# %bb.1: # %.preheader24.lr.ph
movslq %edi, %rax
movl %esi, %r10d
movl %eax, %r11d
movl %edx, %ebx
movq %rax, -32(%rsp) # 8-byte Spill
leaq (,%rax,8), %r14
xorl %r15d, %r15d
xorl %r12d, %r12d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_8: # %._crit_edge27
# in Loop: Header=BB0_2 Depth=1
incq %r12
addl %esi, %r15d
cmpq %r10, %r12
je .LBB0_9
.LBB0_2: # %.preheader24
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
# Child Loop BB0_6 Depth 3
testl %edi, %edi
jle .LBB0_8
# %bb.3: # %.preheader.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r15d, %eax
movq -16(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,8), %r13
movq %r12, %rax
imulq -32(%rsp), %rax # 8-byte Folded Reload
movq -8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,8), %rbp
movq -24(%rsp), %r8 # 8-byte Reload
xorl %r9d, %r9d
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_7: # %._crit_edge
# in Loop: Header=BB0_4 Depth=2
incq %r9
addq $8, %r8
cmpq %r11, %r9
je .LBB0_8
.LBB0_4: # %.preheader
# Parent Loop BB0_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_6 Depth 3
testl %edx, %edx
jle .LBB0_7
# %bb.5: # %.lr.ph
# in Loop: Header=BB0_4 Depth=2
movsd (%rbp,%r9,8), %xmm0 # xmm0 = mem[0],zero
movq %r8, %rcx
xorl %eax, %eax
.p2align 4, 0x90
.LBB0_6: # Parent Loop BB0_2 Depth=1
# Parent Loop BB0_4 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%rcx), %xmm1 # xmm1 = mem[0],zero
mulsd (%r13,%rax,8), %xmm1
addsd %xmm1, %xmm0
movsd %xmm0, (%rbp,%r9,8)
incq %rax
addq %r14, %rcx
cmpq %rax, %rbx
jne .LBB0_6
jmp .LBB0_7
.LBB0_9: # %._crit_edge29
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z10host_dgemmiiiPKdS0_Pd, .Lfunc_end0-_Z10host_dgemmiiiPKdS0_Pd
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x40392a737110e454 # double 25.165824000000001
.LCPI1_1:
.quad 0x3e00000000000000 # double 4.6566128730773926E-10
.LCPI1_3:
.quad 0x408f400000000000 # double 1000
.LCPI1_4:
.quad 0x41e0000000000000 # double 2147483648
.LCPI1_5:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI1_2:
.long 0x447a0000 # float 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $24, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq stdout(%rip), %rdi
movl $.L.str, %esi
movl $1024, %edx # imm = 0x400
xorl %eax, %eax
callq fprintf
movl $8388608, %edi # imm = 0x800000
callq malloc
testq %rax, %rax
je .LBB1_1
# %bb.2:
movq %rax, %rbx
movl $8388608, %edi # imm = 0x800000
callq malloc
testq %rax, %rax
je .LBB1_1
# %bb.3:
movq %rax, %r14
movl $8388608, %edi # imm = 0x800000
callq malloc
testq %rax, %rax
je .LBB1_1
# %bb.4:
movq %rax, %r15
xorl %r12d, %r12d
movl $8388608, %edx # imm = 0x800000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
movq stdout(%rip), %rdi
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.2, %esi
movb $1, %al
callq fprintf
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero
mulsd %xmm1, %xmm0
movsd %xmm0, (%rbx,%r12,8)
callq rand
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
mulsd .LCPI1_1(%rip), %xmm0
movsd %xmm0, (%r14,%r12,8)
incq %r12
cmpq $1048576, %r12 # imm = 0x100000
jne .LBB1_5
# %bb.6:
leaq 16(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
xorl %r12d, %r12d
xorl %esi, %esi
callq hipEventRecord
movq %r14, %rax
.p2align 4, 0x90
.LBB1_7: # %.preheader24.i
# =>This Loop Header: Depth=1
# Child Loop BB1_8 Depth 2
# Child Loop BB1_9 Depth 3
movq %r12, %rcx
shlq $13, %rcx
addq %r15, %rcx
movq %rbx, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB1_8: # %.preheader.i
# Parent Loop BB1_7 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_9 Depth 3
movsd (%rcx,%rsi,8), %xmm0 # xmm0 = mem[0],zero
movq %rdx, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB1_9: # Parent Loop BB1_7 Depth=1
# Parent Loop BB1_8 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%rdi), %xmm1 # xmm1 = mem[0],zero
mulsd (%rax,%r8,8), %xmm1
addsd %xmm1, %xmm0
incq %r8
addq $8192, %rdi # imm = 0x2000
cmpq $1024, %r8 # imm = 0x400
jne .LBB1_9
# %bb.10: # %._crit_edge.i
# in Loop: Header=BB1_8 Depth=2
movsd %xmm0, (%rcx,%rsi,8)
incq %rsi
addq $8, %rdx
cmpq $1024, %rsi # imm = 0x400
jne .LBB1_8
# %bb.11: # %._crit_edge27.i
# in Loop: Header=BB1_7 Depth=1
incq %r12
addq $8192, %rax # imm = 0x2000
cmpq $1024, %r12 # imm = 0x400
jne .LBB1_7
# %bb.12: # %_Z10host_dgemmiiiPKdS0_Pd.exit
movq 8(%rsp), %rdi
xorl %r12d, %r12d
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss .LCPI1_2(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %esi
movb $1, %al
callq fprintf
movq stdout(%rip), %rdi
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorps %xmm1, %xmm1
cvtss2sd %xmm0, %xmm1
divsd .LCPI1_3(%rip), %xmm1
movsd .LCPI1_4(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm1, %xmm0
mulsd .LCPI1_5(%rip), %xmm0
movl $.L.str.4, %esi
movb $1, %al
callq fprintf
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
callq hipDeviceReset
testl %eax, %eax
jne .LBB1_13
.LBB1_14:
movl %r12d, %eax
addq $24, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_1:
.cfi_def_cfa_offset 64
movq stderr(%rip), %rcx
movl $.L.str.1, %edi
movl $21, %esi
movl $1, %edx
callq fwrite@PLT
movl $911, %r12d # imm = 0x38F
jmp .LBB1_14
.LBB1_13:
movq stderr(%rip), %rcx
movl $.L.str.5, %edi
movl $22, %esi
movl $1, %edx
callq fwrite@PLT
movl $1, %r12d
jmp .LBB1_14
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Matrix size is %d\n"
.size .L.str, 19
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Error in host malloc\n"
.size .L.str.1, 22
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total memory required is %lf MB\n"
.size .L.str.2, 33
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Total time CPU is %f sec\n"
.size .L.str.3, 26
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Performance is %f GFlop/s\n"
.size .L.str.4, 27
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "hipDeviceReset failed!"
.size .L.str.5, 23
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**
*
 * This is a CUDA version of the array addition program, created by
 * following the tutorial here:
*
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*
* Any adjustments made are made from suggestions from Programming Massively
* Parallel Processors, 3rd Edition:
*
* https://www.amazon.com/Programming-Massively-Parallel-Processors-Hands/dp/0128119861/ref=dp_ob_title_bk
*
* */
#include <cuda.h>
#include <iostream>
#include <math.h>
// Signifies a kernel function
__global__ // Runs on device code
void add(int n, float *x, float *y)
{
// Get the index for the thread that
// is running this kernel
int currentThread = threadIdx.x;
// Get the size of a block
int blockSize = blockDim.x;
// Loop over each thread. You start with
// the current thread, and then jump
// by the size of a block. This means
// that each thread handles 1 / blockSize
// of the data.
for (int i = currentThread; i < n; i += blockSize)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 100<<20; // 100M elements
float *x, *y;
// Allocate memory using Unified memory.
// Accessible either from the device or host.
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on N elements on the CPU
add<<<1, 256>>>(N, x, y);
// Wait for the GPU to finish before accessing the host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free the memory
cudaFree(x);
cudaFree(y);
return 0;
} | code for sm_80
Function : _Z3addiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ I2F.U32.RP R5, c[0x0][0x0] ; /* 0x0000000000057b06 */
/* 0x000e220000209000 */
/*0050*/ LOP3.LUT R4, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff047212 */
/* 0x000fe200078e33ff */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0070*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */
/* 0x000fe20003f45070 */
/*0080*/ BSSY B0, 0x2d0 ; /* 0x0000024000007945 */
/* 0x000fe20003800000 */
/*0090*/ IADD3 R4, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */
/* 0x000fc60007ffe0ff */
/*00a0*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x001e240000001000 */
/*00b0*/ IADD3 R2, R5, 0xffffffe, RZ ; /* 0x0ffffffe05027810 */
/* 0x001fcc0007ffe0ff */
/*00c0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00d0*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*00e0*/ IMAD.MOV R7, RZ, RZ, -R3 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a03 */
/*00f0*/ IMAD R7, R7, c[0x0][0x0], RZ ; /* 0x0000000007077a24 */
/* 0x000fca00078e02ff */
/*0100*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fcc00078e0002 */
/*0110*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0120*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a03 */
/*0130*/ IMAD R4, R5, c[0x0][0x0], R4 ; /* 0x0000000005047a24 */
/* 0x000fca00078e0204 */
/*0140*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x0], PT ; /* 0x0000000004007a0c */
/* 0x000fda0003f06070 */
/*0150*/ @P0 IADD3 R4, R4, -c[0x0][0x0], RZ ; /* 0x8000000004040a10 */
/* 0x000fe40007ffe0ff */
/*0160*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.GE.U32.AND P1, PT, R4, c[0x0][0x0], PT ; /* 0x0000000004007a0c */
/* 0x000fda0003f26070 */
/*0180*/ @P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103031810 */
/* 0x000fe40007ffe0ff */
/*0190*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x0], RZ, 0x33, !PT ; /* 0x00000000ff03aa12 */
/* 0x000fc800078e33ff */
/*01a0*/ IADD3 R2, R3.reuse, 0x1, RZ ; /* 0x0000000103027810 */
/* 0x040fe40007ffe0ff */
/*01b0*/ ISETP.GE.U32.AND P1, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe40003f26070 */
/*01c0*/ LOP3.LUT P0, R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */
/* 0x000fda000780c0ff */
/*01d0*/ @!P0 BRA 0x2c0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*01e0*/ MOV R9, 0x4 ; /* 0x0000000400097802 */
/* 0x000fe20000000f00 */
/*01f0*/ IMAD.MOV.U32 R6, RZ, RZ, R2 ; /* 0x000000ffff067224 */
/* 0x000fc800078e0002 */
/*0200*/ IMAD.WIDE R2, R0, R9, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fc800078e0209 */
/*0210*/ IMAD.WIDE R4, R0, R9, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0209 */
/*0220*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea8000c1e1900 */
/*0230*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x0000a2000c1e1900 */
/*0240*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe40007ffe0ff */
/*0250*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fe40007ffe0ff */
/*0260*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0270*/ IMAD.WIDE R4, R9, c[0x0][0x0], R4 ; /* 0x0000000009047a25 */
/* 0x001fc800078e0204 */
/*0280*/ FADD R7, R7, R8 ; /* 0x0000000807077221 */
/* 0x004fca0000000000 */
/*0290*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101904 */
/*02a0*/ IMAD.WIDE R2, R9, c[0x0][0x0], R2 ; /* 0x0000000009027a25 */
/* 0x001fe200078e0202 */
/*02b0*/ @P0 BRA 0x220 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*02c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02d0*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*02e0*/ HFMA2.MMA R21, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff157435 */
/* 0x000fd400000001ff */
/*02f0*/ IMAD.WIDE R2, R0, R21, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e0215 */
/*0300*/ IMAD.WIDE R4, R0, R21, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x001fe200078e0215 */
/*0310*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea8000c1e1900 */
/*0320*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea2000c1e1900 */
/*0330*/ IMAD.WIDE R8, R21, c[0x0][0x0], R4 ; /* 0x0000000015087a25 */
/* 0x000fc800078e0204 */
/*0340*/ FADD R15, R6, R7 ; /* 0x00000007060f7221 */
/* 0x004fe40000000000 */
/*0350*/ IMAD.WIDE R6, R21, c[0x0][0x0], R2 ; /* 0x0000000015067a25 */
/* 0x000fc600078e0202 */
/*0360*/ STG.E [R4.64], R15 ; /* 0x0000000f04007986 */
/* 0x0001e8000c101904 */
/*0370*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */
/* 0x000ea8000c1e1900 */
/*0380*/ LDG.E R11, [R6.64] ; /* 0x00000004060b7981 */
/* 0x000ea2000c1e1900 */
/*0390*/ IMAD.WIDE R12, R21, c[0x0][0x0], R8 ; /* 0x00000000150c7a25 */
/* 0x000fc800078e0208 */
/*03a0*/ FADD R17, R10, R11 ; /* 0x0000000b0a117221 */
/* 0x004fe40000000000 */
/*03b0*/ IMAD.WIDE R10, R21, c[0x0][0x0], R6 ; /* 0x00000000150a7a25 */
/* 0x000fc600078e0206 */
/*03c0*/ STG.E [R8.64], R17 ; /* 0x0000001108007986 */
/* 0x0003e8000c101904 */
/*03d0*/ LDG.E R2, [R12.64] ; /* 0x000000040c027981 */
/* 0x000ea8000c1e1900 */
/*03e0*/ LDG.E R3, [R10.64] ; /* 0x000000040a037981 */
/* 0x000ea2000c1e1900 */
/*03f0*/ IMAD.WIDE R4, R21, c[0x0][0x0], R12 ; /* 0x0000000015047a25 */
/* 0x001fc800078e020c */
/*0400*/ FADD R19, R2, R3 ; /* 0x0000000302137221 */
/* 0x004fe40000000000 */
/*0410*/ IMAD.WIDE R2, R21, c[0x0][0x0], R10 ; /* 0x0000000015027a25 */
/* 0x000fc600078e020a */
/*0420*/ STG.E [R12.64], R19 ; /* 0x000000130c007986 */
/* 0x0001e8000c101904 */
/*0430*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0440*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea2000c1e1900 */
/*0450*/ MOV R15, c[0x0][0x0] ; /* 0x00000000000f7a02 */
/* 0x000fe20000000f00 */
/*0460*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x002fc600078e00ff */
/*0470*/ LEA R0, R15, R0, 0x1 ; /* 0x000000000f007211 */
/* 0x000fc800078e08ff */
/*0480*/ LEA R0, R9, R0, 0x1 ; /* 0x0000000009007211 */
/* 0x000fc800078e08ff */
/*0490*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fe20003f06270 */
/*04a0*/ FADD R7, R6, R3 ; /* 0x0000000306077221 */
/* 0x004fca0000000000 */
/*04b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001ee000c101904 */
/*04c0*/ @!P0 BRA 0x2e0 ; /* 0xfffffe1000008947 */
/* 0x000fea000383ffff */
/*04d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04e0*/ BRA 0x4e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
*
* This is a cuda version of the array addition program as created from the
* tutorial from here:
*
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*
* Any adjustments made are made from suggestions from Programming Massively
* Parallel Processors, 3rd Edition:
*
* https://www.amazon.com/Programming-Massively-Parallel-Processors-Hands/dp/0128119861/ref=dp_ob_title_bk
*
* */
#include <cuda.h>
#include <iostream>
#include <math.h>
// Signifies a kernel function
__global__ // Runs on device code
void add(int n, float *x, float *y)
{
// Get the index for the thread that
// is running this kernel
int currentThread = threadIdx.x;
// Get the size of a block
int blockSize = blockDim.x;
// Loop over each thread. You start with
// the current thread, and then jump
// by the size of a block. This means
// that each thread handles 1 / blockSize
// of the data.
for (int i = currentThread; i < n; i += blockSize)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 100<<20; // 100M elements
float *x, *y;
// Allocate memory using Unified memory.
// Accessible either from the device or host.
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on N elements on the CPU
add<<<1, 256>>>(N, x, y);
// Wait for the GPU to finish before accessing the host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free the memory
cudaFree(x);
cudaFree(y);
return 0;
} | .file "tmpxft_001071a5_00000000-6_cuda_vec-add-unified.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3addiPfS_iPfS_
.type _Z25__device_stub__Z3addiPfS_iPfS_, @function
_Z25__device_stub__Z3addiPfS_iPfS_:
.LFB3694:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z25__device_stub__Z3addiPfS_iPfS_, .-_Z25__device_stub__Z3addiPfS_iPfS_
.globl _Z3addiPfS_
.type _Z3addiPfS_, @function
_Z3addiPfS_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3addiPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addiPfS_, .-_Z3addiPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "Max error: "
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $72, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 16(%rsp), %rdi
movl $1, %edx
movl $419430400, %esi
call cudaMallocManaged@PLT
leaq 24(%rsp), %rdi
movl $1, %edx
movl $419430400, %esi
call cudaMallocManaged@PLT
movl $0, %eax
movss .LC1(%rip), %xmm1
movss .LC2(%rip), %xmm0
.L12:
movq 16(%rsp), %rdx
movss %xmm1, (%rdx,%rax)
movq 24(%rsp), %rdx
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq $419430400, %rax
jne .L12
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call cudaDeviceSynchronize@PLT
movq 24(%rsp), %rbx
leaq 419430400(%rbx), %rbp
movl $0x00000000, 12(%rsp)
.L14:
movss (%rbx), %xmm0
subss .LC3(%rip), %xmm0
andps .LC4(%rip), %xmm0
movss 12(%rsp), %xmm1
call fmaxf@PLT
movss %xmm0, 12(%rsp)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L14
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movl $104857600, %edi
call _Z25__device_stub__Z3addiPfS_iPfS_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z3addiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1065353216
.align 4
.LC2:
.long 1073741824
.align 4
.LC3:
.long 1077936128
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC4:
.long 2147483647
.long 0
.long 0
.long 0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
*
* This is a cuda version of the array addition program as created from the
* tutorial from here:
*
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*
* Any adjustments made are made from suggestions from Programming Massively
* Parallel Processors, 3rd Edition:
*
* https://www.amazon.com/Programming-Massively-Parallel-Processors-Hands/dp/0128119861/ref=dp_ob_title_bk
*
* */
#include <cuda.h>
#include <iostream>
#include <math.h>
// Signifies a kernel function
__global__ // Runs on device code
void add(int n, float *x, float *y)
{
// Get the index for the thread that
// is running this kernel
int currentThread = threadIdx.x;
// Get the size of a block
int blockSize = blockDim.x;
// Loop over each thread. You start with
// the current thread, and then jump
// by the size of a block. This means
// that each thread handles 1 / blockSize
// of the data.
for (int i = currentThread; i < n; i += blockSize)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 100<<20; // 100M elements
float *x, *y;
// Allocate memory using Unified memory.
// Accessible either from the device or host.
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on N elements on the CPU
add<<<1, 256>>>(N, x, y);
// Wait for the GPU to finish before accessing the host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free the memory
cudaFree(x);
cudaFree(y);
return 0;
} | /**
*
* This is a cuda version of the array addition program as created from the
* tutorial from here:
*
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*
* Any adjustments made are made from suggestions from Programming Massively
* Parallel Processors, 3rd Edition:
*
* https://www.amazon.com/Programming-Massively-Parallel-Processors-Hands/dp/0128119861/ref=dp_ob_title_bk
*
* */
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
// Signifies a kernel function
__global__ // Runs on device code
void add(int n, float *x, float *y)
{
// Get the index for the thread that
// is running this kernel
int currentThread = threadIdx.x;
// Get the size of a block
int blockSize = blockDim.x;
// Loop over each thread. You start with
// the current thread, and then jump
// by the size of a block. This means
// that each thread handles 1 / blockSize
// of the data.
for (int i = currentThread; i < n; i += blockSize)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 100<<20; // 100M elements
float *x, *y;
// Allocate memory using Unified memory.
// Accessible either from the device or host.
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on N elements on the CPU
add<<<1, 256>>>(N, x, y);
// Wait for the GPU to finish before accessing the host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free the memory
hipFree(x);
hipFree(y);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /**
*
* This is a cuda version of the array addition program as created from the
* tutorial from here:
*
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*
* Any adjustments made are made from suggestions from Programming Massively
* Parallel Processors, 3rd Edition:
*
* https://www.amazon.com/Programming-Massively-Parallel-Processors-Hands/dp/0128119861/ref=dp_ob_title_bk
*
* */
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
// Signifies a kernel function
__global__ // Runs on device code
void add(int n, float *x, float *y)
{
// Get the index for the thread that
// is running this kernel
int currentThread = threadIdx.x;
// Get the size of a block
int blockSize = blockDim.x;
// Loop over each thread. You start with
// the current thread, and then jump
// by the size of a block. This means
// that each thread handles 1 / blockSize
// of the data.
for (int i = currentThread; i < n; i += blockSize)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 100<<20; // 100M elements
float *x, *y;
// Allocate memory using Unified memory.
// Accessible either from the device or host.
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on N elements on the CPU
add<<<1, 256>>>(N, x, y);
// Wait for the GPU to finish before accessing the host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free the memory
hipFree(x);
hipFree(y);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addiPfS_
.globl _Z3addiPfS_
.p2align 8
.type _Z3addiPfS_,@function
_Z3addiPfS_:
s_load_b32 s2, s[0:1], 0x0
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x8
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s1, s3, 0xffff
s_mov_b32 s3, 0
s_lshl_b32 s8, s1, 2
s_mov_b32 s9, s3
.p2align 6
.LBB0_2:
v_add_co_u32 v3, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
v_add_co_u32 v5, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v2, vcc_lo
v_add_nc_u32_e32 v0, s1, v0
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[5:6], off
v_add_co_u32 v1, vcc_lo, v1, s8
v_cmp_le_i32_e64 s0, s2, v0
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_2)
s_or_b32 s9, s0, s9
s_waitcnt vmcnt(0)
v_add_f32_e32 v3, v3, v4
global_store_b32 v[5:6], v3, off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 10
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addiPfS_, .Lfunc_end0-_Z3addiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 12
.sgpr_spill_count: 0
.symbol: _Z3addiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**
*
* This is a cuda version of the array addition program as created from the
* tutorial from here:
*
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*
* Any adjustments made are made from suggestions from Programming Massively
* Parallel Processors, 3rd Edition:
*
* https://www.amazon.com/Programming-Massively-Parallel-Processors-Hands/dp/0128119861/ref=dp_ob_title_bk
*
* */
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
// Signifies a kernel function
__global__ // Runs on device code
void add(int n, float *x, float *y)
{
// Get the index for the thread that
// is running this kernel
int currentThread = threadIdx.x;
// Get the size of a block
int blockSize = blockDim.x;
// Loop over each thread. You start with
// the current thread, and then jump
// by the size of a block. This means
// that each thread handles 1 / blockSize
// of the data.
for (int i = currentThread; i < n; i += blockSize)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 100<<20; // 100M elements
float *x, *y;
// Allocate memory using Unified memory.
// Accessible either from the device or host.
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on N elements on the CPU
add<<<1, 256>>>(N, x, y);
// Wait for the GPU to finish before accessing the host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free the memory
hipFree(x);
hipFree(y);
return 0;
} | .text
.file "cuda_vec-add-unified.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addiPfS_ # -- Begin function _Z18__device_stub__addiPfS_
.p2align 4, 0x90
.type _Z18__device_stub__addiPfS_,@function
_Z18__device_stub__addiPfS_: # @_Z18__device_stub__addiPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addiPfS_, .Lfunc_end0-_Z18__device_stub__addiPfS_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0xc0400000 # float -3
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI1_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $152, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 16(%rsp), %rdi
movl $419430400, %esi # imm = 0x19000000
movl $1, %edx
callq hipMallocManaged
leaq 8(%rsp), %rdi
movl $419430400, %esi # imm = 0x19000000
movl $1, %edx
callq hipMallocManaged
movq 16(%rsp), %rax
xorl %ecx, %ecx
movq 8(%rsp), %rdx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rax,%rcx,4) # imm = 0x3F800000
movl $1073741824, (%rdx,%rcx,4) # imm = 0x40000000
incq %rcx
cmpq $104857600, %rcx # imm = 0x6400000
jne .LBB1_1
# %bb.2:
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 255(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl $104857600, 28(%rsp) # imm = 0x6400000
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
xorps %xmm2, %xmm2
xorl %eax, %eax
movq 8(%rsp), %rcx
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps .LCPI1_1(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movaps %xmm2, %xmm5
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rax,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
addss %xmm0, %xmm3
andps %xmm1, %xmm3
cmpunordss %xmm5, %xmm5
movaps %xmm5, %xmm4
andps %xmm3, %xmm4
maxss %xmm2, %xmm3
andnps %xmm3, %xmm5
orps %xmm4, %xmm5
incq %rax
movaps %xmm5, %xmm2
cmpq $104857600, %rax # imm = 0x6400000
jne .LBB1_5
# %bb.6:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $11, %edx
movaps %xmm5, 128(%rsp) # 16-byte Spill
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movaps 128(%rsp), %xmm0 # 16-byte Reload
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_11
# %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_9
# %bb.8:
movzbl 67(%rbx), %ecx
jmp .LBB1_10
.LBB1_9:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB1_11:
.cfi_def_cfa_offset 176
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addiPfS_,@object # @_Z3addiPfS_
.section .rodata,"a",@progbits
.globl _Z3addiPfS_
.p2align 3, 0x0
_Z3addiPfS_:
.quad _Z18__device_stub__addiPfS_
.size _Z3addiPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Max error: "
.size .L.str, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addiPfS_"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addiPfS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x001fda0003f06270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ I2F.U32.RP R5, c[0x0][0x0] ; /* 0x0000000000057b06 */
/* 0x000e220000209000 */
/*0050*/ LOP3.LUT R4, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff047212 */
/* 0x000fe200078e33ff */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0070*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0x0], PT ; /* 0x00000000ff007a0c */
/* 0x000fe20003f45070 */
/*0080*/ BSSY B0, 0x2d0 ; /* 0x0000024000007945 */
/* 0x000fe20003800000 */
/*0090*/ IADD3 R4, R4, c[0x0][0x160], RZ ; /* 0x0000580004047a10 */
/* 0x000fc60007ffe0ff */
/*00a0*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x001e240000001000 */
/*00b0*/ IADD3 R2, R5, 0xffffffe, RZ ; /* 0x0ffffffe05027810 */
/* 0x001fcc0007ffe0ff */
/*00c0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00d0*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*00e0*/ IMAD.MOV R7, RZ, RZ, -R3 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a03 */
/*00f0*/ IMAD R7, R7, c[0x0][0x0], RZ ; /* 0x0000000007077a24 */
/* 0x000fca00078e02ff */
/*0100*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fcc00078e0002 */
/*0110*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0120*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a03 */
/*0130*/ IMAD R4, R5, c[0x0][0x0], R4 ; /* 0x0000000005047a24 */
/* 0x000fca00078e0204 */
/*0140*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x0], PT ; /* 0x0000000004007a0c */
/* 0x000fda0003f06070 */
/*0150*/ @P0 IADD3 R4, R4, -c[0x0][0x0], RZ ; /* 0x8000000004040a10 */
/* 0x000fe40007ffe0ff */
/*0160*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.GE.U32.AND P1, PT, R4, c[0x0][0x0], PT ; /* 0x0000000004007a0c */
/* 0x000fda0003f26070 */
/*0180*/ @P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103031810 */
/* 0x000fe40007ffe0ff */
/*0190*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x0], RZ, 0x33, !PT ; /* 0x00000000ff03aa12 */
/* 0x000fc800078e33ff */
/*01a0*/ IADD3 R2, R3.reuse, 0x1, RZ ; /* 0x0000000103027810 */
/* 0x040fe40007ffe0ff */
/*01b0*/ ISETP.GE.U32.AND P1, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe40003f26070 */
/*01c0*/ LOP3.LUT P0, R2, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302027812 */
/* 0x000fda000780c0ff */
/*01d0*/ @!P0 BRA 0x2c0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*01e0*/ MOV R9, 0x4 ; /* 0x0000000400097802 */
/* 0x000fe20000000f00 */
/*01f0*/ IMAD.MOV.U32 R6, RZ, RZ, R2 ; /* 0x000000ffff067224 */
/* 0x000fc800078e0002 */
/*0200*/ IMAD.WIDE R2, R0, R9, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fc800078e0209 */
/*0210*/ IMAD.WIDE R4, R0, R9, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0209 */
/*0220*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea8000c1e1900 */
/*0230*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x0000a2000c1e1900 */
/*0240*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe40007ffe0ff */
/*0250*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fe40007ffe0ff */
/*0260*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0270*/ IMAD.WIDE R4, R9, c[0x0][0x0], R4 ; /* 0x0000000009047a25 */
/* 0x001fc800078e0204 */
/*0280*/ FADD R7, R7, R8 ; /* 0x0000000807077221 */
/* 0x004fca0000000000 */
/*0290*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101904 */
/*02a0*/ IMAD.WIDE R2, R9, c[0x0][0x0], R2 ; /* 0x0000000009027a25 */
/* 0x001fe200078e0202 */
/*02b0*/ @P0 BRA 0x220 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*02c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02d0*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*02e0*/ HFMA2.MMA R21, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff157435 */
/* 0x000fd400000001ff */
/*02f0*/ IMAD.WIDE R2, R0, R21, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e0215 */
/*0300*/ IMAD.WIDE R4, R0, R21, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x001fe200078e0215 */
/*0310*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea8000c1e1900 */
/*0320*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea2000c1e1900 */
/*0330*/ IMAD.WIDE R8, R21, c[0x0][0x0], R4 ; /* 0x0000000015087a25 */
/* 0x000fc800078e0204 */
/*0340*/ FADD R15, R6, R7 ; /* 0x00000007060f7221 */
/* 0x004fe40000000000 */
/*0350*/ IMAD.WIDE R6, R21, c[0x0][0x0], R2 ; /* 0x0000000015067a25 */
/* 0x000fc600078e0202 */
/*0360*/ STG.E [R4.64], R15 ; /* 0x0000000f04007986 */
/* 0x0001e8000c101904 */
/*0370*/ LDG.E R10, [R8.64] ; /* 0x00000004080a7981 */
/* 0x000ea8000c1e1900 */
/*0380*/ LDG.E R11, [R6.64] ; /* 0x00000004060b7981 */
/* 0x000ea2000c1e1900 */
/*0390*/ IMAD.WIDE R12, R21, c[0x0][0x0], R8 ; /* 0x00000000150c7a25 */
/* 0x000fc800078e0208 */
/*03a0*/ FADD R17, R10, R11 ; /* 0x0000000b0a117221 */
/* 0x004fe40000000000 */
/*03b0*/ IMAD.WIDE R10, R21, c[0x0][0x0], R6 ; /* 0x00000000150a7a25 */
/* 0x000fc600078e0206 */
/*03c0*/ STG.E [R8.64], R17 ; /* 0x0000001108007986 */
/* 0x0003e8000c101904 */
/*03d0*/ LDG.E R2, [R12.64] ; /* 0x000000040c027981 */
/* 0x000ea8000c1e1900 */
/*03e0*/ LDG.E R3, [R10.64] ; /* 0x000000040a037981 */
/* 0x000ea2000c1e1900 */
/*03f0*/ IMAD.WIDE R4, R21, c[0x0][0x0], R12 ; /* 0x0000000015047a25 */
/* 0x001fc800078e020c */
/*0400*/ FADD R19, R2, R3 ; /* 0x0000000302137221 */
/* 0x004fe40000000000 */
/*0410*/ IMAD.WIDE R2, R21, c[0x0][0x0], R10 ; /* 0x0000000015027a25 */
/* 0x000fc600078e020a */
/*0420*/ STG.E [R12.64], R19 ; /* 0x000000130c007986 */
/* 0x0001e8000c101904 */
/*0430*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0440*/ LDG.E R6, [R4.64] ; /* 0x0000000404067981 */
/* 0x000ea2000c1e1900 */
/*0450*/ MOV R15, c[0x0][0x0] ; /* 0x00000000000f7a02 */
/* 0x000fe20000000f00 */
/*0460*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x002fc600078e00ff */
/*0470*/ LEA R0, R15, R0, 0x1 ; /* 0x000000000f007211 */
/* 0x000fc800078e08ff */
/*0480*/ LEA R0, R9, R0, 0x1 ; /* 0x0000000009007211 */
/* 0x000fc800078e08ff */
/*0490*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fe20003f06270 */
/*04a0*/ FADD R7, R6, R3 ; /* 0x0000000306077221 */
/* 0x004fca0000000000 */
/*04b0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001ee000c101904 */
/*04c0*/ @!P0 BRA 0x2e0 ; /* 0xfffffe1000008947 */
/* 0x000fea000383ffff */
/*04d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*04e0*/ BRA 0x4e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0500*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addiPfS_
.globl _Z3addiPfS_
.p2align 8
.type _Z3addiPfS_,@function
_Z3addiPfS_:
s_load_b32 s2, s[0:1], 0x0
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x8
v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0
s_waitcnt lgkmcnt(0)
s_and_b32 s1, s3, 0xffff
s_mov_b32 s3, 0
s_lshl_b32 s8, s1, 2
s_mov_b32 s9, s3
.p2align 6
.LBB0_2:
v_add_co_u32 v3, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
v_add_co_u32 v5, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v2, vcc_lo
v_add_nc_u32_e32 v0, s1, v0
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[5:6], off
v_add_co_u32 v1, vcc_lo, v1, s8
v_cmp_le_i32_e64 s0, s2, v0
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_2)
s_or_b32 s9, s0, s9
s_waitcnt vmcnt(0)
v_add_f32_e32 v3, v3, v4
global_store_b32 v[5:6], v3, off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 10
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addiPfS_, .Lfunc_end0-_Z3addiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 12
.sgpr_spill_count: 0
.symbol: _Z3addiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001071a5_00000000-6_cuda_vec-add-unified.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3addiPfS_iPfS_
.type _Z25__device_stub__Z3addiPfS_iPfS_, @function
_Z25__device_stub__Z3addiPfS_iPfS_:
.LFB3694:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z25__device_stub__Z3addiPfS_iPfS_, .-_Z25__device_stub__Z3addiPfS_iPfS_
.globl _Z3addiPfS_
.type _Z3addiPfS_, @function
_Z3addiPfS_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3addiPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z3addiPfS_, .-_Z3addiPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "Max error: "
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $72, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 16(%rsp), %rdi
movl $1, %edx
movl $419430400, %esi
call cudaMallocManaged@PLT
leaq 24(%rsp), %rdi
movl $1, %edx
movl $419430400, %esi
call cudaMallocManaged@PLT
movl $0, %eax
movss .LC1(%rip), %xmm1
movss .LC2(%rip), %xmm0
.L12:
movq 16(%rsp), %rdx
movss %xmm1, (%rdx,%rax)
movq 24(%rsp), %rdx
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq $419430400, %rax
jne .L12
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
call cudaDeviceSynchronize@PLT
movq 24(%rsp), %rbx
leaq 419430400(%rbx), %rbp
movl $0x00000000, 12(%rsp)
.L14:
movss (%rbx), %xmm0
subss .LC3(%rip), %xmm0
andps .LC4(%rip), %xmm0
movss 12(%rsp), %xmm1
call fmaxf@PLT
movss %xmm0, 12(%rsp)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L14
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movl $104857600, %edi
call _Z25__device_stub__Z3addiPfS_iPfS_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z3addiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1065353216
.align 4
.LC2:
.long 1073741824
.align 4
.LC3:
.long 1077936128
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC4:
.long 2147483647
.long 0
.long 0
.long 0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_vec-add-unified.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addiPfS_ # -- Begin function _Z18__device_stub__addiPfS_
.p2align 4, 0x90
.type _Z18__device_stub__addiPfS_,@function
_Z18__device_stub__addiPfS_: # @_Z18__device_stub__addiPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addiPfS_, .Lfunc_end0-_Z18__device_stub__addiPfS_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0xc0400000 # float -3
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI1_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $152, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 16(%rsp), %rdi
movl $419430400, %esi # imm = 0x19000000
movl $1, %edx
callq hipMallocManaged
leaq 8(%rsp), %rdi
movl $419430400, %esi # imm = 0x19000000
movl $1, %edx
callq hipMallocManaged
movq 16(%rsp), %rax
xorl %ecx, %ecx
movq 8(%rsp), %rdx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rax,%rcx,4) # imm = 0x3F800000
movl $1073741824, (%rdx,%rcx,4) # imm = 0x40000000
incq %rcx
cmpq $104857600, %rcx # imm = 0x6400000
jne .LBB1_1
# %bb.2:
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 255(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl $104857600, 28(%rsp) # imm = 0x6400000
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 88(%rsp), %rax
movq %rax, 104(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
xorps %xmm2, %xmm2
xorl %eax, %eax
movq 8(%rsp), %rcx
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps .LCPI1_1(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movaps %xmm2, %xmm5
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movss (%rcx,%rax,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
addss %xmm0, %xmm3
andps %xmm1, %xmm3
cmpunordss %xmm5, %xmm5
movaps %xmm5, %xmm4
andps %xmm3, %xmm4
maxss %xmm2, %xmm3
andnps %xmm3, %xmm5
orps %xmm4, %xmm5
incq %rax
movaps %xmm5, %xmm2
cmpq $104857600, %rax # imm = 0x6400000
jne .LBB1_5
# %bb.6:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $11, %edx
movaps %xmm5, 128(%rsp) # 16-byte Spill
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movaps 128(%rsp), %xmm0 # 16-byte Reload
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_11
# %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_9
# %bb.8:
movzbl 67(%rbx), %ecx
jmp .LBB1_10
.LBB1_9:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB1_11:
.cfi_def_cfa_offset 176
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addiPfS_,@object # @_Z3addiPfS_
.section .rodata,"a",@progbits
.globl _Z3addiPfS_
.p2align 3, 0x0
_Z3addiPfS_:
.quad _Z18__device_stub__addiPfS_
.size _Z3addiPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Max error: "
.size .L.str, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addiPfS_"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addiPfS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <stdio.h>
#define THREADS 16
#define BLOCKS 8
__global__ void __add__(int *array, int *size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > *size) return;
int temp = 0;
int before = (idx + 1) % *size;
int after = idx - 1;
if (after < 0) after = *size - 1;
temp += array[idx];
temp += array[before];
temp += array[after];
__syncthreads(); // Barrera...
array[idx] = temp;
}
extern "C" void call_device_sum(int *h_a, int size)
{
int *dev_a = NULL;
int *dev_size = NULL;
cudaMalloc(&dev_size, sizeof(int));
cudaMemset(dev_size, 0, sizeof(int));
cudaMemcpy(dev_size, &size, sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&dev_a, (size + 2) * sizeof(int));
cudaMemset(dev_a, 0, (size + 2) * sizeof(int));
cudaMemcpy(dev_a, h_a, (size + 2) * sizeof(int), cudaMemcpyHostToDevice);
__add__ <<<BLOCKS, THREADS>>>(dev_a, dev_size);
// se transfieren los datos del dispositivo a memoria.
cudaMemcpy(h_a, dev_a, (size + 2) * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_size);
} | code for sm_80
Function : _Z7__add__PiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */
/* 0x000fca00078e00ff */
/*0040*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea8000c1e1900 */
/*0050*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0060*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0207 */
/*0080*/ ISETP.GT.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x004fda0003f04270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IABS R8, R5.reuse ; /* 0x0000000500087213 */
/* 0x080fe40000000000 */
/*00b0*/ IABS R9, R5 ; /* 0x0000000500097213 */
/* 0x000fe40000000000 */
/*00c0*/ I2F.RP R6, R8 ; /* 0x0000000800067306 */
/* 0x000e220000209400 */
/*00d0*/ IADD3 R4, R0, 0x1, RZ ; /* 0x0000000100047810 */
/* 0x000fe40007ffe0ff */
/*00e0*/ IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff097224 */
/* 0x000fe400078e0a09 */
/*00f0*/ ISETP.GE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc60003f26270 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R2, R6, 0xffffffe, RZ ; /* 0x0ffffffe06027810 */
/* 0x001fe40007ffe0ff */
/*0120*/ IABS R6, R4 ; /* 0x0000000400067213 */
/* 0x000fc80000000000 */
/*0130*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0140*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe200078e00ff */
/*0150*/ IADD3 R7, RZ, -R3, RZ ; /* 0x80000003ff077210 */
/* 0x002fca0007ffe0ff */
/*0160*/ IMAD R7, R7, R8, RZ ; /* 0x0000000807077224 */
/* 0x000fc800078e02ff */
/*0170*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fe200078e0002 */
/*0180*/ MOV R2, R9 ; /* 0x0000000900027202 */
/* 0x000fe20000000f00 */
/*0190*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fc800000001ff */
/*01a0*/ IMAD.HI.U32 R3, R3, R6, RZ ; /* 0x0000000603037227 */
/* 0x000fc800078e00ff */
/*01b0*/ IMAD R3, R3, R2, R6 ; /* 0x0000000203037224 */
/* 0x000fca00078e0206 */
/*01c0*/ ISETP.GT.U32.AND P0, PT, R8, R3, PT ; /* 0x000000030800720c */
/* 0x000fda0003f04070 */
/*01d0*/ @!P0 IMAD.IADD R3, R3, 0x1, -R8 ; /* 0x0000000103038824 */
/* 0x000fe200078e0a08 */
/*01e0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fc80003f05270 */
/*01f0*/ ISETP.GT.U32.AND P2, PT, R8, R3, PT ; /* 0x000000030800720c */
/* 0x000fda0003f44070 */
/*0200*/ @!P2 IMAD.IADD R3, R3, 0x1, -R8 ; /* 0x000000010303a824 */
/* 0x000fe200078e0a08 */
/*0210*/ ISETP.GE.AND P2, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fc80003f46270 */
/*0220*/ MOV R4, R3 ; /* 0x0000000300047202 */
/* 0x000fe40000000f00 */
/*0230*/ SEL R2, R5, R0, !P2 ; /* 0x0000000005027207 */
/* 0x000fc60005000000 */
/*0240*/ @!P1 IMAD.MOV R4, RZ, RZ, -R4 ; /* 0x000000ffff049224 */
/* 0x000fe200078e0a04 */
/*0250*/ IADD3 R6, R2, -0x1, RZ ; /* 0xffffffff02067810 */
/* 0x000fe20007ffe0ff */
/*0260*/ IMAD.WIDE R2, R0, R9, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0209 */
/*0270*/ @!P0 LOP3.LUT R4, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff048212 */
/* 0x000fc600078e33ff */
/*0280*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0290*/ IMAD.WIDE R4, R4, R9, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0209 */
/*02a0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ea8000c1e1900 */
/*02b0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea8000c1e1900 */
/*02c0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*02d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*02e0*/ IADD3 R9, R6, R4, R9 ; /* 0x0000000406097210 */
/* 0x004fca0007ffe009 */
/*02f0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101904 */
/*0300*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0310*/ BRA 0x310; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <stdio.h>
#define THREADS 16
#define BLOCKS 8
__global__ void __add__(int *array, int *size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > *size) return;
int temp = 0;
int before = (idx + 1) % *size;
int after = idx - 1;
if (after < 0) after = *size - 1;
temp += array[idx];
temp += array[before];
temp += array[after];
__syncthreads(); // Barrera...
array[idx] = temp;
}
extern "C" void call_device_sum(int *h_a, int size)
{
int *dev_a = NULL;
int *dev_size = NULL;
cudaMalloc(&dev_size, sizeof(int));
cudaMemset(dev_size, 0, sizeof(int));
cudaMemcpy(dev_size, &size, sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&dev_a, (size + 2) * sizeof(int));
cudaMemset(dev_a, 0, (size + 2) * sizeof(int));
cudaMemcpy(dev_a, h_a, (size + 2) * sizeof(int), cudaMemcpyHostToDevice);
__add__ <<<BLOCKS, THREADS>>>(dev_a, dev_size);
// se transfieren los datos del dispositivo a memoria.
cudaMemcpy(h_a, dev_a, (size + 2) * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_size);
} | .file "tmpxft_0006c00e_00000000-6_add.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z7__add__PiS_PiS_
.type _Z28__device_stub__Z7__add__PiS_PiS_, @function
_Z28__device_stub__Z7__add__PiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7__add__PiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z28__device_stub__Z7__add__PiS_PiS_, .-_Z28__device_stub__Z7__add__PiS_PiS_
.globl _Z7__add__PiS_
.type _Z7__add__PiS_, @function
_Z7__add__PiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z7__add__PiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z7__add__PiS_, .-_Z7__add__PiS_
.globl call_device_sum
.type call_device_sum, @function
call_device_sum:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbx
movl %esi, 12(%rsp)
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $0, 16(%rsp)
movq $0, 24(%rsp)
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $4, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
leaq 12(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl 12(%rsp), %eax
leal 2(%rax), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 16(%rsp), %rdi
call cudaMalloc@PLT
movl 12(%rsp), %eax
leal 2(%rax), %edx
movslq %edx, %rdx
salq $2, %rdx
movl $0, %esi
movq 16(%rsp), %rdi
call cudaMemset@PLT
movl 12(%rsp), %eax
leal 2(%rax), %edx
movslq %edx, %rdx
salq $2, %rdx
movl $1, %ecx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $16, 44(%rsp)
movl $1, 48(%rsp)
movl $8, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl 12(%rsp), %eax
leal 2(%rax), %edx
movslq %edx, %rdx
salq $2, %rdx
movl $2, %ecx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z28__device_stub__Z7__add__PiS_PiS_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size call_device_sum, .-call_device_sum
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7__add__PiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7__add__PiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <stdio.h>
#define THREADS 16
#define BLOCKS 8
__global__ void __add__(int *array, int *size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > *size) return;
int temp = 0;
int before = (idx + 1) % *size;
int after = idx - 1;
if (after < 0) after = *size - 1;
temp += array[idx];
temp += array[before];
temp += array[after];
__syncthreads(); // Barrera...
array[idx] = temp;
}
extern "C" void call_device_sum(int *h_a, int size)
{
int *dev_a = NULL;
int *dev_size = NULL;
cudaMalloc(&dev_size, sizeof(int));
cudaMemset(dev_size, 0, sizeof(int));
cudaMemcpy(dev_size, &size, sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&dev_a, (size + 2) * sizeof(int));
cudaMemset(dev_a, 0, (size + 2) * sizeof(int));
cudaMemcpy(dev_a, h_a, (size + 2) * sizeof(int), cudaMemcpyHostToDevice);
__add__ <<<BLOCKS, THREADS>>>(dev_a, dev_size);
// se transfieren los datos del dispositivo a memoria.
cudaMemcpy(h_a, dev_a, (size + 2) * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_size);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#define THREADS 16
#define BLOCKS 8
__global__ void __add__(int *array, int *size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > *size) return;
int temp = 0;
int before = (idx + 1) % *size;
int after = idx - 1;
if (after < 0) after = *size - 1;
temp += array[idx];
temp += array[before];
temp += array[after];
__syncthreads(); // Barrera...
array[idx] = temp;
}
extern "C" void call_device_sum(int *h_a, int size)
{
int *dev_a = NULL;
int *dev_size = NULL;
hipMalloc(&dev_size, sizeof(int));
hipMemset(dev_size, 0, sizeof(int));
hipMemcpy(dev_size, &size, sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&dev_a, (size + 2) * sizeof(int));
hipMemset(dev_a, 0, (size + 2) * sizeof(int));
hipMemcpy(dev_a, h_a, (size + 2) * sizeof(int), hipMemcpyHostToDevice);
__add__ <<<BLOCKS, THREADS>>>(dev_a, dev_size);
// se transfieren los datos del dispositivo a memoria.
hipMemcpy(h_a, dev_a, (size + 2) * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_size);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define THREADS 16
#define BLOCKS 8
__global__ void __add__(int *array, int *size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > *size) return;
int temp = 0;
int before = (idx + 1) % *size;
int after = idx - 1;
if (after < 0) after = *size - 1;
temp += array[idx];
temp += array[before];
temp += array[after];
__syncthreads(); // Barrera...
array[idx] = temp;
}
extern "C" void call_device_sum(int *h_a, int size)
{
int *dev_a = NULL;
int *dev_size = NULL;
hipMalloc(&dev_size, sizeof(int));
hipMemset(dev_size, 0, sizeof(int));
hipMemcpy(dev_size, &size, sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&dev_a, (size + 2) * sizeof(int));
hipMemset(dev_a, 0, (size + 2) * sizeof(int));
hipMemcpy(dev_a, h_a, (size + 2) * sizeof(int), hipMemcpyHostToDevice);
__add__ <<<BLOCKS, THREADS>>>(dev_a, dev_size);
// se transfieren los datos del dispositivo a memoria.
hipMemcpy(h_a, dev_a, (size + 2) * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_size);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7__add__PiS_
.globl _Z7__add__PiS_
.p2align 8
.type _Z7__add__PiS_,@function
_Z7__add__PiS_:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x8
s_load_b32 s4, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[2:3], 0x0
s_and_b32 s3, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_ge_i32_e64 s2, v1
s_cbranch_execz .LBB0_2
s_ashr_i32 s3, s2, 31
v_add_nc_u32_e32 v2, 1, v1
s_add_i32 s4, s2, s3
s_load_b64 s[0:1], s[0:1], 0x0
s_xor_b32 s3, s4, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v4, 31, v2
v_cvt_f32_u32_e32 v0, s3
s_sub_i32 s4, 0, s3
v_add_nc_u32_e32 v2, v2, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
v_xor_b32_e32 v2, v2, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v0, v0
v_mul_lo_u32 v3, s4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v3, v0, v3
v_add_nc_u32_e32 v0, v0, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v0, v2, v0
v_mul_lo_u32 v0, v0, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v0, v2, v0
v_subrev_nc_u32_e32 v2, s3, v0
v_cmp_le_u32_e32 vcc_lo, s3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
v_subrev_nc_u32_e32 v2, s3, v0
v_cmp_le_u32_e32 vcc_lo, s3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, 1, v1
v_ashrrev_i32_e32 v2, 31, v1
v_xor_b32_e32 v0, v0, v4
v_cndmask_b32_e64 v3, v1, s2, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v5, v0, v4
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[2:3], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_lshlrev_b64 v[4:5], 2, v[5:6]
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_clause 0x2
global_load_b32 v6, v[0:1], off
global_load_b32 v4, v[4:5], off
global_load_b32 v2, v[2:3], off offset:-4
s_waitcnt vmcnt(0)
s_barrier
buffer_gl0_inv
v_add3_u32 v2, v4, v6, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7__add__PiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7__add__PiS_, .Lfunc_end0-_Z7__add__PiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7__add__PiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7__add__PiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define THREADS 16
#define BLOCKS 8
__global__ void __add__(int *array, int *size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > *size) return;
int temp = 0;
int before = (idx + 1) % *size;
int after = idx - 1;
if (after < 0) after = *size - 1;
temp += array[idx];
temp += array[before];
temp += array[after];
__syncthreads(); // Barrera...
array[idx] = temp;
}
extern "C" void call_device_sum(int *h_a, int size)
{
int *dev_a = NULL;
int *dev_size = NULL;
hipMalloc(&dev_size, sizeof(int));
hipMemset(dev_size, 0, sizeof(int));
hipMemcpy(dev_size, &size, sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&dev_a, (size + 2) * sizeof(int));
hipMemset(dev_a, 0, (size + 2) * sizeof(int));
hipMemcpy(dev_a, h_a, (size + 2) * sizeof(int), hipMemcpyHostToDevice);
__add__ <<<BLOCKS, THREADS>>>(dev_a, dev_size);
// se transfieren los datos del dispositivo a memoria.
hipMemcpy(h_a, dev_a, (size + 2) * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_size);
} | .text
.file "add.hip"
.globl _Z22__device_stub____add__PiS_ # -- Begin function _Z22__device_stub____add__PiS_
.p2align 4, 0x90
.type _Z22__device_stub____add__PiS_,@function
_Z22__device_stub____add__PiS_: # @_Z22__device_stub____add__PiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z7__add__PiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z22__device_stub____add__PiS_, .Lfunc_end0-_Z22__device_stub____add__PiS_
.cfi_endproc
# -- End function
.globl call_device_sum # -- Begin function call_device_sum
.p2align 4, 0x90
.type call_device_sum,@function
call_device_sum: # @call_device_sum
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $112, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -16
movq %rdi, %rbx
movl %esi, 12(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 24(%rsp), %rdi
movl $4, %edx
xorl %esi, %esi
callq hipMemset
movq 24(%rsp), %rdi
leaq 12(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movslq 12(%rsp), %rax
leaq 8(,%rax,4), %rsi
leaq 16(%rsp), %rdi
callq hipMalloc
movq 16(%rsp), %rdi
movslq 12(%rsp), %rax
leaq 8(,%rax,4), %rdx
xorl %esi, %esi
callq hipMemset
movq 16(%rsp), %rdi
movslq 12(%rsp), %rax
leaq 8(,%rax,4), %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967304, %rdi # imm = 0x100000008
leaq 8(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7__add__PiS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 16(%rsp), %rsi
movslq 12(%rsp), %rax
leaq 8(,%rax,4), %rdx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
addq $112, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size call_device_sum, .Lfunc_end1-call_device_sum
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7__add__PiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7__add__PiS_,@object # @_Z7__add__PiS_
.section .rodata,"a",@progbits
.globl _Z7__add__PiS_
.p2align 3, 0x0
_Z7__add__PiS_:
.quad _Z22__device_stub____add__PiS_
.size _Z7__add__PiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7__add__PiS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub____add__PiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7__add__PiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7__add__PiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */
/* 0x000fca00078e00ff */
/*0040*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea8000c1e1900 */
/*0050*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0060*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0207 */
/*0080*/ ISETP.GT.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x004fda0003f04270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IABS R8, R5.reuse ; /* 0x0000000500087213 */
/* 0x080fe40000000000 */
/*00b0*/ IABS R9, R5 ; /* 0x0000000500097213 */
/* 0x000fe40000000000 */
/*00c0*/ I2F.RP R6, R8 ; /* 0x0000000800067306 */
/* 0x000e220000209400 */
/*00d0*/ IADD3 R4, R0, 0x1, RZ ; /* 0x0000000100047810 */
/* 0x000fe40007ffe0ff */
/*00e0*/ IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff097224 */
/* 0x000fe400078e0a09 */
/*00f0*/ ISETP.GE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc60003f26270 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R2, R6, 0xffffffe, RZ ; /* 0x0ffffffe06027810 */
/* 0x001fe40007ffe0ff */
/*0120*/ IABS R6, R4 ; /* 0x0000000400067213 */
/* 0x000fc80000000000 */
/*0130*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0140*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe200078e00ff */
/*0150*/ IADD3 R7, RZ, -R3, RZ ; /* 0x80000003ff077210 */
/* 0x002fca0007ffe0ff */
/*0160*/ IMAD R7, R7, R8, RZ ; /* 0x0000000807077224 */
/* 0x000fc800078e02ff */
/*0170*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fe200078e0002 */
/*0180*/ MOV R2, R9 ; /* 0x0000000900027202 */
/* 0x000fe20000000f00 */
/*0190*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fc800000001ff */
/*01a0*/ IMAD.HI.U32 R3, R3, R6, RZ ; /* 0x0000000603037227 */
/* 0x000fc800078e00ff */
/*01b0*/ IMAD R3, R3, R2, R6 ; /* 0x0000000203037224 */
/* 0x000fca00078e0206 */
/*01c0*/ ISETP.GT.U32.AND P0, PT, R8, R3, PT ; /* 0x000000030800720c */
/* 0x000fda0003f04070 */
/*01d0*/ @!P0 IMAD.IADD R3, R3, 0x1, -R8 ; /* 0x0000000103038824 */
/* 0x000fe200078e0a08 */
/*01e0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fc80003f05270 */
/*01f0*/ ISETP.GT.U32.AND P2, PT, R8, R3, PT ; /* 0x000000030800720c */
/* 0x000fda0003f44070 */
/*0200*/ @!P2 IMAD.IADD R3, R3, 0x1, -R8 ; /* 0x000000010303a824 */
/* 0x000fe200078e0a08 */
/*0210*/ ISETP.GE.AND P2, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fc80003f46270 */
/*0220*/ MOV R4, R3 ; /* 0x0000000300047202 */
/* 0x000fe40000000f00 */
/*0230*/ SEL R2, R5, R0, !P2 ; /* 0x0000000005027207 */
/* 0x000fc60005000000 */
/*0240*/ @!P1 IMAD.MOV R4, RZ, RZ, -R4 ; /* 0x000000ffff049224 */
/* 0x000fe200078e0a04 */
/*0250*/ IADD3 R6, R2, -0x1, RZ ; /* 0xffffffff02067810 */
/* 0x000fe20007ffe0ff */
/*0260*/ IMAD.WIDE R2, R0, R9, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0209 */
/*0270*/ @!P0 LOP3.LUT R4, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff048212 */
/* 0x000fc600078e33ff */
/*0280*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0290*/ IMAD.WIDE R4, R4, R9, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0209 */
/*02a0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ea8000c1e1900 */
/*02b0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea8000c1e1900 */
/*02c0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*02d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*02e0*/ IADD3 R9, R6, R4, R9 ; /* 0x0000000406097210 */
/* 0x004fca0007ffe009 */
/*02f0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101904 */
/*0300*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0310*/ BRA 0x310; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7__add__PiS_
.globl _Z7__add__PiS_
.p2align 8
.type _Z7__add__PiS_,@function
_Z7__add__PiS_:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x8
s_load_b32 s4, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[2:3], 0x0
s_and_b32 s3, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
v_cmpx_ge_i32_e64 s2, v1
s_cbranch_execz .LBB0_2
s_ashr_i32 s3, s2, 31
v_add_nc_u32_e32 v2, 1, v1
s_add_i32 s4, s2, s3
s_load_b64 s[0:1], s[0:1], 0x0
s_xor_b32 s3, s4, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v4, 31, v2
v_cvt_f32_u32_e32 v0, s3
s_sub_i32 s4, 0, s3
v_add_nc_u32_e32 v2, v2, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
v_xor_b32_e32 v2, v2, v4
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v0, v0
v_mul_lo_u32 v3, s4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v3, v0, v3
v_add_nc_u32_e32 v0, v0, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v0, v2, v0
v_mul_lo_u32 v0, v0, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v0, v2, v0
v_subrev_nc_u32_e32 v2, s3, v0
v_cmp_le_u32_e32 vcc_lo, s3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
v_subrev_nc_u32_e32 v2, s3, v0
v_cmp_le_u32_e32 vcc_lo, s3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, 1, v1
v_ashrrev_i32_e32 v2, 31, v1
v_xor_b32_e32 v0, v0, v4
v_cndmask_b32_e64 v3, v1, s2, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v5, v0, v4
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[2:3], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_lshlrev_b64 v[4:5], 2, v[5:6]
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_clause 0x2
global_load_b32 v6, v[0:1], off
global_load_b32 v4, v[4:5], off
global_load_b32 v2, v[2:3], off offset:-4
s_waitcnt vmcnt(0)
s_barrier
buffer_gl0_inv
v_add3_u32 v2, v4, v6, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7__add__PiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7__add__PiS_, .Lfunc_end0-_Z7__add__PiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7__add__PiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7__add__PiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0006c00e_00000000-6_add.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z7__add__PiS_PiS_
.type _Z28__device_stub__Z7__add__PiS_PiS_, @function
_Z28__device_stub__Z7__add__PiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7__add__PiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z28__device_stub__Z7__add__PiS_PiS_, .-_Z28__device_stub__Z7__add__PiS_PiS_
.globl _Z7__add__PiS_
.type _Z7__add__PiS_, @function
_Z7__add__PiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z7__add__PiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z7__add__PiS_, .-_Z7__add__PiS_
.globl call_device_sum
.type call_device_sum, @function
call_device_sum:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbx
movl %esi, 12(%rsp)
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $0, 16(%rsp)
movq $0, 24(%rsp)
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $4, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
leaq 12(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl 12(%rsp), %eax
leal 2(%rax), %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 16(%rsp), %rdi
call cudaMalloc@PLT
movl 12(%rsp), %eax
leal 2(%rax), %edx
movslq %edx, %rdx
salq $2, %rdx
movl $0, %esi
movq 16(%rsp), %rdi
call cudaMemset@PLT
movl 12(%rsp), %eax
leal 2(%rax), %edx
movslq %edx, %rdx
salq $2, %rdx
movl $1, %ecx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $16, 44(%rsp)
movl $1, 48(%rsp)
movl $8, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl 12(%rsp), %eax
leal 2(%rax), %edx
movslq %edx, %rdx
salq $2, %rdx
movl $2, %ecx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z28__device_stub__Z7__add__PiS_PiS_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size call_device_sum, .-call_device_sum
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7__add__PiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7__add__PiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "add.hip"
.globl _Z22__device_stub____add__PiS_ # -- Begin function _Z22__device_stub____add__PiS_
.p2align 4, 0x90
.type _Z22__device_stub____add__PiS_,@function
_Z22__device_stub____add__PiS_: # @_Z22__device_stub____add__PiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z7__add__PiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z22__device_stub____add__PiS_, .Lfunc_end0-_Z22__device_stub____add__PiS_
.cfi_endproc
# -- End function
.globl call_device_sum # -- Begin function call_device_sum
.p2align 4, 0x90
.type call_device_sum,@function
call_device_sum: # @call_device_sum
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $112, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -16
movq %rdi, %rbx
movl %esi, 12(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 24(%rsp), %rdi
movl $4, %edx
xorl %esi, %esi
callq hipMemset
movq 24(%rsp), %rdi
leaq 12(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movslq 12(%rsp), %rax
leaq 8(,%rax,4), %rsi
leaq 16(%rsp), %rdi
callq hipMalloc
movq 16(%rsp), %rdi
movslq 12(%rsp), %rax
leaq 8(,%rax,4), %rdx
xorl %esi, %esi
callq hipMemset
movq 16(%rsp), %rdi
movslq 12(%rsp), %rax
leaq 8(,%rax,4), %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967304, %rdi # imm = 0x100000008
leaq 8(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 24(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7__add__PiS_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 16(%rsp), %rsi
movslq 12(%rsp), %rax
leaq 8(,%rax,4), %rdx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
addq $112, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size call_device_sum, .Lfunc_end1-call_device_sum
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7__add__PiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7__add__PiS_,@object # @_Z7__add__PiS_
.section .rodata,"a",@progbits
.globl _Z7__add__PiS_
.p2align 3, 0x0
_Z7__add__PiS_:
.quad _Z22__device_stub____add__PiS_
.size _Z7__add__PiS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7__add__PiS_"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub____add__PiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7__add__PiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* file name: convolution2D.cu
*
* CPE810A: Homework 3: Convolution
*
* Yupeng Cao, 10454637
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <ctime>
// Define constant memory for kernel storage on Device
#define KERNEL_RADIUS 128
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__constant__ float d_Kernel[KERNEL_W];
// Define Tile Size
#define TILE_W 16 // active cell width
#define TILE_H 16 // active cell height
#define TILE_SIZE (TILE_W + KERNEL_RADIUS * 2) * (TILE_W + KERNEL_RADIUS * 2)
#define UNROLL_INNER
clock_t start, row, col;
/*
*********************************************************************
Define Error Checking methods
cudaSafeCall: Check data allocate
cudaCheckError: Check kernel function execution
*********************************************************************
*/
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with a file:line diagnostic when a CUDA runtime call returns an error.
// Compiled to a no-op check body unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( cudaSuccess == err )
        return;                       // fast path: nothing to report

    fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
             file, line, cudaGetErrorString( err ) );
    exit( -1 );
#endif
    return;
}
// Surface kernel-launch failures: first check the sticky launch error, then
// synchronize so asynchronous execution errors are reported at this call site.
// Both checks abort with a file:line diagnostic on failure.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();   // launch-configuration errors
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }

    err = cudaDeviceSynchronize();        // in-kernel faults surface here
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
/*
 *********************************************************************
 function name: convolutionRowGPU
 Row-direction 1D convolution. Each block caches one padded tile row
 (blockDim.x active pixels + dimK halo on each side) in shared memory.
 Assumes blockDim.x == TILE_W, dimK <= KERNEL_RADIUS, and that dimX/dimY
 are multiples of the block dimensions (no y bounds check — as before).
 parameters:
    d_OutputRow: Space for saving results
    d_Input: Input image
    dimX: Width
    dimY: Height
    dimK: Kernel radius (taps = 2*dimK + 1, read from __constant__ d_Kernel)
 *********************************************************************
 */
__global__ void convolutionRowGPU(float* d_OutputRow, float* d_Input, int dimX, int dimY, int dimK)
{
    // Data cache: threadIdx.y selects one padded row of the tile
    __shared__ float data[ TILE_H * (TILE_W + KERNEL_RADIUS * 2) ];

    // global mem address of this thread
    const int gLoc = threadIdx.x +
                     blockIdx.x * blockDim.x +
                     threadIdx.y * dimX +
                     blockIdx.y * blockDim.y * dimX;

    const int tileOriginX = blockIdx.x * blockDim.x;      // image x of tile's first active column
    const int rowOffset   = gLoc - threadIdx.x - tileOriginX; // start of this image row
    const int tileWidth   = blockDim.x + dimK * 2;        // active cells + left/right halo
    const int shift       = threadIdx.y * (TILE_W + dimK * 2); // this row's slot in data[]

    // BUG FIX: the original pair of fixed loads filled only 2*blockDim.x cells,
    // and its layout was self-consistent only when blockDim.x == 2*dimK, while
    // the loop below reads blockDim.x + 2*dimK cells. Load the whole padded row
    // cooperatively instead, zero-filling cells that fall outside the image
    // (this also stops halo reads from wrapping into the neighboring image row).
    for (int j = threadIdx.x; j < tileWidth; j += blockDim.x)
    {
        int x = tileOriginX + j - dimK;   // image-based x coordinate of cell j
        data[shift + j] = (x < 0 || x > dimX - 1) ? 0 : d_Input[rowOffset + x];
    }
    __syncthreads();

    // convolution: cell dimK + threadIdx.x is this thread's own pixel
    float sum = 0;
    int x = dimK + threadIdx.x;
    for (int i = -dimK; i <= dimK; i++)
        sum += data[x + i + shift] * d_Kernel[dimK + i];
    d_OutputRow[gLoc] = sum;
    // (trailing __syncthreads() removed: shared memory is not reused afterwards)
}
/*
 *********************************************************************
 function name: convolutionColGPU
 Column-direction 1D convolution. Each block caches one padded tile
 column strip (blockDim.y active rows + dimK halo above and below) in
 shared memory. Assumes blockDim.x == TILE_W, dimK <= KERNEL_RADIUS, and
 that dimX/dimY are multiples of the block dimensions (no x bounds
 check — as before).
 parameters:
    d_OutputCol: Space for saving results
    d_Input: Input image
    dimX: Width
    dimY: Height
    dimK: Kernel radius (taps = 2*dimK + 1, read from __constant__ d_Kernel)
 *********************************************************************
 */
__global__ void convolutionColGPU(float* d_OutputCol, float* d_Input, int dimX, int dimY, int dimK)
{
    // Data cache: row j of the tile lives at data[j*TILE_W + threadIdx.x]
    __shared__ float data[TILE_W * (TILE_H + KERNEL_RADIUS * 2)];

    // global mem address of this thread
    const int gLoc = threadIdx.x +
                     blockIdx.x * blockDim.x +
                     threadIdx.y * dimX +
                     blockIdx.y * blockDim.y * dimX;

    const int x           = threadIdx.x + blockIdx.x * blockDim.x; // image x column
    const int tileOriginY = blockIdx.y * blockDim.y;               // image y of tile's first active row
    const int tileHeight  = blockDim.y + dimK * 2;                 // active rows + top/bottom halo

    // BUG FIX: the original pair of fixed loads filled only 2*blockDim.y rows,
    // and its layout was self-consistent only when blockDim.y == 2*dimK, while
    // the loop below reads blockDim.y + 2*dimK rows. Load the whole padded
    // column strip cooperatively, zero-filling rows outside the image.
    for (int j = threadIdx.y; j < tileHeight; j += blockDim.y)
    {
        int y = tileOriginY + j - dimK;   // image-based y coordinate of tile row j
        data[j * TILE_W + threadIdx.x] = (y < 0 || y > dimY - 1) ? 0 : d_Input[y * dimX + x];
    }
    __syncthreads();

    // convolution: tile rows threadIdx.y .. threadIdx.y + 2*dimK cover
    // image rows y0 - dimK .. y0 + dimK for this thread
    float sum = 0;
    for (int i = 0; i <= dimK * 2; i++)
        sum += data[(threadIdx.y + i) * TILE_W + threadIdx.x] * d_Kernel[i];
    d_OutputCol[gLoc] = sum;
    // (trailing __syncthreads() removed: shared memory is not reused afterwards)
}
/*
 *********************************************************************
 function name: convolutionRowCPU
 CPU reference for the row-direction convolution over the interior of
 the image; writes into an output of (ySize - 2) columns per row.
 NOTE(review): despite taking kernel_size, this reference always applies
 exactly 3 taps (c < 3) and requires kernel to hold at least 3 values —
 TODO confirm it is only compared against dimK producing 3-tap results.
 *********************************************************************
 */
void convolutionRowCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    // BUG FIX: the original `new float[kernel_size]` scratch buffer was never
    // deleted (leak) and was written/read at indices 0..2 regardless of
    // kernel_size, overrunning the allocation whenever kernel_size < 3. A
    // fixed 3-element stack array matches the hard-coded 3-tap loop exactly.
    float temp[3];
    int outCol = ySize - 2;
    for (int i = floor(kernel_size / 2); i < xSize - (kernel_size / 2); i++)
    {
        for (int j = floor(kernel_size / 2); j < ySize - floor(kernel_size / 2); j++)
        {
            // horizontal neighborhood of pixel (i, j)
            for (int c = 0; c < 3; c++)
            {
                *(temp + c) = *(kernel + c) * *(input + i * ySize + (j + (c - kernel_size + 2)));
            }
            *(output + (i - 1) * outCol + (j - 1)) = *(temp + 0) + *(temp + 1) + *(temp + 2);
        }
    }
}
/*
 *********************************************************************
 function name: convolutionColCPU
 CPU reference for the column-direction convolution over the interior of
 the image; writes into an output of (ySize - 2) columns per row.
 NOTE(review): despite taking kernel_size, this reference always applies
 exactly 3 taps (c < 3) and requires kernel to hold at least 3 values —
 TODO confirm it is only compared against dimK producing 3-tap results.
 *********************************************************************
 */
void convolutionColCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    // BUG FIX: the original `new float[kernel_size]` scratch buffer was never
    // deleted (leak) and was written/read at indices 0..2 regardless of
    // kernel_size, overrunning the allocation whenever kernel_size < 3. A
    // fixed 3-element stack array matches the hard-coded 3-tap loop exactly.
    float temp[3];
    int outCol = ySize - 2;
    for (int i = floor(kernel_size / 2); i < xSize - (kernel_size / 2); i++)
    {
        for (int j = floor(kernel_size / 2); j < ySize - floor(kernel_size / 2); j++)
        {
            // vertical neighborhood of pixel (i, j)
            for (int c = 0; c < 3; c++)
            {
                *(temp + c) = *(kernel + c) * *(input + (i + (c - kernel_size + 2)) * ySize + j);
            }
            *(output + (i - 1) * outCol + (j - 1)) = *(temp + 0) + *(temp + 1) + *(temp + 2);
        }
    }
}
// Validate that all three problem dimensions are strictly positive.
// Returns 1 when valid; prints a diagnostic and returns -1 otherwise.
int check_input(int dimX, int dimY, int dimK){
    if (dimX <= 0 || dimY <= 0 || dimK <= 0){
        printf("Input for dimX, dimY, dimK must larger than 0");
        return -1;
    }
    return 1;
}
/*
 *********************************************************************
 Main Function
 Usage: ./execute_file <dimX> <dimY> <dimK>
 Runs row and column convolutions on the GPU, copies results back, and
 computes CPU references. Assumes dimX/dimY are multiples of TILE_W/TILE_H.
 *********************************************************************
 */
int main(int argc, char *argv[])
{
    // Check input parameter
    if (argc == 4){
        printf("Input Data\n");
    }else{
        printf("Error input Parameter \n");
        printf("Please Follow Format to Run Program: ./execute_file <dimX> <dimY> <dimK>\n");
        printf("Please input <dimX>, <dimY>, <dimK> \n");
        printf("dimX and dimY are width and heights for input image and dimK is size for mask \n");
        return 0;
    }
    int dimX = atoi(argv[1]);
    int dimY = atoi(argv[2]);
    int dimK = atoi(argv[3]);
    // BUG FIX: a radius beyond KERNEL_RADIUS would overflow the __constant__
    // d_Kernel array, so refuse to continue instead of only warning.
    if (dimK > KERNEL_RADIUS){
        printf("Your Mask Size is too large. \n");
        printf("We recommend you change a reasonable number. \n");
        return -1;
    }
    if (check_input(dimX, dimY, dimK) == 1){
        printf("Input is Valid \n\n");
    }else{
        return -1;
    }
    srand((unsigned)time(NULL));

    // Initialize image size and kernel size
    unsigned int img_size = dimX * dimY;
    const int kernel_length = dimK;

    // Allocate space for input on host
    float* h_Kernel = (float *)malloc(kernel_length * sizeof(float));
    float* h_Input = (float *)malloc(dimX * dimY * sizeof(float));

    // Initialize Mask and Image with small pseudo-random values in [0, 15].
    for (int i = 0; i < kernel_length; ++i)
    {
        h_Kernel[i] = (float)(rand() % 16);
    }
    for (unsigned i = 0; i < img_size; ++i)
    {
        h_Input[i] = (float)(rand() % 16);
    }

    // Allocate space for saving results on host
    float *h_OutputRowCPU, *h_OutputColCPU, *h_OutputRowGPU, *h_OutputColGPU;
    h_OutputRowCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputRowGPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColGPU = (float *)malloc(img_size * sizeof(float));

    // Allocate space for data on device
    float *d_Input, *d_OutputRow, *d_OutputCol;
    CudaSafeCall(cudaMalloc((void **)&d_Input, img_size * sizeof(float)));
    CudaSafeCall(cudaMalloc((void **)&d_OutputRow, img_size * sizeof(float)));
    CudaSafeCall(cudaMalloc((void **)&d_OutputCol, img_size * sizeof(float)));

    // Move data from host to device
    CudaSafeCall(cudaMemcpy(d_Input, h_Input, img_size * sizeof(float), cudaMemcpyHostToDevice));
    // BUG FIX: the byte count was `kernel_length` (elements, not bytes), so only
    // a quarter of the mask ever reached constant memory.
    CudaSafeCall(cudaMemcpyToSymbol(d_Kernel, h_Kernel, kernel_length * sizeof(float)));

    // Initialize grid and block
    dim3 blocks(TILE_W, TILE_H);
    dim3 grids(dimX/TILE_W, dimY/TILE_H);

    start = clock();
    convolutionRowGPU<<<grids, blocks>>>(d_OutputRow, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    cudaDeviceSynchronize();
    row = clock();
    // BUG FIX: clock() deltas divided by CLOCKS_PER_SEC are seconds; scale by
    // 1000 so the printed value really is milliseconds. This also makes the
    // GFLOPs computation below (which divides by running_time/1000) correct.
    double running_time = (double)(row - start) * 1000.0 / CLOCKS_PER_SEC;
    printf("Row Convolution by using GPU: %f ms.\n", running_time);

    convolutionColGPU<<<grids, blocks>>>(d_OutputCol, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    cudaDeviceSynchronize();

    // BUG FIX: both copies transferred img_size bytes instead of
    // img_size * sizeof(float), leaving 3/4 of each result buffer stale.
    CudaSafeCall(cudaMemcpy(h_OutputRowGPU, d_OutputRow, img_size * sizeof(float), cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpy(h_OutputColGPU, d_OutputCol, img_size * sizeof(float), cudaMemcpyDeviceToHost));

    // CPU reference results (3-tap only — see convolutionRowCPU/ColCPU).
    convolutionRowCPU(h_OutputRowCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    convolutionColCPU(h_OutputColCPU, h_Kernel, h_Input, dimX, dimY, dimK);

    double computation_scale = static_cast<double>(dimX) * static_cast<double>(dimY) * static_cast<double>(dimK);
    double throughput = (computation_scale * 1.0e-9f) / (running_time / 1000.0f);
    printf("Throughput Performance: %f GFLOPs. \n", throughput);

    // BUG FIX: release what was actually allocated — cudaFree() for
    // cudaMalloc'ed buffers (including the previously-leaked d_Input), plain
    // free() for malloc'ed host buffers (cudaFreeHost is only valid for
    // cudaMallocHost/cudaHostAlloc memory), and never cudaFree the
    // __constant__ symbol d_Kernel (it is not a device allocation).
    cudaFree(d_Input);
    cudaFree(d_OutputRow);
    cudaFree(d_OutputCol);
    free(h_Kernel);
    free(h_Input);
    free(h_OutputRowCPU);
    free(h_OutputColCPU);
    free(h_OutputRowGPU);
    free(h_OutputColGPU);
    return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata._Z14__cudaSafeCall9cudaErrorPKci.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "cudaSafeCall() failed at %s:%i : %s\n"
.section .text._Z14__cudaSafeCall9cudaErrorPKci,"axG",@progbits,_Z14__cudaSafeCall9cudaErrorPKci,comdat
.weak _Z14__cudaSafeCall9cudaErrorPKci
.type _Z14__cudaSafeCall9cudaErrorPKci, @function
_Z14__cudaSafeCall9cudaErrorPKci:
.LFB2057:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movl %edx, %ebp
call cudaGetErrorString@PLT
movq %rax, %r9
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z14__cudaSafeCall9cudaErrorPKci, .-_Z14__cudaSafeCall9cudaErrorPKci
.section .rodata._Z16__cudaCheckErrorPKci.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "cudaCheckError() failed at %s:%i : %s\n"
.align 8
.LC2:
.string "cudaCheckError() with sync failed at %s:%i : %s\n"
.section .text._Z16__cudaCheckErrorPKci,"axG",@progbits,_Z16__cudaCheckErrorPKci,comdat
.weak _Z16__cudaCheckErrorPKci
.type _Z16__cudaCheckErrorPKci, @function
_Z16__cudaCheckErrorPKci:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movl %esi, %ebp
call cudaGetLastError@PLT
testl %eax, %eax
jne .L13
call cudaDeviceSynchronize@PLT
testl %eax, %eax
jne .L14
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L14:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z16__cudaCheckErrorPKci, .-_Z16__cudaCheckErrorPKci
.text
.globl _Z17convolutionRowCPUPfS_S_iii
.type _Z17convolutionRowCPUPfS_S_iii, @function
_Z17convolutionRowCPUPfS_S_iii:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 24(%rsp)
movq %rdx, 8(%rsp)
movslq %r9d, %r12
movabsq $2305843009213693950, %rax
cmpq %r12, %rax
jb .L16
movq %rsi, %rbx
movl %ecx, %r13d
movl %r8d, %ebp
movl %r9d, %r14d
leaq 0(,%r12,4), %rdi
call _Znam@PLT
movq %rax, %rdx
leal -2(%rbp), %r15d
movl %r14d, %eax
shrl $31, %eax
movl %eax, %r11d
addl %r14d, %r11d
sarl %r11d
subl %r11d, %r13d
cmpl %r13d, %r11d
jge .L15
movl %ebp, %r10d
imull %r11d, %r10d
leal -1(%r11), %r8d
imull %r15d, %r8d
negq %r12
leaq 0(,%r12,4), %r14
movl %r11d, %r9d
pxor %xmm2, %xmm2
cvtsi2sdl %r11d, %xmm2
pxor %xmm1, %xmm1
cvtsi2sdl %ebp, %xmm1
subsd %xmm2, %xmm1
movslq %r11d, %r12
movl %r11d, 20(%rsp)
movq 24(%rsp), %r11
jmp .L18
.L16:
call __cxa_throw_bad_array_new_length@PLT
.L28:
movss (%rdx), %xmm0
addss 4(%rdx), %xmm0
addss 8(%rdx), %xmm0
movss %xmm0, (%rdi)
addl $1, %esi
addq $4, %rdi
addq $4, %rcx
pxor %xmm0, %xmm0
cvtsi2sdl %esi, %xmm0
comisd %xmm0, %xmm1
jbe .L20
.L23:
movl $0, %eax
.L19:
movss (%rbx,%rax), %xmm0
mulss 8(%rcx,%rax), %xmm0
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq $12, %rax
jne .L19
jmp .L28
.L20:
addl $1, %r9d
addl %ebp, %r10d
addl %r15d, %r8d
cmpl %r13d, %r9d
je .L15
.L18:
comisd %xmm2, %xmm1
jbe .L20
movslq %r8d, %rax
addq %r12, %rax
leaq -4(%r11,%rax,4), %rdi
movslq %r10d, %rax
addq %r12, %rax
leaq (%r14,%rax,4), %rcx
movq 8(%rsp), %rax
addq %rax, %rcx
movl 20(%rsp), %esi
jmp .L23
.L15:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z17convolutionRowCPUPfS_S_iii, .-_Z17convolutionRowCPUPfS_S_iii
.globl _Z17convolutionColCPUPfS_S_iii
.type _Z17convolutionColCPUPfS_S_iii, @function
_Z17convolutionColCPUPfS_S_iii:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 8(%rsp)
movq %rdx, %r15
movslq %r9d, %rax
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L30
movq %rsi, %rbx
movl %ecx, %r13d
movl %r8d, %ebp
movl %r9d, %r12d
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %rcx
leal -2(%rbp), %r14d
movl %r12d, %r11d
shrl $31, %r11d
addl %r12d, %r11d
sarl %r11d
subl %r11d, %r13d
cmpl %r13d, %r11d
jge .L29
leal -1(%r11), %r10d
imull %r14d, %r10d
movl %r11d, %edx
subl %r12d, %edx
addl $2, %edx
imull %ebp, %edx
movslq %ebp, %r8
salq $2, %r8
movl %r11d, %r12d
pxor %xmm2, %xmm2
cvtsi2sdl %r11d, %xmm2
pxor %xmm1, %xmm1
cvtsi2sdl %ebp, %xmm1
subsd %xmm2, %xmm1
movl %r14d, 4(%rsp)
movl %r11d, %esi
jmp .L32
.L30:
call __cxa_throw_bad_array_new_length@PLT
.L42:
movss (%rcx), %xmm0
addss 4(%rcx), %xmm0
addss 8(%rcx), %xmm0
movss %xmm0, -4(%r11,%rdi,4)
leal 1(%rdi), %eax
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
addq $1, %rdi
addq $4, %r9
comisd %xmm0, %xmm1
jbe .L34
.L37:
movq %r9, %r14
movl $0, %eax
.L33:
movss (%rbx,%rax), %xmm0
mulss (%r14), %xmm0
movss %xmm0, (%rcx,%rax)
addq %r8, %r14
addq $4, %rax
cmpq $12, %rax
jne .L33
jmp .L42
.L34:
addl $1, %r12d
movl 4(%rsp), %eax
addl %eax, %r10d
addl %ebp, %edx
cmpl %r13d, %r12d
je .L29
.L32:
comisd %xmm2, %xmm1
jbe .L34
movslq %esi, %rdi
movslq %edx, %rax
addq %rdi, %rax
leaq (%r15,%rax,4), %r9
movslq %r10d, %rax
movq 8(%rsp), %r11
leaq (%r11,%rax,4), %r11
jmp .L37
.L29:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z17convolutionColCPUPfS_S_iii, .-_Z17convolutionColCPUPfS_S_iii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "Input for dimX, dimY, dimK must larger than 0"
.text
.globl _Z11check_inputiii
.type _Z11check_inputiii, @function
_Z11check_inputiii:
.LFB2061:
.cfi_startproc
endbr64
movl %edx, %eax
testl %esi, %esi
setg %dl
testl %eax, %eax
setg %al
testb %al, %dl
je .L47
movl $1, %eax
testl %edi, %edi
jle .L47
ret
.L47:
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _Z11check_inputiii, .-_Z11check_inputiii
.globl _Z42__device_stub__Z17convolutionRowGPUPfS_iiiPfS_iii
.type _Z42__device_stub__Z17convolutionRowGPUPfS_iiiPfS_iii, @function
_Z42__device_stub__Z17convolutionRowGPUPfS_iiiPfS_iii:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17convolutionRowGPUPfS_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z42__device_stub__Z17convolutionRowGPUPfS_iiiPfS_iii, .-_Z42__device_stub__Z17convolutionRowGPUPfS_iiiPfS_iii
.globl _Z17convolutionRowGPUPfS_iii
.type _Z17convolutionRowGPUPfS_iii, @function
_Z17convolutionRowGPUPfS_iii:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z17convolutionRowGPUPfS_iiiPfS_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z17convolutionRowGPUPfS_iii, .-_Z17convolutionRowGPUPfS_iii
.globl _Z42__device_stub__Z17convolutionColGPUPfS_iiiPfS_iii
.type _Z42__device_stub__Z17convolutionColGPUPfS_iiiPfS_iii, @function
_Z42__device_stub__Z17convolutionColGPUPfS_iiiPfS_iii:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L63
.L59:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L64
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17convolutionColGPUPfS_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L59
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z42__device_stub__Z17convolutionColGPUPfS_iiiPfS_iii, .-_Z42__device_stub__Z17convolutionColGPUPfS_iiiPfS_iii
.globl _Z17convolutionColGPUPfS_iii
.type _Z17convolutionColGPUPfS_iii, @function
_Z17convolutionColGPUPfS_iii:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z17convolutionColGPUPfS_iiiPfS_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z17convolutionColGPUPfS_iii, .-_Z17convolutionColGPUPfS_iii
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "Input Data\n"
.LC5:
.string "Error input Parameter \n"
.section .rodata.str1.8
.align 8
.LC6:
.string "Please Follow Format to Run Program: ./execute_file <dimX> <dimY> <dimK>\n"
.align 8
.LC7:
.string "Please input <dimX>, <dimY>, <dimK> \n"
.align 8
.LC8:
.string "dimX and dimY are width and heights for input image and dimK is size for mask \n"
.align 8
.LC9:
.string "Your Mask Size is too large. \n"
.align 8
.LC10:
.string "We recommend you change a reasonable number. \n"
.section .rodata.str1.1
.LC11:
.string "Input is Valid \n\n"
.section .rodata.str1.8
.align 8
.LC12:
.string "/home/ubuntu/Datasets/stackv2/train-structured/CYP0630/CUDA_Practice/master/Convolution2D/convolution2D.cu"
.align 8
.LC14:
.string "Row Convolution by using GPU: %f ms.\n"
.align 8
.LC17:
.string "Throughput Performance: %f GFLOPs. \n"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
cmpl $4, %edi
jne .L68
movq %rsi, %rbx
leaq .LC4(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, 16(%rsp)
movl %eax, (%rsp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, 8(%rsp)
movl %eax, 4(%rsp)
movq 24(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movl %eax, %ebx
cmpl $128, %eax
jg .L83
.L70:
movl %ebx, %edx
movl 4(%rsp), %esi
movl (%rsp), %edi
call _Z11check_inputiii
cmpl $1, %eax
jne .L79
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl 16(%rsp), %r13d
movl 8(%rsp), %eax
imull %eax, %r13d
movslq %ebp, %rax
movq %rax, 40(%rsp)
leaq 0(,%rax,4), %rdi
call malloc@PLT
movq %rax, %r12
movslq %r13d, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, %r14
testl %ebp, %ebp
je .L72
movq %r12, %rbx
movl %ebp, %eax
leaq (%r12,%rax,4), %r15
.L73:
call rand@PLT
cltd
shrl $28, %edx
addl %edx, %eax
andl $15, %eax
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %r15, %rbx
jne .L73
.L72:
testl %r13d, %r13d
je .L74
movq %r14, %rbx
movl %r13d, %eax
leaq (%r14,%rax,4), %r15
.L75:
call rand@PLT
cltd
shrl $28, %edx
addl %edx, %eax
andl $15, %eax
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %r15, %rbx
jne .L75
.L74:
movl %r13d, %r15d
leaq 0(,%r15,4), %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 48(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 56(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 32(%rsp)
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl %eax, %edi
movl $321, %edx
leaq .LC12(%rip), %r13
movq %r13, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 80(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl %eax, %edi
movl $322, %edx
movq %r13, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
leaq 88(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl %eax, %edi
movl $323, %edx
movq %r13, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $326, %edx
movq %r13, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $1, %r8d
movl $0, %ecx
movq 40(%rsp), %rdx
movq %r12, %rsi
leaq _ZL8d_Kernel(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl %eax, %edi
movl $327, %edx
movq %r13, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $16, 96(%rsp)
movl $16, 100(%rsp)
movl $1, 104(%rsp)
movl $16, %ecx
movl (%rsp), %eax
cltd
idivl %ecx
movl %eax, 108(%rsp)
movl 4(%rsp), %eax
cltd
idivl %ecx
movl %eax, 112(%rsp)
movl $1, 116(%rsp)
call clock@PLT
movq %rax, start(%rip)
movl 104(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 96(%rsp), %rdx
movq 108(%rsp), %rdi
movl 116(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L84
.L76:
movl $335, %esi
leaq .LC12(%rip), %rdi
call _Z16__cudaCheckErrorPKci
call cudaDeviceSynchronize@PLT
call clock@PLT
movq %rax, row(%rip)
subq start(%rip), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC13(%rip), %xmm0
movq %xmm0, %r13
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 104(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 96(%rsp), %rdx
movq 108(%rsp), %rdi
movl 116(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L85
.L77:
movl $343, %esi
leaq .LC12(%rip), %rbx
movq %rbx, %rdi
call _Z16__cudaCheckErrorPKci
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %r15, %rdx
movq 80(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $349, %edx
movq %rbx, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl $2, %ecx
movq %r15, %rdx
movq 88(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $350, %edx
movq %rbx, %rsi
call _Z14__cudaSafeCall9cudaErrorPKci
movl %ebp, %r9d
movq 8(%rsp), %rbx
movl %ebx, %r8d
movl (%rsp), %r15d
movl %r15d, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq 48(%rsp), %rdi
call _Z17convolutionRowCPUPfS_S_iii
movl %ebp, %r9d
movl %ebx, %r8d
movl %r15d, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq 56(%rsp), %rdi
call _Z17convolutionColCPUPfS_S_iii
pxor %xmm0, %xmm0
cvtsi2sdl 16(%rsp), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl %ebx, %xmm1
mulsd %xmm1, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl %ebp, %xmm1
mulsd %xmm1, %xmm0
mulsd .LC15(%rip), %xmm0
movq %r13, %xmm1
divsd .LC16(%rip), %xmm1
divsd %xmm1, %xmm0
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
leaq _ZL8d_Kernel(%rip), %rdi
call cudaFree@PLT
movq %r12, %rdi
call cudaFreeHost@PLT
movq %r14, %rdi
call cudaFreeHost@PLT
movq 24(%rsp), %rdi
call cudaFreeHost@PLT
movq 32(%rsp), %rdi
call cudaFreeHost@PLT
movl $0, %eax
jmp .L67
.L68:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L67:
movq 120(%rsp), %rdx
subq %fs:40, %rdx
jne .L86
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L83:
.cfi_restore_state
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L70
.L84:
movl %ebp, %r8d
movl 4(%rsp), %ecx
movl (%rsp), %edx
movq 72(%rsp), %rsi
movq 80(%rsp), %rdi
call _Z42__device_stub__Z17convolutionRowGPUPfS_iiiPfS_iii
jmp .L76
.L85:
movl %ebp, %r8d
movl 4(%rsp), %ecx
movl (%rsp), %edx
movq 72(%rsp), %rsi
movq 88(%rsp), %rdi
call _Z42__device_stub__Z17convolutionColGPUPfS_iiiPfS_iii
jmp .L77
.L79:
movl $-1, %eax
jmp .L67
.L86:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2062:
.size main, .-main
.section .rodata.str1.1
.LC18:
.string "_Z17convolutionColGPUPfS_iii"
.LC19:
.string "_Z17convolutionRowGPUPfS_iii"
.LC20:
.string "d_Kernel"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z17convolutionColGPUPfS_iii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z17convolutionRowGPUPfS_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $1028, %r9d
movl $0, %r8d
leaq .LC20(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8d_Kernel(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl col
.bss
.align 8
.type col, @object
.size col, 8
col:
.zero 8
.globl row
.align 8
.type row, @object
.size row, 8
row:
.zero 8
.globl start
.align 8
.type start, @object
.size start, 8
start:
.zero 8
.local _ZL8d_Kernel
.comm _ZL8d_Kernel,1028,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC13:
.long 0
.long 1093567616
.align 8
.LC15:
.long -536870912
.long 1041313291
.align 8
.LC16:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* file name: convolution2D.cu
*
* CPE810A: Homework 3: Convolution
*
* Yupeng Cao, 10454637
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <ctime>
// Define constant memory for kernel storage on Device
#define KERNEL_RADIUS 128
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__constant__ float d_Kernel[KERNEL_W];
// Define Tile Size
#define TILE_W 16 // active cell width
#define TILE_H 16 // active cell height
#define TILE_SIZE (TILE_W + KERNEL_RADIUS * 2) * (TILE_W + KERNEL_RADIUS * 2)
#define UNROLL_INNER
clock_t start, row, col;
/*
*********************************************************************
Define Error Checking methods
cudaSafeCall: Check data allocate
cudaCheckError: Check kernel function execution
*********************************************************************
*/
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with a file:line diagnostic when a CUDA runtime call reported failure.
// Does nothing on success; compiled out entirely unless CUDA_ERROR_CHECK is set.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Check the outcome of the most recent kernel launch. Two stages:
// 1) cudaGetLastError() catches launch-configuration errors immediately;
// 2) cudaDeviceSynchronize() forces completion so asynchronous execution
//    errors inside the kernel surface here as well.
// Exits the process with a file:line diagnostic on either failure.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    // Stage 1: launch errors (bad grid/block config, missing kernel, ...).
    cudaError err = cudaGetLastError();
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
    // Stage 2: block until the kernel finishes so in-kernel faults
    // (illegal address, assert, ...) are reported here, not later.
    err = cudaDeviceSynchronize();
    if( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
/*
*********************************************************************
function name: convolutionRowGPU
parameters:
d_OutputRow: Space for saving results
d_Input: Input image
dimX: Width
dimY: Height
dimK: Kernel Size
*********************************************************************
*/
// Row-wise 1-D convolution with a (2*dimK+1)-tap kernel held in __constant__
// memory. Each block caches its tile row plus a dimK-wide halo on each side in
// shared memory, then every thread accumulates its output pixel.
// NOTE(review): each thread loads only 2 cache entries, so the cached span per
// row is 2*blockDim.x elements while the read span is blockDim.x-1 + 2*dimK;
// this appears to assume dimK <= blockDim.x/2 — TODO confirm with callers.
__global__ void convolutionRowGPU(float* d_OutputRow, float* d_Input, int dimX, int dimY, int dimK)
{
    // Data cache: sized for the worst case (full KERNEL_RADIUS halo both
    // sides); only a dimK-wide halo is actually populated at runtime.
    __shared__ float data[ TILE_H * (TILE_W + KERNEL_RADIUS * 2) ];
    // global mem address of this thread
    const int gLoc = threadIdx.x +
        blockIdx.x * blockDim.x +
        threadIdx.y * dimX +
        blockIdx.y * blockDim.y * dimX;
    int x; // image based coordinate
    // original image based coordinate
    const int x0 = threadIdx.x + blockIdx.x * blockDim.x;
    // Start of this thread-row's segment in the cache (row pitch TILE_W + 2*dimK).
    const int shift = threadIdx.y * (TILE_W + dimK * 2);
    // left halo: zero-pad when the source column falls off the image
    x = x0 - dimK;
    if ( x < 0 )
        data[threadIdx.x + shift] = 0;
    else
        data[threadIdx.x + shift] = d_Input[ gLoc - dimK];
    // right halo: second element loaded by this thread, blockDim.x further on
    x = x0 + dimK;
    if ( x > dimX-1 )
        data[threadIdx.x + blockDim.x + shift] = 0;
    else
        data[threadIdx.x + blockDim.x + shift] = d_Input[gLoc + dimK];
    // All halo loads must land before any thread reads the cache.
    __syncthreads();
    // convolution: accumulate the 2*dimK+1 taps centred on this pixel
    float sum = 0;
    x = dimK + threadIdx.x;
    for (int i = -dimK; i <= dimK; i++)
        sum += data[x + i + shift] * d_Kernel[dimK + i];
    d_OutputRow[gLoc] = sum;
    __syncthreads();
}
/*
*********************************************************************
function name: convolutionCOlGPU
parameters:
d_OutputCol: Space for saving results
d_Input: Input image
dimX: Width
dimY: Height
dimK: Kernel Size
*********************************************************************
*/
// Column-wise 1-D convolution with a (2*dimK+1)-tap kernel in __constant__
// memory. Each block caches two vertically stacked tile rows per thread (the
// thread's own row and the row blockDim.y below it) and reduces down a column.
// NOTE(review): the cache holds rows [0, 2*blockDim.y) while reads reach row
// threadIdx.y + 2*dimK; this appears to assume dimK <= blockDim.y/2 — TODO
// confirm. Also the cache row pitch here is fixed at TILE_W (no dimK term),
// unlike the row kernel — presumably intentional; verify against callers.
__global__ void convolutionColGPU(float* d_OutputCol, float* d_Input, int dimX, int dimY, int dimK)
{
    // Data cache: sized for the worst case (full KERNEL_RADIUS halo).
    __shared__ float data[TILE_W * (TILE_H + KERNEL_RADIUS * 2)];
    // global mem address of this thread
    const int gLoc = threadIdx.x +
        blockIdx.x * blockDim.x +
        threadIdx.y * dimX +
        blockIdx.y * blockDim.y * dimX;
    int y; // image based coordinate
    // original image based coordinate
    const int y0 = threadIdx.y + blockIdx.y * blockDim.y;
    // Row offset of this thread's slot in the cache (pitch = TILE_W floats).
    const int shift = threadIdx.y * (TILE_W);
    // upper halo: zero-pad when the source row falls above the image
    y = y0 - dimK;
    if ( y < 0 )
        data[threadIdx.x + shift] = 0;
    else
        data[threadIdx.x + shift] = d_Input[ gLoc - (dimX * dimK)];
    // lower halo: second cache row, blockDim.y rows further down
    y = y0 + dimK;
    const int shift1 = shift + (blockDim.y * TILE_W);
    if ( y > dimY-1 )
        data[threadIdx.x + shift1] = 0;
    else
        data[threadIdx.x + shift1] = d_Input[gLoc + (dimX * dimK)];
    // All halo loads must land before any thread reads the cache.
    __syncthreads();
    // convolution: walk 2*dimK+1 cache rows below this thread's slot
    float sum = 0;
    for (int i = 0; i <= dimK*2; i++)
        sum += data[threadIdx.x + (threadIdx.y + i) * TILE_W] * d_Kernel[i];
    d_OutputCol[gLoc] = sum;
    __syncthreads();
}
/*
*********************************************************************
function name: convolutionRowCPU
Do Row Convolution by using CPU
*********************************************************************
*/
// CPU reference: row-wise convolution of `input` (xSize rows, ySize columns)
// into `output` (rows/cols shrunk by the border), using `kernel`.
// NOTE(review): the inner loop is hard-coded to 3 taps and `outCol = ySize - 2`,
// so the result is only a true convolution when kernel_size == 3 — kept as-is
// to preserve the reference's behavior; confirm before generalizing.
void convolutionRowCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    // Per-tap products scratch buffer; released before returning
    // (fix: the original `new[]` was never freed — memory leak per call).
    float* temp = new float[kernel_size];
    int outCol = ySize - 2; // output row width, assuming a 3-tap kernel
    for (int i = floor(kernel_size / 2); i < xSize - (kernel_size / 2); i++)
    {
        for (int j = floor(kernel_size / 2); j < ySize - floor(kernel_size / 2); j++)
        {
            // Multiply the 3 horizontal neighbours (j-1, j, j+1 when
            // kernel_size == 3) by the corresponding kernel taps.
            for (int c = 0; c < 3; c++)
            {
                *(temp + c) = *(kernel + c) * *(input + i * ySize + (j + (c - kernel_size + 2)));
            }
            *(output + (i - 1) * outCol + (j - 1)) = *(temp + 0) + *(temp + 1) + *(temp + 2);
        }
    }
    delete[] temp; // fix: release the scratch buffer
}
/*
*********************************************************************
function name: convolutionColCPU
Do Col Convolution by using CPU
*********************************************************************
*/
// CPU reference: column-wise convolution of `input` (xSize rows, ySize columns)
// into `output`, using `kernel`.
// NOTE(review): like convolutionRowCPU, the inner loop is hard-coded to 3 taps
// (rows i-1, i, i+1 when kernel_size == 3) — kept as-is to preserve the
// reference's behavior; confirm before generalizing.
void convolutionColCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    // Per-tap products scratch buffer; released before returning
    // (fix: the original `new[]` was never freed — memory leak per call).
    float* temp = new float[kernel_size];
    int outCol = ySize - 2; // output row width, assuming a 3-tap kernel
    for (int i = floor(kernel_size / 2); i < xSize - (kernel_size / 2); i++)
    {
        for (int j = floor(kernel_size / 2); j < ySize - floor(kernel_size / 2); j++)
        {
            // Multiply the 3 vertical neighbours by the corresponding taps.
            for (int c = 0; c < 3; c++)
            {
                *(temp + c) = *(kernel + c) * *(input + (i + (c - kernel_size + 2)) * ySize + j);
            }
            *(output + (i - 1) * outCol + (j - 1)) = *(temp + 0) + *(temp + 1) + *(temp + 2);
        }
    }
    delete[] temp; // fix: release the scratch buffer
}
// Validate the three command-line dimensions. Returns 1 when all are
// strictly positive; otherwise prints a diagnostic and returns -1.
int check_input(int dimX, int dimY, int dimK){
    if (dimX <= 0 || dimY <= 0 || dimK <= 0){
        printf("Input for dimX, dimY, dimK must larger than 0");
        return -1;
    }
    return 1;
}
/*
*********************************************************************
Main Function
*********************************************************************
*/
// Entry point: parse <dimX> <dimY> <dimK>, generate a random image and mask,
// run the row/column convolutions on GPU and CPU, report timing/throughput.
// Fixes vs. original: element counts converted to byte counts for
// cudaMemcpyToSymbol and the device->host copies; malloc'd buffers released
// with free() instead of cudaFreeHost(); removed cudaFree() of the
// __constant__ symbol; freed d_Input and the CPU result buffers (leaks).
int main(int argc, char *argv[])
{
    // Require exactly three user arguments.
    if (argc == 4){
        printf("Input Data\n");
    }else{
        printf("Error input Parameter \n");
        printf("Please Follow Format to Run Program: ./execute_file <dimX> <dimY> <dimK>\n");
        printf("Please input <dimX>, <dimY>, <dimK> \n");
        printf("dimX and dimY are width and heights for input image and dimK is size for mask \n");
        return 0;
    }
    int dimX = atoi(argv[1]);
    int dimY = atoi(argv[2]);
    int dimK = atoi(argv[3]);
    if (dimK > KERNEL_RADIUS){
        printf("Your Mask Size is too large. \n");
        printf("We recommend you change a reasonable number. \n");
    }
    if (check_input(dimX, dimY, dimK) == 1){
        printf("Input is Valid \n\n");
    }else{
        return -1;
    }
    srand((unsigned)time(NULL));
    // Image element count and mask length.
    unsigned int img_size = dimX * dimY;
    const int kernel_length = dimK;
    // Host-side inputs (plain malloc, so released with free() below).
    float* h_Kernel = (float *)malloc(kernel_length * sizeof(float));
    float* h_Input = (float *)malloc(dimX * dimY * sizeof(float));
    // Fill mask and image with random values in [0, 15].
    for (unsigned int i = 0; i < kernel_length; ++i)
    {
        h_Kernel[i] = (float)(rand() % 16);
    }
    for (unsigned i = 0; i < img_size; ++i)
    {
        h_Input[i] = (float)(rand() % 16);
    }
    // Host-side result buffers.
    float *h_OutputRowCPU, *h_OutputColCPU, *h_OutputRowGPU, *h_OutputColGPU;
    h_OutputRowCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputRowGPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColGPU = (float *)malloc(img_size * sizeof(float));
    // Device-side buffers.
    float *d_Input, *d_OutputRow, *d_OutputCol;
    CudaSafeCall(cudaMalloc((void **)&d_Input, img_size * sizeof(float)));
    CudaSafeCall(cudaMalloc((void **)&d_OutputRow, img_size * sizeof(float)));
    CudaSafeCall(cudaMalloc((void **)&d_OutputCol, img_size * sizeof(float)));
    // Move data from host to device.
    CudaSafeCall(cudaMemcpy(d_Input, h_Input, img_size * sizeof(float), cudaMemcpyHostToDevice));
    // fix: byte count was `kernel_length` (elements), truncating the mask copy
    CudaSafeCall(cudaMemcpyToSymbol(d_Kernel, h_Kernel, kernel_length * sizeof(float)));
    // Launch configuration: one 16x16 block per tile.
    dim3 blocks(TILE_W, TILE_H);
    dim3 grids(dimX/TILE_W, dimY/TILE_H);
    start = clock();
    convolutionRowGPU<<<grids, blocks>>>(d_OutputRow, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    cudaDeviceSynchronize();
    row = clock();
    double running_time = (double)(row - start) / CLOCKS_PER_SEC;
    printf("Row Convolution by using GPU: %f ms.\n", running_time);
    convolutionColGPU<<<grids, blocks>>>(d_OutputCol, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    cudaDeviceSynchronize();
    // fix: copies were `img_size` bytes instead of img_size * sizeof(float)
    CudaSafeCall(cudaMemcpy(h_OutputRowGPU, d_OutputRow, img_size * sizeof(float), cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaMemcpy(h_OutputColGPU, d_OutputCol, img_size * sizeof(float), cudaMemcpyDeviceToHost));
    // CPU reference runs (results currently unused beyond allocation).
    convolutionRowCPU(h_OutputRowCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    convolutionColCPU(h_OutputColCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    // Throughput estimate from the row-convolution timing.
    double computation_scale = static_cast<double>(dimX) * static_cast<double>(dimY) * static_cast<double>(dimK);
    double throughput = (computation_scale * 1.0e-9f) / (running_time / 1000.0f);
    printf("Throughput Performance: %f GFLOPs. \n", throughput);
    // Release device memory. d_Kernel is a __constant__ symbol, not a
    // cudaMalloc allocation, so it must not be cudaFree'd (fix: removed).
    cudaFree(d_Input);      // fix: was leaked
    cudaFree(d_OutputRow);
    cudaFree(d_OutputCol);
    // Release host memory — allocated with malloc(), so free(), not
    // cudaFreeHost() (fix: allocator mismatch in the original).
    free(h_Kernel);
    free(h_Input);
    free(h_OutputRowCPU);   // fix: was leaked
    free(h_OutputColCPU);   // fix: was leaked
    free(h_OutputRowGPU);
    free(h_OutputColGPU);
    return 0;
}
* file name: convolution2D.cu
*
* CPE810A: Homework 3: Convolution
*
* Yupeng Cao, 10454637
*
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <ctime>
// Define constant memory for kernel storage on Device
#define KERNEL_RADIUS 128
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__constant__ float d_Kernel[KERNEL_W];
// Define Tile Size
#define TILE_W 16 // active cell width
#define TILE_H 16 // active cell height
#define TILE_SIZE (TILE_W + KERNEL_RADIUS * 2) * (TILE_W + KERNEL_RADIUS * 2)
#define UNROLL_INNER
clock_t start, row, col;
/*
*********************************************************************
Define Error Checking methods
cudaSafeCall: Check data allocate
cudaCheckError: Check kernel function execution
*********************************************************************
*/
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort the program with a file:line diagnostic when a HIP runtime call
// reports failure. `err` is the status returned by the HIP API call;
// `file`/`line` identify the call site (supplied by the CudaSafeCall macro).
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err != hipSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Verify the most recent kernel launch. hipGetLastError() catches
// launch-configuration errors immediately; the subsequent
// hipDeviceSynchronize() surfaces asynchronous execution faults that only
// become visible at a synchronizing call. Exits the process on any failure;
// compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    hipError_t status = hipGetLastError();
    if ( status != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( status ) );
        exit( -1 );
    }
    // Kernel-side faults are reported by the next synchronizing call.
    status = hipDeviceSynchronize();
    if ( status != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( status ) );
        exit( -1 );
    }
#endif
}
/*
*********************************************************************
function name: convolutionRowGPU
parameters:
d_OutputRow: Space for saving results
d_Input: Input image
dimX: Width
dimY: Height
dimK: Kernel Size
*********************************************************************
*/
// Row (horizontal) separable-convolution kernel. One output pixel per
// thread; filter taps are read from the __constant__ array d_Kernel and
// dimK is the filter radius. Launched with TILE_W x TILE_H (16x16) blocks
// by main in this file.
//
// NOTE(review): each thread stages only two tile elements per row — the
// left sample input[x0 - dimK] at offset threadIdx.x and the right sample
// input[x0 + dimK] at offset threadIdx.x + blockDim.x — while the
// accumulation below reads offsets threadIdx.x .. threadIdx.x + 2*dimK.
// Those layouts coincide only when dimK == blockDim.x / 2 (i.e. 8 for
// 16-wide blocks); for other radii part of the read window is never
// written — verify results before relying on them.
__global__ void convolutionRowGPU(float* d_OutputRow, float* d_Input, int dimX, int dimY, int dimK)
{
// Shared-memory tile sized for the worst-case (KERNEL_RADIUS) halo; rows
// are addressed with the runtime stride TILE_W + dimK * 2 via `shift`.
__shared__ float data[ TILE_H * (TILE_W + KERNEL_RADIUS * 2) ];
// Linear global-memory index of this thread's output pixel.
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dimX +
blockIdx.y * blockDim.y * dimX;
int x; // image based coordinate
// Column of this thread's pixel in the full image.
const int x0 = threadIdx.x + blockIdx.x * blockDim.x;
// Start offset of this thread's row inside the shared tile.
const int shift = threadIdx.y * (TILE_W + dimK * 2);
// Left halo sample: zero-pad when it falls off the left image edge.
x = x0 - dimK;
if ( x < 0 )
data[threadIdx.x + shift] = 0;
else
data[threadIdx.x + shift] = d_Input[ gLoc - dimK];
// Right halo sample: zero-pad when it falls off the right image edge.
x = x0 + dimK;
if ( x > dimX-1 )
data[threadIdx.x + blockDim.x + shift] = 0;
else
data[threadIdx.x + blockDim.x + shift] = d_Input[gLoc + dimK];
__syncthreads(); // all tile writes must complete before any thread reads
// Accumulate the (2*dimK + 1)-tap dot product from the shared tile.
float sum = 0;
x = dimK + threadIdx.x;
for (int i = -dimK; i <= dimK; i++)
sum += data[x + i + shift] * d_Kernel[dimK + i];
d_OutputRow[gLoc] = sum;
__syncthreads(); // barrier before exit; the tile is not reused afterwards
}
/*
*********************************************************************
function name: convolutionCOlGPU
parameters:
d_OutputCol: Space for saving results
d_Input: Input image
dimX: Width
dimY: Height
dimK: Kernel Size
*********************************************************************
*/
// Column (vertical) separable-convolution kernel. One output pixel per
// thread; filter taps come from the __constant__ array d_Kernel and dimK
// is the filter radius. Launched with TILE_W x TILE_H (16x16) blocks by
// main in this file.
//
// NOTE(review): the tile receives only rows threadIdx.y (upper sample,
// y0 - dimK) and threadIdx.y + blockDim.y (lower sample, y0 + dimK), while
// the accumulation reads tile rows threadIdx.y .. threadIdx.y + 2*dimK.
// These line up only when dimK == blockDim.y / 2 (8 for 16-tall blocks);
// for larger radii rows beyond 2*blockDim.y are read uninitialized —
// verify results before relying on them.
__global__ void convolutionColGPU(float* d_OutputCol, float* d_Input, int dimX, int dimY, int dimK)
{
// Shared-memory tile sized for the worst-case (KERNEL_RADIUS) halo;
// rows use a fixed stride of TILE_W floats.
__shared__ float data[TILE_W * (TILE_H + KERNEL_RADIUS * 2)];
// Linear global-memory index of this thread's output pixel.
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dimX +
blockIdx.y * blockDim.y * dimX;
int y; // image based coordinate
// Row of this thread's pixel in the full image.
const int y0 = threadIdx.y + blockIdx.y * blockDim.y;
// Start offset of this thread's tile row (fixed TILE_W stride).
const int shift = threadIdx.y * (TILE_W);
// Upper halo sample: zero-pad when it falls above the image.
y = y0 - dimK;
if ( y < 0 )
data[threadIdx.x + shift] = 0;
else
data[threadIdx.x + shift] = d_Input[ gLoc - (dimX * dimK)];
// Lower halo sample: zero-pad when it falls below the image.
y = y0 + dimK;
const int shift1 = shift + (blockDim.y * TILE_W);
if ( y > dimY-1 )
data[threadIdx.x + shift1] = 0;
else
data[threadIdx.x + shift1] = d_Input[gLoc + (dimX * dimK)];
__syncthreads(); // all tile writes must complete before any thread reads
// Accumulate the (2*dimK + 1)-tap dot product down the tile column.
float sum = 0;
for (int i = 0; i <= dimK*2; i++)
sum += data[threadIdx.x + (threadIdx.y + i) * TILE_W] * d_Kernel[i];
d_OutputCol[gLoc] = sum;
__syncthreads(); // barrier before exit; the tile is not reused afterwards
}
/*
*********************************************************************
function name: convolutionRowCPU
Do Row Convolution by using CPU
*********************************************************************
*/
// CPU reference for the row (horizontal) convolution.
//
// output:      (xSize-2) x (ySize-2) result buffer (valid-region only)
// kernel:      filter taps; only kernel[0..2] are ever read (fixed 3-tap)
// input:       xSize x ySize image, row-major with ySize-element rows
// kernel_size: nominal mask size; 3 is the value the indexing assumes
//
// Fixes vs. the original: `new float[kernel_size]` was never deleted
// (memory leak per call) and was written at indices 0..2 regardless of
// kernel_size (heap overflow when kernel_size < 3). The inner loop is
// hard-wired to 3 taps, so a fixed stack buffer is both safe and exact.
void convolutionRowCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    float taps[3];                      // per-pixel tap products (always 3)
    const int half = kernel_size / 2;   // floor() on an int divide was a no-op
    const int outCol = ySize - 2;       // output row width (3-tap assumption)
    for (int i = half; i < xSize - half; i++)
    {
        for (int j = half; j < ySize - half; j++)
        {
            for (int c = 0; c < 3; c++)
            {
                // Horizontal neighbor offset: c - kernel_size + 2 == c - 1 for kernel_size == 3.
                taps[c] = kernel[c] * input[i * ySize + (j + (c - kernel_size + 2))];
            }
            output[(i - 1) * outCol + (j - 1)] = taps[0] + taps[1] + taps[2];
        }
    }
}
/*
*********************************************************************
function name: convolutionColCPU
Do Col Convolution by using CPU
*********************************************************************
*/
// CPU reference for the column (vertical) convolution.
//
// output:      (xSize-2) x (ySize-2) result buffer (valid-region only)
// kernel:      filter taps; only kernel[0..2] are ever read (fixed 3-tap)
// input:       xSize x ySize image, row-major with ySize-element rows
// kernel_size: nominal mask size; 3 is the value the indexing assumes
//
// Fixes vs. the original: `new float[kernel_size]` was never deleted
// (memory leak per call) and was written at indices 0..2 regardless of
// kernel_size (heap overflow when kernel_size < 3). The inner loop is
// hard-wired to 3 taps, so a fixed stack buffer is both safe and exact.
void convolutionColCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    float taps[3];                      // per-pixel tap products (always 3)
    const int half = kernel_size / 2;   // floor() on an int divide was a no-op
    const int outCol = ySize - 2;       // output row width (3-tap assumption)
    for (int i = half; i < xSize - half; i++)
    {
        for (int j = half; j < ySize - half; j++)
        {
            for (int c = 0; c < 3; c++)
            {
                // Vertical neighbor offset: c - kernel_size + 2 == c - 1 for kernel_size == 3.
                taps[c] = kernel[c] * input[(i + (c - kernel_size + 2)) * ySize + j];
            }
            output[(i - 1) * outCol + (j - 1)] = taps[0] + taps[1] + taps[2];
        }
    }
}
// Validate the parsed command-line dimensions.
// Returns 1 when dimX, dimY and dimK are all strictly positive; otherwise
// prints a diagnostic and returns -1.
int check_input(int dimX, int dimY, int dimK){
    if (dimX <= 0 || dimY <= 0 || dimK <= 0) {
        printf("Input for dimX, dimY, dimK must larger than 0");
        return -1;
    }
    return 1;
}
/*
*********************************************************************
Main Function
*********************************************************************
*/
/*
 * Program entry point: parses <dimX> <dimY> <dimK>, generates random mask
 * and image data, runs the row/column convolutions on the GPU and the CPU
 * references, reports timing and throughput, and releases all resources.
 *
 * Fixes vs. the original:
 *  - hipMemcpyToSymbol was given the element count instead of a byte count,
 *    truncating the mask upload to kernel_length bytes.
 *  - both device-to-host result copies used img_size bytes instead of
 *    img_size * sizeof(float), copying only a quarter of the data.
 *  - malloc'd host buffers were released with hipHostFree (mismatched
 *    allocator) and h_OutputRowCPU/h_OutputColCPU were leaked.
 *  - hipFree was called on the __constant__ symbol d_Kernel, which is not
 *    a hipMalloc allocation; d_Input was never freed.
 *  - dimK > KERNEL_RADIUS only printed a warning, then indexed past the
 *    end of d_Kernel on the device; it now aborts.
 */
int main(int argc, char *argv[])
{
    // Check input parameter
    if (argc == 4){
        printf("Input Data\n");
    }else{
        printf("Error input Parameter \n");
        printf("Please Follow Format to Run Program: ./execute_file <dimX> <dimY> <dimK>\n");
        printf("Please input <dimX>, <dimY>, <dimK> \n");
        printf("dimX and dimY are width and heights for input image and dimK is size for mask \n");
        return 0;
    }
    int dimX = atoi(argv[1]);
    int dimY = atoi(argv[2]);
    int dimK = atoi(argv[3]);
    if (dimK > KERNEL_RADIUS){
        printf("Your Mask Size is too large. \n");
        printf("We recommend you change a reasonable number. \n");
        // Continuing would index past the end of the __constant__ d_Kernel
        // array on the device, so refuse to run.
        return -1;
    }
    if (check_input(dimX, dimY, dimK) == 1){
        printf("Input is Valid \n\n");
    }else{
        return -1;
    }
    srand((unsigned)time(NULL));
    // Image element count and mask length (elements, not bytes)
    unsigned int img_size = dimX * dimY;
    const int kernel_length = dimK;
    // Host-side inputs; plain malloc, released with free() below
    float* h_Kernel = (float *)malloc(kernel_length * sizeof(float));
    float* h_Input = (float *)malloc(dimX * dimY * sizeof(float));
    // Fill mask and image with random values in [0, 15]
    for (int i = 0; i < kernel_length; ++i)
    {
        h_Kernel[i] = (float)(rand() % 16);
    }
    for (unsigned i = 0; i < img_size; ++i)
    {
        h_Input[i] = (float)(rand() % 16);
    }
    // Host-side result buffers
    float *h_OutputRowCPU, *h_OutputColCPU, *h_OutputRowGPU, *h_OutputColGPU;
    h_OutputRowCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputRowGPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColGPU = (float *)malloc(img_size * sizeof(float));
    // Device buffers
    float *d_Input, *d_OutputRow, *d_OutputCol;
    CudaSafeCall(hipMalloc((void **)&d_Input, img_size * sizeof(float)));
    CudaSafeCall(hipMalloc((void **)&d_OutputRow, img_size * sizeof(float)));
    CudaSafeCall(hipMalloc((void **)&d_OutputCol, img_size * sizeof(float)));
    // Upload image and mask; copy sizes are in BYTES
    CudaSafeCall(hipMemcpy(d_Input, h_Input, img_size * sizeof(float), hipMemcpyHostToDevice));
    CudaSafeCall(hipMemcpyToSymbol(HIP_SYMBOL(d_Kernel), h_Kernel, kernel_length * sizeof(float)));
    // 16x16 thread blocks; the grid assumes dimX/dimY are multiples of the tile size
    dim3 blocks(TILE_W, TILE_H);
    dim3 grids(dimX/TILE_W, dimY/TILE_H);
    start = clock();
    convolutionRowGPU<<<grids, blocks>>>(d_OutputRow, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    hipDeviceSynchronize();
    row = clock();
    double running_time = (double)(row - start) / CLOCKS_PER_SEC;
    printf("Row Convolution by using GPU: %f ms.\n", running_time);
    convolutionColGPU<<<grids, blocks>>>(d_OutputCol, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    hipDeviceSynchronize();
    // Download both result images (full byte counts)
    CudaSafeCall(hipMemcpy(h_OutputRowGPU, d_OutputRow, img_size * sizeof(float), hipMemcpyDeviceToHost));
    CudaSafeCall(hipMemcpy(h_OutputColGPU, d_OutputCol, img_size * sizeof(float), hipMemcpyDeviceToHost));
    // CPU reference implementations
    convolutionRowCPU(h_OutputRowCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    convolutionColCPU(h_OutputColCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    double computation_scale = static_cast<double>(dimX) * static_cast<double>(dimY) * static_cast<double>(dimK);
    double throughput = (computation_scale * 1.0e-9) / (running_time / 1000.0);
    printf("Throughput Performance: %f GFLOPs. \n", throughput);
    // Release device memory; d_Kernel is a __constant__ symbol, not a
    // hipMalloc allocation, so it must not be hipFree'd
    hipFree(d_Input);
    hipFree(d_OutputRow);
    hipFree(d_OutputCol);
    // Host buffers came from malloc, so pair them with free
    free(h_Kernel);
    free(h_Input);
    free(h_OutputRowCPU);
    free(h_OutputColCPU);
    free(h_OutputRowGPU);
    free(h_OutputColGPU);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* file name: convolution2D.cu
*
* CPE810A: Homework 3: Convolution
*
* Yupeng Cao, 10454637
*
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <ctime>
// Define constant memory for kernel storage on Device
#define KERNEL_RADIUS 128
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__constant__ float d_Kernel[KERNEL_W];
// Define Tile Size
#define TILE_W 16 // active cell width
#define TILE_H 16 // active cell height
#define TILE_SIZE (TILE_W + KERNEL_RADIUS * 2) * (TILE_W + KERNEL_RADIUS * 2)
#define UNROLL_INNER
clock_t start, row, col;
/*
*********************************************************************
Define Error Checking methods
cudaSafeCall: Check data allocate
cudaCheckError: Check kernel function execution
*********************************************************************
*/
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with a file:line diagnostic when a HIP runtime call returns an
// error status. `err` is the HIP API return value; `file`/`line` come from
// the CudaSafeCall macro. Does nothing (beyond returning) when
// CUDA_ERROR_CHECK is not defined.
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// Check the outcome of the most recent kernel launch: hipGetLastError()
// catches launch-configuration errors immediately, and the subsequent
// hipDeviceSynchronize() surfaces asynchronous execution faults that only
// appear at a synchronizing call. Exits the process on any failure;
// compiled out when CUDA_ERROR_CHECK is not defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// Kernel-side faults are only reported by a synchronizing call.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
/*
*********************************************************************
function name: convolutionRowGPU
parameters:
d_OutputRow: Space for saving results
d_Input: Input image
dimX: Width
dimY: Height
dimK: Kernel Size
*********************************************************************
*/
// Row (horizontal) separable-convolution kernel. One output pixel per
// thread; filter taps are read from the __constant__ array d_Kernel and
// dimK is the filter radius. Launched with TILE_W x TILE_H (16x16) blocks
// by main in this file.
//
// NOTE(review): each thread stages only two tile elements per row — the
// left sample input[x0 - dimK] at offset threadIdx.x and the right sample
// input[x0 + dimK] at offset threadIdx.x + blockDim.x — while the
// accumulation below reads offsets threadIdx.x .. threadIdx.x + 2*dimK.
// Those layouts coincide only when dimK == blockDim.x / 2 (i.e. 8 for
// 16-wide blocks); for other radii part of the read window is never
// written — verify results before relying on them.
__global__ void convolutionRowGPU(float* d_OutputRow, float* d_Input, int dimX, int dimY, int dimK)
{
// Shared-memory tile sized for the worst-case (KERNEL_RADIUS) halo; rows
// are addressed with the runtime stride TILE_W + dimK * 2 via `shift`.
__shared__ float data[ TILE_H * (TILE_W + KERNEL_RADIUS * 2) ];
// Linear global-memory index of this thread's output pixel.
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dimX +
blockIdx.y * blockDim.y * dimX;
int x; // image based coordinate
// Column of this thread's pixel in the full image.
const int x0 = threadIdx.x + blockIdx.x * blockDim.x;
// Start offset of this thread's row inside the shared tile.
const int shift = threadIdx.y * (TILE_W + dimK * 2);
// Left halo sample: zero-pad when it falls off the left image edge.
x = x0 - dimK;
if ( x < 0 )
data[threadIdx.x + shift] = 0;
else
data[threadIdx.x + shift] = d_Input[ gLoc - dimK];
// Right halo sample: zero-pad when it falls off the right image edge.
x = x0 + dimK;
if ( x > dimX-1 )
data[threadIdx.x + blockDim.x + shift] = 0;
else
data[threadIdx.x + blockDim.x + shift] = d_Input[gLoc + dimK];
__syncthreads(); // all tile writes must complete before any thread reads
// Accumulate the (2*dimK + 1)-tap dot product from the shared tile.
float sum = 0;
x = dimK + threadIdx.x;
for (int i = -dimK; i <= dimK; i++)
sum += data[x + i + shift] * d_Kernel[dimK + i];
d_OutputRow[gLoc] = sum;
__syncthreads(); // barrier before exit; the tile is not reused afterwards
}
/*
*********************************************************************
function name: convolutionCOlGPU
parameters:
d_OutputCol: Space for saving results
d_Input: Input image
dimX: Width
dimY: Height
dimK: Kernel Size
*********************************************************************
*/
// Column (vertical) separable-convolution kernel. One output pixel per
// thread; filter taps come from the __constant__ array d_Kernel and dimK
// is the filter radius. Launched with TILE_W x TILE_H (16x16) blocks by
// main in this file.
//
// NOTE(review): the tile receives only rows threadIdx.y (upper sample,
// y0 - dimK) and threadIdx.y + blockDim.y (lower sample, y0 + dimK), while
// the accumulation reads tile rows threadIdx.y .. threadIdx.y + 2*dimK.
// These line up only when dimK == blockDim.y / 2 (8 for 16-tall blocks);
// for larger radii rows beyond 2*blockDim.y are read uninitialized —
// verify results before relying on them.
__global__ void convolutionColGPU(float* d_OutputCol, float* d_Input, int dimX, int dimY, int dimK)
{
// Shared-memory tile sized for the worst-case (KERNEL_RADIUS) halo;
// rows use a fixed stride of TILE_W floats.
__shared__ float data[TILE_W * (TILE_H + KERNEL_RADIUS * 2)];
// Linear global-memory index of this thread's output pixel.
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dimX +
blockIdx.y * blockDim.y * dimX;
int y; // image based coordinate
// Row of this thread's pixel in the full image.
const int y0 = threadIdx.y + blockIdx.y * blockDim.y;
// Start offset of this thread's tile row (fixed TILE_W stride).
const int shift = threadIdx.y * (TILE_W);
// Upper halo sample: zero-pad when it falls above the image.
y = y0 - dimK;
if ( y < 0 )
data[threadIdx.x + shift] = 0;
else
data[threadIdx.x + shift] = d_Input[ gLoc - (dimX * dimK)];
// Lower halo sample: zero-pad when it falls below the image.
y = y0 + dimK;
const int shift1 = shift + (blockDim.y * TILE_W);
if ( y > dimY-1 )
data[threadIdx.x + shift1] = 0;
else
data[threadIdx.x + shift1] = d_Input[gLoc + (dimX * dimK)];
__syncthreads(); // all tile writes must complete before any thread reads
// Accumulate the (2*dimK + 1)-tap dot product down the tile column.
float sum = 0;
for (int i = 0; i <= dimK*2; i++)
sum += data[threadIdx.x + (threadIdx.y + i) * TILE_W] * d_Kernel[i];
d_OutputCol[gLoc] = sum;
__syncthreads(); // barrier before exit; the tile is not reused afterwards
}
/*
*********************************************************************
function name: convolutionRowCPU
Do Row Convolution by using CPU
*********************************************************************
*/
// CPU reference for the row (horizontal) convolution.
//
// output:      (xSize-2) x (ySize-2) result buffer (valid-region only)
// kernel:      filter taps; only kernel[0..2] are ever read (fixed 3-tap)
// input:       xSize x ySize image, row-major with ySize-element rows
// kernel_size: nominal mask size; 3 is the value the indexing assumes
//
// Fixes vs. the original: `new float[kernel_size]` was never deleted
// (memory leak per call) and was written at indices 0..2 regardless of
// kernel_size (heap overflow when kernel_size < 3). The inner loop is
// hard-wired to 3 taps, so a fixed stack buffer is both safe and exact.
void convolutionRowCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    float taps[3];                      // per-pixel tap products (always 3)
    const int half = kernel_size / 2;   // floor() on an int divide was a no-op
    const int outCol = ySize - 2;       // output row width (3-tap assumption)
    for (int i = half; i < xSize - half; i++)
    {
        for (int j = half; j < ySize - half; j++)
        {
            for (int c = 0; c < 3; c++)
            {
                // Horizontal neighbor offset: c - kernel_size + 2 == c - 1 for kernel_size == 3.
                taps[c] = kernel[c] * input[i * ySize + (j + (c - kernel_size + 2))];
            }
            output[(i - 1) * outCol + (j - 1)] = taps[0] + taps[1] + taps[2];
        }
    }
}
/*
*********************************************************************
function name: convolutionColCPU
Do Col Convolution by using CPU
*********************************************************************
*/
// CPU reference for the column (vertical) convolution.
//
// output:      (xSize-2) x (ySize-2) result buffer (valid-region only)
// kernel:      filter taps; only kernel[0..2] are ever read (fixed 3-tap)
// input:       xSize x ySize image, row-major with ySize-element rows
// kernel_size: nominal mask size; 3 is the value the indexing assumes
//
// Fixes vs. the original: `new float[kernel_size]` was never deleted
// (memory leak per call) and was written at indices 0..2 regardless of
// kernel_size (heap overflow when kernel_size < 3). The inner loop is
// hard-wired to 3 taps, so a fixed stack buffer is both safe and exact.
void convolutionColCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    float taps[3];                      // per-pixel tap products (always 3)
    const int half = kernel_size / 2;   // floor() on an int divide was a no-op
    const int outCol = ySize - 2;       // output row width (3-tap assumption)
    for (int i = half; i < xSize - half; i++)
    {
        for (int j = half; j < ySize - half; j++)
        {
            for (int c = 0; c < 3; c++)
            {
                // Vertical neighbor offset: c - kernel_size + 2 == c - 1 for kernel_size == 3.
                taps[c] = kernel[c] * input[(i + (c - kernel_size + 2)) * ySize + j];
            }
            output[(i - 1) * outCol + (j - 1)] = taps[0] + taps[1] + taps[2];
        }
    }
}
// Validate the parsed command-line dimensions: returns 1 when dimX, dimY
// and dimK are all strictly positive, otherwise prints a diagnostic and
// returns -1.
int check_input(int dimX, int dimY, int dimK){
if (dimX > 0 && dimY > 0 && dimK > 0){
return 1;
}else{
printf("Input for dimX, dimY, dimK must larger than 0");
return -1;
}
}
/*
*********************************************************************
Main Function
*********************************************************************
*/
/*
 * Program entry point: parses <dimX> <dimY> <dimK>, generates random mask
 * and image data, runs the row/column convolutions on the GPU and the CPU
 * references, reports timing and throughput, and releases all resources.
 *
 * Fixes vs. the original:
 *  - hipMemcpyToSymbol was given the element count instead of a byte count,
 *    truncating the mask upload to kernel_length bytes.
 *  - both device-to-host result copies used img_size bytes instead of
 *    img_size * sizeof(float), copying only a quarter of the data.
 *  - malloc'd host buffers were released with hipHostFree (mismatched
 *    allocator) and h_OutputRowCPU/h_OutputColCPU were leaked.
 *  - hipFree was called on the __constant__ symbol d_Kernel, which is not
 *    a hipMalloc allocation; d_Input was never freed.
 *  - dimK > KERNEL_RADIUS only printed a warning, then indexed past the
 *    end of d_Kernel on the device; it now aborts.
 */
int main(int argc, char *argv[])
{
    // Check input parameter
    if (argc == 4){
        printf("Input Data\n");
    }else{
        printf("Error input Parameter \n");
        printf("Please Follow Format to Run Program: ./execute_file <dimX> <dimY> <dimK>\n");
        printf("Please input <dimX>, <dimY>, <dimK> \n");
        printf("dimX and dimY are width and heights for input image and dimK is size for mask \n");
        return 0;
    }
    int dimX = atoi(argv[1]);
    int dimY = atoi(argv[2]);
    int dimK = atoi(argv[3]);
    if (dimK > KERNEL_RADIUS){
        printf("Your Mask Size is too large. \n");
        printf("We recommend you change a reasonable number. \n");
        // Continuing would index past the end of the __constant__ d_Kernel
        // array on the device, so refuse to run.
        return -1;
    }
    if (check_input(dimX, dimY, dimK) == 1){
        printf("Input is Valid \n\n");
    }else{
        return -1;
    }
    srand((unsigned)time(NULL));
    // Image element count and mask length (elements, not bytes)
    unsigned int img_size = dimX * dimY;
    const int kernel_length = dimK;
    // Host-side inputs; plain malloc, released with free() below
    float* h_Kernel = (float *)malloc(kernel_length * sizeof(float));
    float* h_Input = (float *)malloc(dimX * dimY * sizeof(float));
    // Fill mask and image with random values in [0, 15]
    for (int i = 0; i < kernel_length; ++i)
    {
        h_Kernel[i] = (float)(rand() % 16);
    }
    for (unsigned i = 0; i < img_size; ++i)
    {
        h_Input[i] = (float)(rand() % 16);
    }
    // Host-side result buffers
    float *h_OutputRowCPU, *h_OutputColCPU, *h_OutputRowGPU, *h_OutputColGPU;
    h_OutputRowCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputRowGPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColGPU = (float *)malloc(img_size * sizeof(float));
    // Device buffers
    float *d_Input, *d_OutputRow, *d_OutputCol;
    CudaSafeCall(hipMalloc((void **)&d_Input, img_size * sizeof(float)));
    CudaSafeCall(hipMalloc((void **)&d_OutputRow, img_size * sizeof(float)));
    CudaSafeCall(hipMalloc((void **)&d_OutputCol, img_size * sizeof(float)));
    // Upload image and mask; copy sizes are in BYTES
    CudaSafeCall(hipMemcpy(d_Input, h_Input, img_size * sizeof(float), hipMemcpyHostToDevice));
    CudaSafeCall(hipMemcpyToSymbol(HIP_SYMBOL(d_Kernel), h_Kernel, kernel_length * sizeof(float)));
    // 16x16 thread blocks; the grid assumes dimX/dimY are multiples of the tile size
    dim3 blocks(TILE_W, TILE_H);
    dim3 grids(dimX/TILE_W, dimY/TILE_H);
    start = clock();
    convolutionRowGPU<<<grids, blocks>>>(d_OutputRow, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    hipDeviceSynchronize();
    row = clock();
    double running_time = (double)(row - start) / CLOCKS_PER_SEC;
    printf("Row Convolution by using GPU: %f ms.\n", running_time);
    convolutionColGPU<<<grids, blocks>>>(d_OutputCol, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    hipDeviceSynchronize();
    // Download both result images (full byte counts)
    CudaSafeCall(hipMemcpy(h_OutputRowGPU, d_OutputRow, img_size * sizeof(float), hipMemcpyDeviceToHost));
    CudaSafeCall(hipMemcpy(h_OutputColGPU, d_OutputCol, img_size * sizeof(float), hipMemcpyDeviceToHost));
    // CPU reference implementations
    convolutionRowCPU(h_OutputRowCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    convolutionColCPU(h_OutputColCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    double computation_scale = static_cast<double>(dimX) * static_cast<double>(dimY) * static_cast<double>(dimK);
    double throughput = (computation_scale * 1.0e-9) / (running_time / 1000.0);
    printf("Throughput Performance: %f GFLOPs. \n", throughput);
    // Release device memory; d_Kernel is a __constant__ symbol, not a
    // hipMalloc allocation, so it must not be hipFree'd
    hipFree(d_Input);
    hipFree(d_OutputRow);
    hipFree(d_OutputCol);
    // Host buffers came from malloc, so pair them with free
    free(h_Kernel);
    free(h_Input);
    free(h_OutputRowCPU);
    free(h_OutputColCPU);
    free(h_OutputRowGPU);
    free(h_OutputColGPU);
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17convolutionRowGPUPfS_iii
.globl _Z17convolutionRowGPUPfS_iii
.p2align 8
.type _Z17convolutionRowGPUPfS_iii,@function
_Z17convolutionRowGPUPfS_iii:
s_clause 0x3
s_load_b32 s6, s[0:1], 0x2c
s_load_b32 s7, s[0:1], 0x10
s_load_b32 s4, s[0:1], 0x18
s_load_b64 s[2:3], s[0:1], 0x8
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v5, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s6, 0xffff
s_lshr_b32 s6, s6, 16
v_mad_u64_u32 v[3:4], null, s14, s5, v[2:3]
v_mad_u64_u32 v[6:7], null, s15, s6, v[5:6]
s_mov_b32 s6, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, v6, s7, v[3:4]
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v6, 0
v_cmpx_le_i32_e64 s4, v3
s_cbranch_execz .LBB0_2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, s4, v0
v_ashrrev_i32_e32 v7, 31, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v6, vcc_lo, s2, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b32 v6, v[6:7], off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s6
s_lshl_b32 s6, s4, 1
v_add_nc_u32_e32 v3, s4, v3
s_add_i32 s8, s6, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v4, s8, v5
v_cmp_gt_i32_e32 vcc_lo, s7, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_lshl_u32 v5, v4, v2, 2
s_waitcnt vmcnt(0)
ds_store_b32 v5, v6
s_and_saveexec_b32 s7, vcc_lo
s_cbranch_execz .LBB0_4
v_add_nc_u32_e32 v5, s4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s2, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v6, vcc_lo
global_load_b32 v1, v[5:6], off
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s7
v_add_nc_u32_e32 v3, v4, v2
s_mov_b32 s2, 0
s_cmp_lt_i32 s4, 0
s_delay_alu instid0(VALU_DEP_1)
v_add_lshl_u32 v5, v3, s5, 2
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
ds_store_b32 v5, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_7
v_lshlrev_b32_e32 v1, 2, v2
v_mov_b32_e32 v3, 0
s_add_i32 s6, s6, 1
s_delay_alu instid0(VALU_DEP_2)
v_lshl_add_u32 v1, v4, 2, v1
.p2align 6
.LBB0_6:
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, d_Kernel@rel32@lo+4
s_addc_u32 s5, s5, d_Kernel@rel32@hi+12
s_ashr_i32 s3, s2, 31
ds_load_b32 v2, v1
s_lshl_b64 s[8:9], s[2:3], 2
v_add_nc_u32_e32 v1, 4, v1
s_add_u32 s4, s8, s4
s_addc_u32 s5, s9, s5
s_add_i32 s2, s2, 1
s_load_b32 s3, s[4:5], 0x0
s_cmp_eq_u32 s6, s2
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v3, s3, v2
s_cbranch_scc0 .LBB0_6
.LBB0_7:
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v3, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17convolutionRowGPUPfS_iii
.amdhsa_group_segment_fixed_size 17408
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17convolutionRowGPUPfS_iii, .Lfunc_end0-_Z17convolutionRowGPUPfS_iii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z17convolutionColGPUPfS_iii
.globl _Z17convolutionColGPUPfS_iii
.p2align 8
.type _Z17convolutionColGPUPfS_iii,@function
_Z17convolutionColGPUPfS_iii:
s_clause 0x2
s_load_b32 s7, s[0:1], 0x2c
s_load_b32 s6, s[0:1], 0x10
s_load_b32 s4, s[0:1], 0x18
v_bfe_u32 v1, v0, 10, 10
s_load_b64 s[2:3], s[0:1], 0x8
v_mov_b32_e32 v4, 0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s7, 16
s_and_b32 s7, s7, 0xffff
v_mad_u64_u32 v[2:3], null, s15, s5, v[1:2]
v_and_b32_e32 v3, 0x3ff, v0
s_mul_i32 s14, s14, s7
s_mov_b32 s7, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v5, v2, s6
v_add3_u32 v0, s14, v3, v5
v_mov_b32_e32 v5, 0
v_cmpx_le_i32_e64 s4, v2
s_cbranch_execz .LBB1_2
s_mul_i32 s8, s4, s6
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_subrev_nc_u32_e32 v5, s8, v0
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[5:6]
v_add_co_u32 v5, vcc_lo, s2, v5
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v6, vcc_lo
global_load_b32 v5, v[5:6], off
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s7
s_load_b32 s7, s[0:1], 0x14
v_lshlrev_b32_e32 v6, 4, v1
v_add_nc_u32_e32 v2, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_lshl_u32 v6, v6, v3, 2
s_waitcnt vmcnt(0)
ds_store_b32 v6, v5
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s7, v2
s_and_saveexec_b32 s7, vcc_lo
s_cbranch_execz .LBB1_4
v_mad_u64_u32 v[4:5], null, s4, s6, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s2, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
global_load_b32 v4, v[4:5], off
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s7
v_add_lshl_u32 v2, v1, s5, 4
s_cmp_lt_i32 s4, 0
s_delay_alu instid0(VALU_DEP_1)
v_add_lshl_u32 v5, v2, v3, 2
v_mov_b32_e32 v2, 0
s_waitcnt vmcnt(0)
ds_store_b32 v5, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_7
v_lshlrev_b32_e32 v2, 2, v3
s_lshl_b32 s4, s4, 1
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, d_Kernel@rel32@lo+4
s_addc_u32 s3, s3, d_Kernel@rel32@hi+12
s_max_i32 s4, s4, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s4, s4, 1
v_lshl_add_u32 v1, v1, 6, v2
v_mov_b32_e32 v2, 0
.LBB1_6:
ds_load_b32 v3, v1
s_load_b32 s5, s[2:3], 0x0
s_add_i32 s4, s4, -1
v_add_nc_u32_e32 v1, 64, v1
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmp_eq_u32 s4, 0
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v2, s5, v3
s_cbranch_scc0 .LBB1_6
.LBB1_7:
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17convolutionColGPUPfS_iii
.amdhsa_group_segment_fixed_size 17408
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z17convolutionColGPUPfS_iii, .Lfunc_end1-_Z17convolutionColGPUPfS_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected d_Kernel
.type d_Kernel,@object
.section .bss,"aw",@nobits
.globl d_Kernel
.p2align 4, 0x0
d_Kernel:
.zero 1028
.size d_Kernel, 1028
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym d_Kernel
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 17408
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17convolutionRowGPUPfS_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17convolutionRowGPUPfS_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 17408
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17convolutionColGPUPfS_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17convolutionColGPUPfS_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
* file name: convolution2D.cu
*
* CPE810A: Homework 3: Convolution
*
* Yupeng Cao, 10454637
*
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <ctime>
// Define constant memory for kernel storage on Device
#define KERNEL_RADIUS 128
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__constant__ float d_Kernel[KERNEL_W];
// Define Tile Size
#define TILE_W 16 // active cell width
#define TILE_H 16 // active cell height
#define TILE_SIZE (TILE_W + KERNEL_RADIUS * 2) * (TILE_W + KERNEL_RADIUS * 2)
#define UNROLL_INNER
clock_t start, row, col;
/*
*********************************************************************
Define Error Checking methods
cudaSafeCall: Check data allocate
cudaCheckError: Check kernel function execution
*********************************************************************
*/
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Validate the result of a HIP runtime API call.
// On any status other than hipSuccess, print the failing file/line and
// the runtime's error text to stderr, then terminate with exit(-1).
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err != hipSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
// Two-stage post-launch error check (no-op unless CUDA_ERROR_CHECK is set):
//   1. hipGetLastError() surfaces launch/configuration errors recorded
//      since the previous runtime call;
//   2. hipDeviceSynchronize() blocks until queued work finishes so that
//      asynchronous execution errors are surfaced as well.
// Either failure prints a diagnostic to stderr and terminates with exit(-1).
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    // Stage 1: pick up any pending launch error.
    hipError_t err = hipGetLastError();
    if ( err != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
    // Stage 2: drain the device to expose in-flight execution errors.
    err = hipDeviceSynchronize();
    if ( err != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
/*
 *********************************************************************
 function name: convolutionRowGPU

 Row-direction 1-D convolution over a 2-D image. Each thread block
 stages a horizontal strip of the input (its active pixels plus a
 dimK-wide apron on the left and right, zero-padded at the image
 borders) into shared memory, then each thread accumulates a
 (2*dimK + 1)-tap weighted sum using the coefficients held in the
 __constant__ array d_Kernel.

 parameters:
 d_OutputRow: device buffer receiving the result (dimX*dimY floats)
 d_Input: device input image, row-major (dimX*dimY floats)
 dimX: image width in pixels
 dimY: image height in pixels (not referenced inside this kernel)
 dimK: convolution radius; taps cover offsets [-dimK, +dimK]

 NOTE(review): the shared array is sized with the compile-time
 KERNEL_RADIUS (128) while per-row offsets use the runtime dimK, so
 this assumes dimK <= KERNEL_RADIUS -- confirm the host enforces it.
 NOTE(review): the load phase writes shared indices tid and
 tid + blockDim.x per row, but the accumulation reads up to
 tid + 2*dimK; when 2*dimK > blockDim.x some reads appear to touch
 shared memory no thread wrote -- verify against the intended launch
 configuration (blockDim.x == TILE_W == 16).
 *********************************************************************
*/
__global__ void convolutionRowGPU(float* d_OutputRow, float* d_Input, int dimX, int dimY, int dimK)
{
// Data cache: one strip per threadIdx.y row -- TILE_W active cells plus
// a KERNEL_RADIUS-wide apron on each side, for every row of the block.
__shared__ float data[ TILE_H * (TILE_W + KERNEL_RADIUS * 2) ];
// global mem address of this thread (flat row-major pixel index)
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dimX +
blockIdx.y * blockDim.y * dimX;
int x; // image based coordinate
// original image based x coordinate of this thread
const int x0 = threadIdx.x + blockIdx.x * blockDim.x;
// start of this thread's row inside the shared strip
// (uses runtime dimK, not the compile-time KERNEL_RADIUS)
const int shift = threadIdx.y * (TILE_W + dimK * 2);
// left apron: zero-pad when the sample would fall left of the image
x = x0 - dimK;
if ( x < 0 )
data[threadIdx.x + shift] = 0;
else
data[threadIdx.x + shift] = d_Input[ gLoc - dimK];
// right apron: zero-pad past the right image edge
x = x0 + dimK;
if ( x > dimX-1 )
data[threadIdx.x + blockDim.x + shift] = 0;
else
data[threadIdx.x + blockDim.x + shift] = d_Input[gLoc + dimK];
// every load must land before any thread starts accumulating
__syncthreads();
// convolution: (2*dimK + 1)-tap weighted sum centred on this pixel
float sum = 0;
x = dimK + threadIdx.x;
for (int i = -dimK; i <= dimK; i++)
sum += data[x + i + shift] * d_Kernel[dimK + i];
d_OutputRow[gLoc] = sum;
__syncthreads();
}
/*
 *********************************************************************
 function name: convolutionColGPU

 Column-direction 1-D convolution over a 2-D image. Each thread block
 stages a vertical strip of the input (its active pixels plus apron
 rows above and below, zero-padded at the image borders) into shared
 memory, then each thread accumulates 2*dimK + 1 taps using the
 coefficients held in the __constant__ array d_Kernel.

 parameters:
 d_OutputCol: device buffer receiving the result (dimX*dimY floats)
 d_Input: device input image, row-major (dimX*dimY floats)
 dimX: image width in pixels
 dimY: image height in pixels
 dimK: convolution radius; taps cover offsets [-dimK, +dimK]

 NOTE(review): the load phase writes shared rows at shift and
 shift + blockDim.y*TILE_W, yet the accumulation reads rows
 (threadIdx.y + i) for i in [0, 2*dimK]; for dimK larger than half the
 block height this appears to read shared memory no thread wrote --
 verify against the intended launch configuration (blockDim.y ==
 TILE_H == 16).
 *********************************************************************
*/
__global__ void convolutionColGPU(float* d_OutputCol, float* d_Input, int dimX, int dimY, int dimK)
{
// Data cache: TILE_W columns by (TILE_H + 2*KERNEL_RADIUS) rows
__shared__ float data[TILE_W * (TILE_H + KERNEL_RADIUS * 2)];
// global mem address of this thread (flat row-major pixel index)
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dimX +
blockIdx.y * blockDim.y * dimX;
int y; // image based coordinate
// original image based y coordinate of this thread
const int y0 = threadIdx.y + blockIdx.y * blockDim.y;
// row offset of this thread inside the shared strip
const int shift = threadIdx.y * (TILE_W);
// upper apron: zero-pad above the top image edge
y = y0 - dimK;
if ( y < 0 )
data[threadIdx.x + shift] = 0;
else
data[threadIdx.x + shift] = d_Input[ gLoc - (dimX * dimK)];
// lower apron: zero-pad below the bottom image edge
y = y0 + dimK;
const int shift1 = shift + (blockDim.y * TILE_W);
if ( y > dimY-1 )
data[threadIdx.x + shift1] = 0;
else
data[threadIdx.x + shift1] = d_Input[gLoc + (dimX * dimK)];
// every load must land before any thread starts accumulating
__syncthreads();
// convolution: weighted sum down the column
float sum = 0;
for (int i = 0; i <= dimK*2; i++)
sum += data[threadIdx.x + (threadIdx.y + i) * TILE_W] * d_Kernel[i];
d_OutputCol[gLoc] = sum;
__syncthreads();
}
/*
 *********************************************************************
 function name: convolutionRowCPU

 CPU reference implementation of the row (horizontal) convolution over
 the interior of the image; results land in an (xSize-2) x (ySize-2)
 output, offset by one in each dimension.

 NOTE(review): despite taking kernel_size, the inner loop always reads
 exactly the first 3 kernel taps (c = 0..2), so the reference only
 matches the GPU path for a 3-tap mask -- confirm intent.

 parameters:
 output: result buffer, (xSize-2) rows of (ySize-2) floats
 kernel: filter coefficients; at least 3 entries are read
 input:  xSize x ySize source image, row-major
 *********************************************************************
*/
void convolutionRowCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    // Per-tap scratch. The summation below always reads temp[0..2], so
    // guarantee at least 3 slots (fix: avoids OOB when kernel_size < 3).
    float* temp = new float[kernel_size < 3 ? 3 : kernel_size];
    int outCol = ySize - 2;
    for (int i = floor(kernel_size / 2); i < xSize - (kernel_size / 2); i++)
    {
        for (int j = floor(kernel_size / 2); j < ySize - floor(kernel_size / 2); j++)
        {
            for (int c = 0; c < 3; c++)
            {
                *(temp + c) = *(kernel + c) * *(input + i * ySize + (j + (c - kernel_size + 2)));
            }
            *(output + (i - 1) * outCol + (j - 1)) = *(temp + 0) + *(temp + 1) + *(temp + 2);
        }
    }
    delete[] temp; // fix: scratch buffer was previously leaked on every call
}
/*
 *********************************************************************
 function name: convolutionColCPU

 CPU reference implementation of the column (vertical) convolution
 over the interior of the image; results land in an
 (xSize-2) x (ySize-2) output, offset by one in each dimension.

 NOTE(review): despite taking kernel_size, the inner loop always reads
 exactly the first 3 kernel taps (c = 0..2), so the reference only
 matches the GPU path for a 3-tap mask -- confirm intent.

 parameters:
 output: result buffer, (xSize-2) rows of (ySize-2) floats
 kernel: filter coefficients; at least 3 entries are read
 input:  xSize x ySize source image, row-major
 *********************************************************************
*/
void convolutionColCPU(float* output, float* kernel, float* input, int xSize, int ySize, int kernel_size)
{
    // Per-tap scratch. The summation below always reads temp[0..2], so
    // guarantee at least 3 slots (fix: avoids OOB when kernel_size < 3).
    float* temp = new float[kernel_size < 3 ? 3 : kernel_size];
    int outCol = ySize - 2;
    for (int i = floor(kernel_size / 2); i < xSize - (kernel_size / 2); i++)
    {
        for (int j = floor(kernel_size / 2); j < ySize - floor(kernel_size / 2); j++)
        {
            for (int c = 0; c < 3; c++)
            {
                *(temp + c) = *(kernel + c) * *(input + (i + (c - kernel_size + 2)) * ySize + j);
            }
            *(output + (i - 1) * outCol + (j - 1)) = *(temp + 0) + *(temp + 1) + *(temp + 2);
        }
    }
    delete[] temp; // fix: scratch buffer was previously leaked on every call
}
// Sanity-check the user-supplied dimensions: returns 1 when width,
// height, and mask size are all positive; otherwise prints a
// diagnostic and returns -1.
int check_input(int dimX, int dimY, int dimK){
    const bool valid = (dimX > 0) && (dimY > 0) && (dimK > 0);
    if (!valid){
        printf("Input for dimX, dimY, dimK must larger than 0");
        return -1;
    }
    return 1;
}
/*
 *********************************************************************
 Main Function

 Usage: ./execute_file <dimX> <dimY> <dimK>

 Generates a random dimX x dimY image and a random dimK-entry mask,
 runs the row and column convolution kernels on the GPU, runs the CPU
 reference versions, and reports timing/throughput.
 *********************************************************************
*/
int main(int argc, char *argv[])
{
    // Exactly three user arguments are required.
    if (argc == 4){
        printf("Input Data\n");
    }else{
        printf("Error input Parameter \n");
        printf("Please Follow Format to Run Program: ./execute_file <dimX> <dimY> <dimK>\n");
        printf("Please input <dimX>, <dimY>, <dimK> \n");
        printf("dimX and dimY are width and heights for input image and dimK is size for mask \n");
        return 0;
    }
    int dimX = atoi(argv[1]);
    int dimY = atoi(argv[2]);
    int dimK = atoi(argv[3]);
    // d_Kernel holds at most KERNEL_W coefficients; warn on oversized masks.
    if (dimK > KERNEL_RADIUS){
        printf("Your Mask Size is too large. \n");
        printf("We recommend you change a reasonable number. \n");
    }
    if (check_input(dimX, dimY, dimK) == 1){
        printf("Input is Valid \n\n");
    }else{
        return -1;
    }
    srand((unsigned)time(NULL));
    // Image size in pixels and number of mask coefficients.
    unsigned int img_size = dimX * dimY;
    const int kernel_length = dimK;
    // Host-side input buffers (plain malloc; released with free() below).
    float* h_Kernel = (float *)malloc(kernel_length * sizeof(float));
    float* h_Input = (float *)malloc(dimX * dimY * sizeof(float));
    // Fill mask and image with random integer values in [0, 15].
    for (int i = 0; i < kernel_length; ++i)
    {
        h_Kernel[i] = (float)(rand() % 16);
    }
    for (unsigned i = 0; i < img_size; ++i)
    {
        h_Input[i] = (float)(rand() % 16);
    }
    // Host-side result buffers.
    float *h_OutputRowCPU, *h_OutputColCPU, *h_OutputRowGPU, *h_OutputColGPU;
    h_OutputRowCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColCPU = (float *)malloc(img_size * sizeof(float));
    h_OutputRowGPU = (float *)malloc(img_size * sizeof(float));
    h_OutputColGPU = (float *)malloc(img_size * sizeof(float));
    // Device buffers.
    float *d_Input, *d_OutputRow, *d_OutputCol;
    CudaSafeCall(hipMalloc((void **)&d_Input, img_size * sizeof(float)));
    CudaSafeCall(hipMalloc((void **)&d_OutputRow, img_size * sizeof(float)));
    CudaSafeCall(hipMalloc((void **)&d_OutputCol, img_size * sizeof(float)));
    // Upload the image and the mask coefficients.
    CudaSafeCall(hipMemcpy(d_Input, h_Input, img_size * sizeof(float), hipMemcpyHostToDevice));
    // fix: copy kernel_length * sizeof(float) BYTES (previously only
    // kernel_length bytes, leaving most of d_Kernel uninitialized).
    CudaSafeCall(hipMemcpyToSymbol(HIP_SYMBOL(d_Kernel), h_Kernel, kernel_length * sizeof(float)));
    // Launch geometry: 16x16 threads per block.
    // NOTE(review): dimX/TILE_W and dimY/TILE_H truncate, so trailing
    // pixels are skipped when the image is not a multiple of 16 -- confirm.
    dim3 blocks(TILE_W, TILE_H);
    dim3 grids(dimX/TILE_W, dimY/TILE_H);
    start = clock();
    convolutionRowGPU<<<grids, blocks>>>(d_OutputRow, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    hipDeviceSynchronize();
    row = clock();
    // clock() deltas divided by CLOCKS_PER_SEC yield SECONDS.
    double running_time = (double)(row - start) / CLOCKS_PER_SEC;
    // fix: convert to milliseconds before printing (the value was labelled
    // "ms" but was actually printed in seconds).
    printf("Row Convolution by using GPU: %f ms.\n", running_time * 1000.0);
    convolutionColGPU<<<grids, blocks>>>(d_OutputCol, d_Input, dimX, dimY, dimK);
    CudaCheckError();
    hipDeviceSynchronize();
    // fix: copy the full buffers back -- img_size * sizeof(float) bytes,
    // not img_size bytes.
    CudaSafeCall(hipMemcpy(h_OutputRowGPU, d_OutputRow, img_size * sizeof(float), hipMemcpyDeviceToHost));
    CudaSafeCall(hipMemcpy(h_OutputColGPU, d_OutputCol, img_size * sizeof(float), hipMemcpyDeviceToHost));
    // CPU reference implementations for comparison.
    convolutionRowCPU(h_OutputRowCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    convolutionColCPU(h_OutputColCPU, h_Kernel, h_Input, dimX, dimY, dimK);
    // Throughput in GFLOPs = operations / seconds (fix: the old formula
    // treated the seconds value as milliseconds, inflating the result).
    double computation_scale = static_cast<double>(dimX) * static_cast<double>(dimY) * static_cast<double>(dimK);
    double throughput = (computation_scale * 1.0e-9) / running_time;
    printf("Throughput Performance: %f GFLOPs. \n", throughput);
    // Release device memory. fix: d_Input was previously leaked, and
    // d_Kernel is a __constant__ symbol that must never be passed to hipFree.
    hipFree(d_Input);
    hipFree(d_OutputRow);
    hipFree(d_OutputCol);
    // fix: these buffers came from malloc(), so release them with free();
    // hipHostFree is only valid for hipHostMalloc allocations. The CPU
    // result buffers were previously leaked as well.
    free(h_Kernel);
    free(h_Input);
    free(h_OutputRowCPU);
    free(h_OutputColCPU);
    free(h_OutputRowGPU);
    free(h_OutputColGPU);
    return 0;
}
.file "convolution2D.hip"
.globl _Z32__device_stub__convolutionRowGPUPfS_iii # -- Begin function _Z32__device_stub__convolutionRowGPUPfS_iii
.p2align 4, 0x90
.type _Z32__device_stub__convolutionRowGPUPfS_iii,@function
_Z32__device_stub__convolutionRowGPUPfS_iii: # @_Z32__device_stub__convolutionRowGPUPfS_iii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17convolutionRowGPUPfS_iii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z32__device_stub__convolutionRowGPUPfS_iii, .Lfunc_end0-_Z32__device_stub__convolutionRowGPUPfS_iii
.cfi_endproc
# -- End function
.globl _Z32__device_stub__convolutionColGPUPfS_iii # -- Begin function _Z32__device_stub__convolutionColGPUPfS_iii
.p2align 4, 0x90
.type _Z32__device_stub__convolutionColGPUPfS_iii,@function
_Z32__device_stub__convolutionColGPUPfS_iii: # @_Z32__device_stub__convolutionColGPUPfS_iii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17convolutionColGPUPfS_iii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z32__device_stub__convolutionColGPUPfS_iii, .Lfunc_end1-_Z32__device_stub__convolutionColGPUPfS_iii
.cfi_endproc
# -- End function
.globl _Z17convolutionRowCPUPfS_S_iii # -- Begin function _Z17convolutionRowCPUPfS_S_iii
.p2align 4, 0x90
.type _Z17convolutionRowCPUPfS_S_iii,@function
_Z17convolutionRowCPUPfS_S_iii: # @_Z17convolutionRowCPUPfS_S_iii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %ebp
movl %r8d, %ebx
movl %ecx, %r12d
movq %rdx, 16(%rsp) # 8-byte Spill
movq %rsi, %r14
movq %rdi, 8(%rsp) # 8-byte Spill
movslq %r9d, %r13
leaq (,%r13,4), %rax
testl %r13d, %r13d
movq $-1, %rdi
cmovnsq %rax, %rdi
callq _Znam
shrl $31, %r13d
addl %ebp, %r13d
sarl %r13d
subl %r13d, %r12d
cmpl %r12d, %r13d
jge .LBB2_8
# %bb.1: # %.preheader32.lr.ph
addq $-4, 8(%rsp) # 8-byte Folded Spill
movslq %r13d, %rcx
movslq %ebx, %rdx
addl $-2, %ebx
movq %rdx, %rsi
subq %rcx, %rsi
movslq %ebx, %rdi
movslq %r12d, %r8
subl %ebp, %r13d
addl $2, %r13d
movq %rcx, %r9
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_7: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
incq %r9
cmpq %r8, %r9
je .LBB2_8
.LBB2_2: # %.preheader32
# =>This Loop Header: Depth=1
# Child Loop BB2_4 Depth 2
# Child Loop BB2_5 Depth 3
cmpq %rcx, %rsi
jle .LBB2_7
# %bb.3: # %.preheader.lr.ph
# in Loop: Header=BB2_2 Depth=1
movq %r9, %r10
imulq %rdx, %r10
movq 16(%rsp), %r11 # 8-byte Reload
leaq (%r11,%r10,4), %r10
leaq -1(%r9), %r11
imulq %rdi, %r11
movq 8(%rsp), %rbx # 8-byte Reload
leaq (%rbx,%r11,4), %r11
movq %r13, %r12
movq %rcx, %rbp
.p2align 4, 0x90
.LBB2_4: # %.preheader
# Parent Loop BB2_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_5 Depth 3
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_5: # Parent Loop BB2_2 Depth=1
# Parent Loop BB2_4 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r14,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
leal (%r12,%rbx), %r15d
movslq %r15d, %r15
mulss (%r10,%r15,4), %xmm0
movss %xmm0, (%rax,%rbx,4)
incq %rbx
cmpq $3, %rbx
jne .LBB2_5
# %bb.6: # in Loop: Header=BB2_4 Depth=2
movss (%rax), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss 4(%rax), %xmm0
addss 8(%rax), %xmm0
movss %xmm0, (%r11,%rbp,4)
incq %rbp
movslq %ebp, %rbx
incq %r12
cmpq %rbx, %rsi
jg .LBB2_4
jmp .LBB2_7
.LBB2_8: # %._crit_edge38
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z17convolutionRowCPUPfS_S_iii, .Lfunc_end2-_Z17convolutionRowCPUPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z17convolutionColCPUPfS_S_iii # -- Begin function _Z17convolutionColCPUPfS_S_iii
.p2align 4, 0x90
.type _Z17convolutionColCPUPfS_S_iii,@function
_Z17convolutionColCPUPfS_S_iii: # @_Z17convolutionColCPUPfS_S_iii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r12d
movl %r8d, %ebx
movl %ecx, %r13d
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, (%rsp) # 8-byte Spill
movslq %r9d, %rbp
leaq (,%rbp,4), %rax
testl %ebp, %ebp
movq $-1, %rdi
cmovnsq %rax, %rdi
callq _Znam
shrl $31, %ebp
addl %r12d, %ebp
sarl %ebp
subl %ebp, %r13d
cmpl %r13d, %ebp
jge .LBB3_8
# %bb.1: # %.preheader32.lr.ph
addq $-4, (%rsp) # 8-byte Folded Spill
leal -2(%rbx), %edi
movslq %ebp, %rcx
movslq %ebx, %rdx
movq %rdx, %rsi
subq %rcx, %rsi
movslq %edi, %rdi
movslq %r13d, %r8
subl %r12d, %ebp
addl $2, %ebp
imull %ebp, %edx
movq %rcx, %r9
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_7: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
incq %r9
addl %ebx, %edx
cmpq %r8, %r9
je .LBB3_8
.LBB3_2: # %.preheader32
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
# Child Loop BB3_5 Depth 3
cmpq %rcx, %rsi
jle .LBB3_7
# %bb.3: # %.preheader.lr.ph
# in Loop: Header=BB3_2 Depth=1
leaq -1(%r9), %r10
imulq %rdi, %r10
movq (%rsp), %r11 # 8-byte Reload
leaq (%r11,%r10,4), %r10
movq %rcx, %r11
.p2align 4, 0x90
.LBB3_4: # %.preheader
# Parent Loop BB3_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_5 Depth 3
leaq (%r14,%r11,4), %r13
movl %edx, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_5: # Parent Loop BB3_2 Depth=1
# Parent Loop BB3_4 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movslq %ebp, %rbp
mulss (%r13,%rbp,4), %xmm0
movss %xmm0, (%rax,%r12,4)
incq %r12
addl %ebx, %ebp
cmpq $3, %r12
jne .LBB3_5
# %bb.6: # in Loop: Header=BB3_4 Depth=2
movss (%rax), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss 4(%rax), %xmm0
addss 8(%rax), %xmm0
movss %xmm0, (%r10,%r11,4)
incq %r11
movslq %r11d, %r12
cmpq %r12, %rsi
jg .LBB3_4
jmp .LBB3_7
.LBB3_8: # %._crit_edge40
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z17convolutionColCPUPfS_S_iii, .Lfunc_end3-_Z17convolutionColCPUPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z11check_inputiii # -- Begin function _Z11check_inputiii
.p2align 4, 0x90
.type _Z11check_inputiii,@function
_Z11check_inputiii: # @_Z11check_inputiii
.cfi_startproc
# %bb.0:
testl %edi, %edi
jle .LBB4_3
# %bb.1:
testl %esi, %esi
jle .LBB4_3
# %bb.2:
movl $1, %eax
testl %edx, %edx
jle .LBB4_3
# %bb.4:
retq
.LBB4_3:
pushq %rax
.cfi_def_cfa_offset 16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $-1, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z11check_inputiii, .Lfunc_end4-_Z11check_inputiii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI5_0:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI5_1:
.quad 0x3e112e0be0000000 # double 9.9999997171806853E-10
.LCPI5_2:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $4, %edi
jne .LBB5_43
# %bb.1:
movq %rsi, %r15
movl $.Lstr.4, %edi
callq puts@PLT
movq 8(%r15), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movq 16(%r15), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 24(%r15), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
cmpl $128, %r15d
jle .LBB5_3
# %bb.2:
movl $.Lstr.5, %edi
callq puts@PLT
movl $.Lstr.6, %edi
callq puts@PLT
.LBB5_3:
testl %ebx, %ebx
jle .LBB5_6
# %bb.4:
testl %r12d, %r12d
jle .LBB5_6
# %bb.5:
testl %r15d, %r15d
jle .LBB5_6
# %bb.7: # %.critedge
movl $.Lstr.7, %edi
callq puts@PLT
xorl %r14d, %r14d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
movq %r12, 192(%rsp) # 8-byte Spill
movl %r12d, %ebp
movq %rbx, 16(%rsp) # 8-byte Spill
imull %ebx, %ebp
movq %r15, %rbx
shlq $32, %r15
movq %r15, %rdi
sarq $30, %rdi
callq malloc
movq %rax, %r12
movslq %ebp, %rdi
shlq $2, %rdi
callq malloc
movq %rax, %r13
movq %rbx, 112(%rsp) # 8-byte Spill
movl %ebx, %ebx
.p2align 4, 0x90
.LBB5_8: # =>This Inner Loop Header: Depth=1
callq rand
# kill: def $eax killed $eax def $rax
leal 15(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-16, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r12,%r14,4)
incq %r14
cmpq %r14, %rbx
jne .LBB5_8
# %bb.9: # %.preheader.preheader
cmpl $1, %ebp
movl %ebp, %r14d
adcl $0, %r14d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB5_10: # %.preheader
# =>This Inner Loop Header: Depth=1
callq rand
# kill: def $eax killed $eax def $rax
leal 15(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-16, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r13,%rbx,4)
incq %rbx
cmpq %rbx, %r14
jne .LBB5_10
# %bb.11:
movl %ebp, %r14d
leaq (,%r14,4), %rbp
movq %rbp, %rdi
callq malloc
movq %rax, %rbx
movq %rbp, %rdi
callq malloc
movq %rax, 184(%rsp) # 8-byte Spill
leaq 40(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB5_12
# %bb.13: # %_Z14__cudaSafeCall10hipError_tPKci.exit
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB5_14
# %bb.15: # %_Z14__cudaSafeCall10hipError_tPKci.exit97
leaq 24(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB5_16
# %bb.17: # %_Z14__cudaSafeCall10hipError_tPKci.exit99
movq %r14, 176(%rsp) # 8-byte Spill
movq 40(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB5_18
# %bb.19: # %_Z14__cudaSafeCall10hipError_tPKci.exit101
sarq $32, %r15
movl $d_Kernel, %edi
movq %r12, %rsi
movq %r15, %rdx
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
testl %eax, %eax
movq 192(%rsp), %r14 # 8-byte Reload
movq %rbx, 120(%rsp) # 8-byte Spill
jne .LBB5_20
# %bb.21: # %_Z14__cudaSafeCall10hipError_tPKci.exit103
movq 16(%rsp), %rcx # 8-byte Reload
leal 15(%rcx), %eax
testl %ecx, %ecx
cmovnsl %ecx, %eax
sarl $4, %eax
leal 15(%r14), %r15d
testl %r14d, %r14d
cmovnsl %r14d, %r15d
movabsq $68719476752, %rbx # imm = 0x1000000010
sarl $4, %r15d
shlq $32, %r15
orq %rax, %r15
callq clock
movq %rax, start(%rip)
movq %r15, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 184(%rsp), %rbp # 8-byte Reload
jne .LBB5_23
# %bb.22:
movq 32(%rsp), %rax
movq 40(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq 16(%rsp), %rax # 8-byte Reload
movl %eax, 12(%rsp)
movl %r14d, 8(%rsp)
movq 112(%rsp), %rax # 8-byte Reload
movl %eax, 4(%rsp)
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 96(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 4(%rsp), %rax
movq %rax, 160(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z17convolutionRowGPUPfS_iii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_23:
callq hipGetLastError
testl %eax, %eax
jne .LBB5_24
# %bb.27:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB5_28
# %bb.29: # %_Z16__cudaCheckErrorPKci.exit
callq hipDeviceSynchronize
callq clock
movq %rax, row(%rip)
subq start(%rip), %rax
cvtsi2sd %rax, %xmm0
divsd .LCPI5_0(%rip), %xmm0
movl $.L.str.10, %edi
movsd %xmm0, 168(%rsp) # 8-byte Spill
movb $1, %al
callq printf
movq %r15, %rdi
movl $1, %esi
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 112(%rsp), %rbx # 8-byte Reload
jne .LBB5_31
# %bb.30:
movq 24(%rsp), %rax
movq 40(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq 16(%rsp), %rax # 8-byte Reload
movl %eax, 12(%rsp)
movl %r14d, 8(%rsp)
movl %ebx, 4(%rsp)
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 96(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 4(%rsp), %rax
movq %rax, 160(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z17convolutionColGPUPfS_iii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_31:
callq hipGetLastError
testl %eax, %eax
jne .LBB5_32
# %bb.34:
movq %rbp, %r15
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB5_35
# %bb.36: # %_Z16__cudaCheckErrorPKci.exit113
callq hipDeviceSynchronize
movq 32(%rsp), %rsi
movq 120(%rsp), %rdi # 8-byte Reload
movq 176(%rsp), %rbp # 8-byte Reload
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB5_37
# %bb.38: # %_Z14__cudaSafeCall10hipError_tPKci.exit115
movq 24(%rsp), %rsi
movq %r15, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB5_39
# %bb.40: # %_Z17convolutionColCPUPfS_S_iii.exit
cvtsi2sdl 16(%rsp), %xmm1 # 4-byte Folded Reload
cvtsi2sd %r14d, %xmm2
xorps %xmm0, %xmm0
cvtsi2sd %ebx, %xmm0
mulsd %xmm1, %xmm2
mulsd %xmm2, %xmm0
mulsd .LCPI5_1(%rip), %xmm0
movsd 168(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
divsd .LCPI5_2(%rip), %xmm1
divsd %xmm1, %xmm0
movl $.L.str.11, %edi
movb $1, %al
callq printf
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movl $d_Kernel, %edi
callq hipFree
movq %r12, %rdi
callq hipHostFree
movq %r13, %rdi
callq hipHostFree
movq 120(%rsp), %rdi # 8-byte Reload
callq hipHostFree
movq %r15, %rdi
callq hipHostFree
jmp .LBB5_41
.LBB5_43:
movl $.Lstr, %edi
callq puts@PLT
movl $.Lstr.1, %edi
callq puts@PLT
movl $.Lstr.2, %edi
callq puts@PLT
movl $.Lstr.3, %edi
callq puts@PLT
.LBB5_41:
xorl %eax, %eax
jmp .LBB5_42
.LBB5_6: # %_Z11check_inputiii.exit
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $-1, %eax
.LBB5_42:
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB5_12:
.cfi_def_cfa_offset 256
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $323, %ecx # imm = 0x143
jmp .LBB5_26
.LBB5_14:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $324, %ecx # imm = 0x144
jmp .LBB5_26
.LBB5_16:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $325, %ecx # imm = 0x145
jmp .LBB5_26
.LBB5_18:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $328, %ecx # imm = 0x148
jmp .LBB5_26
.LBB5_20:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $329, %ecx # imm = 0x149
jmp .LBB5_26
.LBB5_24:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.13, %esi
jmp .LBB5_25
.LBB5_28:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.14, %esi
.LBB5_25:
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $337, %ecx # imm = 0x151
jmp .LBB5_26
.LBB5_32:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.13, %esi
jmp .LBB5_33
.LBB5_35:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.14, %esi
.LBB5_33:
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $345, %ecx # imm = 0x159
jmp .LBB5_26
.LBB5_37:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $351, %ecx # imm = 0x15F
jmp .LBB5_26
.LBB5_39:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
movl $.L.str.9, %edx
movq %rbx, %rdi
movl $352, %ecx # imm = 0x160
.LBB5_26:
movq %rax, %r8
xorl %eax, %eax
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17convolutionRowGPUPfS_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17convolutionColGPUPfS_iii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $d_Kernel, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $1028, %r9d # imm = 0x404
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type d_Kernel,@object # @d_Kernel
.local d_Kernel
.comm d_Kernel,1028,16
.type start,@object # @start
.bss
.globl start
.p2align 3, 0x0
start:
.quad 0 # 0x0
.size start, 8
.type row,@object # @row
.globl row
.p2align 3, 0x0
row:
.quad 0 # 0x0
.size row, 8
.type col,@object # @col
.globl col
.p2align 3, 0x0
col:
.quad 0 # 0x0
.size col, 8
.type _Z17convolutionRowGPUPfS_iii,@object # @_Z17convolutionRowGPUPfS_iii
.section .rodata,"a",@progbits
.globl _Z17convolutionRowGPUPfS_iii
.p2align 3, 0x0
_Z17convolutionRowGPUPfS_iii:
.quad _Z32__device_stub__convolutionRowGPUPfS_iii
.size _Z17convolutionRowGPUPfS_iii, 8
.type _Z17convolutionColGPUPfS_iii,@object # @_Z17convolutionColGPUPfS_iii
.globl _Z17convolutionColGPUPfS_iii
.p2align 3, 0x0
_Z17convolutionColGPUPfS_iii:
.quad _Z32__device_stub__convolutionColGPUPfS_iii
.size _Z17convolutionColGPUPfS_iii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Input for dimX, dimY, dimK must larger than 0"
.size .L.str, 46
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/CYP0630/CUDA_Practice/master/Convolution2D/convolution2D.hip"
.size .L.str.9, 118
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Row Convolution by using GPU: %f ms.\n"
.size .L.str.10, 38
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Throughput Performance: %f GFLOPs. \n"
.size .L.str.11, 37
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "cudaSafeCall() failed at %s:%i : %s\n"
.size .L.str.12, 37
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "cudaCheckError() failed at %s:%i : %s\n"
.size .L.str.13, 39
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "cudaCheckError() with sync failed at %s:%i : %s\n"
.size .L.str.14, 49
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z17convolutionRowGPUPfS_iii"
.size .L__unnamed_1, 29
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z17convolutionColGPUPfS_iii"
.size .L__unnamed_2, 29
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "d_Kernel"
.size .L__unnamed_3, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Error input Parameter "
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Please Follow Format to Run Program: ./execute_file <dimX> <dimY> <dimK>"
.size .Lstr.1, 73
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Please input <dimX>, <dimY>, <dimK> "
.size .Lstr.2, 37
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "dimX and dimY are width and heights for input image and dimK is size for mask "
.size .Lstr.3, 79
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "Input Data"
.size .Lstr.4, 11
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "Your Mask Size is too large. "
.size .Lstr.5, 30
.type .Lstr.6,@object # @str.6
.Lstr.6:
.asciz "We recommend you change a reasonable number. "
.size .Lstr.6, 46
.type .Lstr.7,@object # @str.7
.Lstr.7:
.asciz "Input is Valid \n"
.size .Lstr.7, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__convolutionRowGPUPfS_iii
.addrsig_sym _Z32__device_stub__convolutionColGPUPfS_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_Kernel
.addrsig_sym _Z17convolutionRowGPUPfS_iii
.addrsig_sym _Z17convolutionColGPUPfS_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <iterator>
#include <thrust/copy.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <chrono>
#include <iostream>
#include <vector>
#include <thrust/sort.h>
int main()
{
    // Elements per benchmark run: 5e8 doubles (~4 GB per vector).
    auto vec_size =500000000;
    // Shared timestamps: the GPU sections below assign into these, while
    // the two CPU sections shadow them with their own locals.
    auto start = std::chrono::high_resolution_clock::now();
    auto end = std::chrono::high_resolution_clock::now();

    // --- CPU baseline: allocation + fill + sort all inside the timed window ---
    {
        std::cout << std::endl << "sort test all std" << std::endl;
        auto start = std::chrono::high_resolution_clock::now();
        std::vector<double> cpuData(vec_size);
        std::for_each(cpuData.begin(), cpuData.end(), [](double &x) { x = rand(); });
        std::sort(cpuData.begin(), cpuData.end());
        auto end = std::chrono::high_resolution_clock::now();
        std::cout << "STD sort took "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                  << "ms.\n";
    }

    // --- CPU baseline: only the sort itself is timed ---
    {
        std::cout << std::endl << "sort test std" << std::endl;
        std::vector<double> cpuData(vec_size);
        std::for_each(cpuData.begin(), cpuData.end(), [](double &x) { x = rand(); });
        auto start = std::chrono::high_resolution_clock::now();
        std::sort(cpuData.begin(), cpuData.end());
        auto end = std::chrono::high_resolution_clock::now();
        std::cout << "STD sort took "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                  << "ms.\n";
    }

    // --- GPU: host fill, device allocation, copies and sort all timed ---
    {
        std::cout << "cuda all sort" << std::endl;
        start = std::chrono::high_resolution_clock::now();
        thrust::host_vector<double> hostVec(vec_size);
        std::for_each(hostVec.begin(), hostVec.end(), [](double &x) { x = rand(); });
        thrust::device_vector<double> devVec = hostVec;
        thrust::sort(devVec.begin(), devVec.end());
        hostVec = devVec;
        end = std::chrono::high_resolution_clock::now();
        std::cout << "Cuda sort took "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                  << "ms.\n";
    }

    // --- GPU: device allocation + both copies + sort timed (fill excluded) ---
    {
        std::cout << "cuda sort" << std::endl;
        thrust::host_vector<double> hostVec(vec_size);
        std::for_each(hostVec.begin(), hostVec.end(), [](double &x) { x = rand(); });
        start = std::chrono::high_resolution_clock::now();
        thrust::device_vector<double> devVec = hostVec;
        thrust::sort(devVec.begin(), devVec.end());
        hostVec = devVec;
        end = std::chrono::high_resolution_clock::now();
        std::cout << "Cuda sort took "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                  << "ms.\n";
    }

    // --- GPU: H2D copy + sort timed; device allocation excluded ---
    {
        std::cout << "cuda sort without aloc included" << std::endl;
        thrust::host_vector<double> hostVec(vec_size);
        std::for_each(hostVec.begin(), hostVec.end(), [](double &x) { x = rand(); });
        thrust::device_vector<double> devVec(vec_size);
        start = std::chrono::high_resolution_clock::now();
        thrust::copy(hostVec.begin(), hostVec.end(), devVec.begin());
        thrust::sort(devVec.begin(), devVec.end());
        end = std::chrono::high_resolution_clock::now();
        hostVec = devVec;
        std::cout << "Cuda sort took "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                  << "ms.\n";
    }

    // --- GPU: only the device-side sort timed ---
    {
        std::cout << "cuda sort without copy and aloc included" << std::endl;
        thrust::host_vector<double> hostVec(vec_size);
        std::for_each(hostVec.begin(), hostVec.end(), [](double &x) { x = rand(); });
        thrust::device_vector<double> devVec = hostVec;
        start = std::chrono::high_resolution_clock::now();
        thrust::sort(devVec.begin(), devVec.end());
        end = std::chrono::high_resolution_clock::now();
        hostVec = devVec;
        std::cout << "Cuda sort took "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                  << "ms.\n";
    }
} | #include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <iterator>
#include <thrust/copy.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <chrono>
#include <vector>
#include <thrust/sort.h>
int main()
{
auto vec_size =500000000;
auto start = std::chrono::high_resolution_clock::now();
auto end = std::chrono::high_resolution_clock::now();
{
std::cout <<std::endl<<"sort test all std"<<std::endl;
auto start = std::chrono::high_resolution_clock::now();
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), [](double & d){d=rand();});
std::sort(vec.begin(), vec.end());
auto end = std::chrono::high_resolution_clock::now();
std::cout << "STD sort took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< "ms.\n";
}
{
std::cout <<std::endl<<"sort test std"<<std::endl;
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), [](double & d){d=rand();});
auto start = std::chrono::high_resolution_clock::now();
std::sort(vec.begin(), vec.end());
auto end = std::chrono::high_resolution_clock::now();
std::cout << "STD sort took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< "ms.\n";
}
{
std::cout<<"cuda all sort"<<std::endl;
start = std::chrono::high_resolution_clock::now();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), [](double & d){d=rand();});
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
hv = d;
end = std::chrono::high_resolution_clock::now();
std::cout << "Cuda sort took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< "ms.\n";
}
{
std::cout<<"cuda sort"<<std::endl;
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), [](double & d){d=rand();});
start = std::chrono::high_resolution_clock::now();
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
hv = d;
end = std::chrono::high_resolution_clock::now();
std::cout << "Cuda sort took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< "ms.\n";
}
{
std::cout<<"cuda sort without aloc included"<<std::endl;
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), [](double & d){d=rand();});
thrust::device_vector<double> d(vec_size);
start = std::chrono::high_resolution_clock::now();
thrust::copy(hv.begin(), hv.end(), d.begin());
thrust::sort(d.begin(), d.end());
end = std::chrono::high_resolution_clock::now();
hv = d;
std::cout << "Cuda sort took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< "ms.\n";
}
{
std::cout<<"cuda sort without copy and aloc included"<<std::endl;
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), [](double & d){d=rand();});
thrust::device_vector<double> d = hv;
start = std::chrono::high_resolution_clock::now();
thrust::sort(d.begin(), d.end());
end = std::chrono::high_resolution_clock::now();
hv = d;
std::cout << "Cuda sort took "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< "ms.\n";
}
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //STL
#include <iostream>
#include <vector>
#include <thread>
#include <time.h>
using namespace std;
////////////////////////////////////////// HOST //////////////////////////////////////////////////////
int N = 20000; //GPU calculations are effective for N > 8k
vector < vector < float > > firstMatrix( N );
vector < vector < float > > secondVectorMatrix( N );
vector < vector < float > > resultsHostMatrix( N );
vector < vector< float > > resultsGPUMatrix( N );
vector < float > firstVector( N, 3.14 );
vector < float > secondVector( N, 2.72 );
vector < float > resultsHost( N );
vector < float > wynikGPU( N, 0 );
////////////////////////////////////////// GPU ////////////////////////////////////////////////////////
float *dev_a1 = 0; float *dev_b1 = 0; float *dev_c1 = 0;
int typeSize = N * sizeof( float );
__global__ void add( float *a, float *b, float *c, int N ) //GPU
{
    // Grid-stride loop: each thread starts at its flat global index and
    // advances by the total thread count, so any launch geometry covers
    // all N elements of c[i] = a[i] + b[i].
    const int stride = blockDim.x * gridDim.x;
    for ( int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride )
        c[ idx ] = a[ idx ] + b[ idx ];
}
// Allocate the three global device buffers (operands dev_a1/dev_b1 and
// result dev_c1), each typeSize = N * sizeof(float) bytes — one matrix row.
// Previously the cudaMalloc return codes were silently discarded; a failed
// allocation would leave null device pointers and make every later copy and
// kernel launch fail mysteriously, so each failure is now reported.
void memInit()
{
    //GPU memory allocation (all three attempted even if one fails,
    //matching the original behavior)
    if ( cudaMalloc( ( void** )&dev_a1, typeSize ) != cudaSuccess )
        std::cerr << "memInit: cudaMalloc failed for dev_a1" << std::endl;
    if ( cudaMalloc( ( void** )&dev_b1, typeSize ) != cudaSuccess )
        std::cerr << "memInit: cudaMalloc failed for dev_b1" << std::endl;
    if ( cudaMalloc( ( void** )&dev_c1, typeSize ) != cudaSuccess )
        std::cerr << "memInit: cudaMalloc failed for dev_c1" << std::endl;
}
// Release the three global device buffers and null the pointers so a stale
// handle cannot be reused or double-freed after teardown (the globals stay
// visible to the rest of the file after memFree() returns).
void memFree()
{
    //free GPU objects
    cudaFree( dev_a1 );
    dev_a1 = 0;
    cudaFree( dev_b1 );
    dev_b1 = 0;
    cudaFree( dev_c1 );
    dev_c1 = 0;
}
// Host-side worker: streams one matrix row to the GPU, launches the
// vector-add kernel on it, and streams the result row back.
// 'iter' points at the caller's loop counter; the caller must keep it alive
// and stable until this function returns (main() joins the spawned thread
// immediately, which guarantees that here).
// NOTE(review): the cudaMemcpyAsync/launch return codes are deliberately
// left unchecked, matching the surrounding best-effort style.
void addMatrixKernel( void *iter )
{
    const int row = *( int * ) iter;
    if ( row >= N )
        return;                      // out-of-range row: nothing to do

    const int nThreads = 1024;       // 1D threads per block for the launch
    //copy / download data in direction HostToDevice
    cudaMemcpyAsync( dev_a1, &firstMatrix[ row ][ 0 ], typeSize, cudaMemcpyHostToDevice );
    cudaMemcpyAsync( dev_b1, &secondVectorMatrix[ row ][ 0 ], typeSize, cudaMemcpyHostToDevice );
    //calculate vectors sum, using max. number of possible 1D Threads per Block
    add<<< ( N + nThreads - 1 ) / nThreads, nThreads >>> ( dev_a1, dev_b1, dev_c1, N );
    //copy / upload results data c[] in direction DeviceToHost
    cudaMemcpyAsync( &resultsGPUMatrix[ row ][ 0 ], dev_c1, typeSize, cudaMemcpyDeviceToHost );
}
////////////////////////////////////////// MAIN ///////////////////////////////////////////////////////
int main ()
{
    // Copy the template vectors into every row of the four matrices
    // (plain host-side CPU work).
    for ( int i = 0; i < N; i++ )
    {
        firstMatrix[ i ] = firstVector;
        secondVectorMatrix[ i ] = secondVector;
        resultsGPUMatrix[ i ] = wynikGPU;
        resultsHostMatrix[ i ] = resultsHost;
    }

    // --- CPU reference: sequential element-wise addition of all rows ---
    clock_t ticks = clock();
    for ( int row = 0; row < N; row++ )
        for ( int col = 0; col < N; col++ )
            resultsHostMatrix[ row ][ col ] = firstMatrix[ row ][ col ] + secondVectorMatrix[ row ][ col ];
    cout << "sequential CPU calculations Host time: " << ((float)(clock() - ticks))/CLOCKS_PER_SEC << "[s] ( g++ )" << endl;

    // --- GPU path: one short-lived thread per row, joined immediately ---
    ticks = clock();
    memInit();
    vector < thread > rowThreads( N );
    for ( int i = 0; i < N; i++ )
    {
        int *rowPtr = &( i );
        rowThreads[ i ] = thread( addMatrixKernel, rowPtr );
        rowThreads[ i ].join();   // immediate join keeps *rowPtr valid
    }
    memFree();
    cout << "Async (single join() + trivial Optimalization) vec<vec<>> GPU time: " << ((float)(clock() - ticks))/CLOCKS_PER_SEC << "[s]" << endl;
    ticks = clock();

    // Spot-check the first element of the first two rows of both results.
    for ( int i = 0; i < 2; i++ )
    {
        cout << "resultsHostMatrix[ " << i << " ][ 0 ]: " << resultsHostMatrix[ i ][ 0 ] << endl;
        cout << "resultsGPUMatrix[ " << i << " ][ 0 ]: " << resultsGPUMatrix[ i ][ 0 ] << endl;
    }
    cudaDeviceReset();
    return 0;
} | code for sm_80
Function : _Z3addPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x001fd400000001ff */
/*0080*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*00d0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x000fca0000000f00 */
/*00e0*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */
/* 0x000fca00078e0200 */
/*00f0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0100*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0110*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001ee000c101904 */
/*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //STL
#include <iostream>
#include <vector>
#include <thread>
#include <time.h>
using namespace std;
////////////////////////////////////////// HOST //////////////////////////////////////////////////////
int N = 20000; //GPU calculations are effective for N > 8k
vector < vector < float > > firstMatrix( N );
vector < vector < float > > secondVectorMatrix( N );
vector < vector < float > > resultsHostMatrix( N );
vector < vector< float > > resultsGPUMatrix( N );
vector < float > firstVector( N, 3.14 );
vector < float > secondVector( N, 2.72 );
vector < float > resultsHost( N );
vector < float > wynikGPU( N, 0 );
////////////////////////////////////////// GPU ////////////////////////////////////////////////////////
float *dev_a1 = 0; float *dev_b1 = 0; float *dev_c1 = 0;
int typeSize = N * sizeof( float );
__global__ void add( float *a, float *b, float *c, int N ) //GPU
{
    // Grid-stride loop: each thread starts at its flat global index and
    // advances by the total thread count, so any launch geometry covers
    // all N elements of c[i] = a[i] + b[i].
    const int stride = blockDim.x * gridDim.x;
    for ( int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride )
        c[ idx ] = a[ idx ] + b[ idx ];
}
// Allocate the three global device buffers (operands dev_a1/dev_b1 and
// result dev_c1), each typeSize = N * sizeof(float) bytes — one matrix row.
// Previously the cudaMalloc return codes were silently discarded; a failed
// allocation would leave null device pointers and make every later copy and
// kernel launch fail mysteriously, so each failure is now reported.
void memInit()
{
    //GPU memory allocation (all three attempted even if one fails,
    //matching the original behavior)
    if ( cudaMalloc( ( void** )&dev_a1, typeSize ) != cudaSuccess )
        std::cerr << "memInit: cudaMalloc failed for dev_a1" << std::endl;
    if ( cudaMalloc( ( void** )&dev_b1, typeSize ) != cudaSuccess )
        std::cerr << "memInit: cudaMalloc failed for dev_b1" << std::endl;
    if ( cudaMalloc( ( void** )&dev_c1, typeSize ) != cudaSuccess )
        std::cerr << "memInit: cudaMalloc failed for dev_c1" << std::endl;
}
// Release the three global device buffers and null the pointers so a stale
// handle cannot be reused or double-freed after teardown (the globals stay
// visible to the rest of the file after memFree() returns).
void memFree()
{
    //free GPU objects
    cudaFree( dev_a1 );
    dev_a1 = 0;
    cudaFree( dev_b1 );
    dev_b1 = 0;
    cudaFree( dev_c1 );
    dev_c1 = 0;
}
// Host-side worker: streams one matrix row to the GPU, launches the
// vector-add kernel on it, and streams the result row back.
// 'iter' points at the caller's loop counter; the caller must keep it alive
// and stable until this function returns (main() joins the spawned thread
// immediately, which guarantees that here).
// NOTE(review): the cudaMemcpyAsync/launch return codes are deliberately
// left unchecked, matching the surrounding best-effort style.
void addMatrixKernel( void *iter )
{
    const int row = *( int * ) iter;
    if ( row >= N )
        return;                      // out-of-range row: nothing to do

    const int nThreads = 1024;       // 1D threads per block for the launch
    //copy / download data in direction HostToDevice
    cudaMemcpyAsync( dev_a1, &firstMatrix[ row ][ 0 ], typeSize, cudaMemcpyHostToDevice );
    cudaMemcpyAsync( dev_b1, &secondVectorMatrix[ row ][ 0 ], typeSize, cudaMemcpyHostToDevice );
    //calculate vectors sum, using max. number of possible 1D Threads per Block
    add<<< ( N + nThreads - 1 ) / nThreads, nThreads >>> ( dev_a1, dev_b1, dev_c1, N );
    //copy / upload results data c[] in direction DeviceToHost
    cudaMemcpyAsync( &resultsGPUMatrix[ row ][ 0 ], dev_c1, typeSize, cudaMemcpyDeviceToHost );
}
////////////////////////////////////////// MAIN ///////////////////////////////////////////////////////
int main ()
{
    // Copy the template vectors into every row of the four matrices
    // (plain host-side CPU work).
    for ( int i = 0; i < N; i++ )
    {
        firstMatrix[ i ] = firstVector;
        secondVectorMatrix[ i ] = secondVector;
        resultsGPUMatrix[ i ] = wynikGPU;
        resultsHostMatrix[ i ] = resultsHost;
    }

    // --- CPU reference: sequential element-wise addition of all rows ---
    clock_t ticks = clock();
    for ( int row = 0; row < N; row++ )
        for ( int col = 0; col < N; col++ )
            resultsHostMatrix[ row ][ col ] = firstMatrix[ row ][ col ] + secondVectorMatrix[ row ][ col ];
    cout << "sequential CPU calculations Host time: " << ((float)(clock() - ticks))/CLOCKS_PER_SEC << "[s] ( g++ )" << endl;

    // --- GPU path: one short-lived thread per row, joined immediately ---
    ticks = clock();
    memInit();
    vector < thread > rowThreads( N );
    for ( int i = 0; i < N; i++ )
    {
        int *rowPtr = &( i );
        rowThreads[ i ] = thread( addMatrixKernel, rowPtr );
        rowThreads[ i ].join();   // immediate join keeps *rowPtr valid
    }
    memFree();
    cout << "Async (single join() + trivial Optimalization) vec<vec<>> GPU time: " << ((float)(clock() - ticks))/CLOCKS_PER_SEC << "[s]" << endl;
    ticks = clock();

    // Spot-check the first element of the first two rows of both results.
    for ( int i = 0; i < 2; i++ )
    {
        cout << "resultsHostMatrix[ " << i << " ][ 0 ]: " << resultsHostMatrix[ i ][ 0 ] << endl;
        cout << "resultsGPUMatrix[ " << i << " ][ 0 ]: " << resultsGPUMatrix[ i ][ 0 ] << endl;
    }
    cudaDeviceReset();
    return 0;
} | .file "tmpxft_0003e4ce_00000000-6_asyncOpto.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.section .text._ZNSt6thread24_M_thread_deps_never_runEv,"axG",@progbits,_ZNSt6thread24_M_thread_deps_never_runEv,comdat
.weak _ZNSt6thread24_M_thread_deps_never_runEv
.type _ZNSt6thread24_M_thread_deps_never_runEv, @function
_ZNSt6thread24_M_thread_deps_never_runEv:
.LFB4111:
.cfi_startproc
endbr64
ret
.cfi_endproc
.LFE4111:
.size _ZNSt6thread24_M_thread_deps_never_runEv, .-_ZNSt6thread24_M_thread_deps_never_runEv
.section .text._ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEE6_M_runEv,"axG",@progbits,_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEE6_M_runEv,comdat
.align 2
.weak _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEE6_M_runEv
.type _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEE6_M_runEv, @function
_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEE6_M_runEv:
.LFB5318:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq %rdi, %rax
movq 8(%rdi), %rdi
call *16(%rax)
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE5318:
.size _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEE6_M_runEv, .-_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEE6_M_runEv
.section .text._ZNSt6vectorIfSaIfEED2Ev,"axG",@progbits,_ZNSt6vectorIfSaIfEED5Ev,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEED2Ev
.type _ZNSt6vectorIfSaIfEED2Ev, @function
_ZNSt6vectorIfSaIfEED2Ev:
.LFB5310:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L7
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L7:
ret
.cfi_endproc
.LFE5310:
.size _ZNSt6vectorIfSaIfEED2Ev, .-_ZNSt6vectorIfSaIfEED2Ev
.weak _ZNSt6vectorIfSaIfEED1Ev
.set _ZNSt6vectorIfSaIfEED1Ev,_ZNSt6vectorIfSaIfEED2Ev
.section .text._ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED2Ev,"axG",@progbits,_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED5Ev,comdat
.align 2
.weak _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED2Ev
.type _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED2Ev, @function
_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED2Ev:
.LFB5314:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq 16+_ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE(%rip), %rax
movq %rax, (%rdi)
call _ZNSt6thread6_StateD2Ev@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE5314:
.size _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED2Ev, .-_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED2Ev
.weak _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED1Ev
.set _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED1Ev,_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED2Ev
.section .text._ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED0Ev,"axG",@progbits,_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED5Ev,comdat
.align 2
.weak _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED0Ev
.type _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED0Ev, @function
_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED0Ev:
.LFB5316:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
leaq 16+_ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE(%rip), %rax
movq %rax, (%rdi)
call _ZNSt6thread6_StateD2Ev@PLT
movl $24, %esi
movq %rbx, %rdi
call _ZdlPvm@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE5316:
.size _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED0Ev, .-_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED0Ev
.text
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4248:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4248:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .text._ZNSt6vectorIS_IfSaIfEESaIS1_EED2Ev,"axG",@progbits,_ZNSt6vectorIS_IfSaIfEESaIS1_EED5Ev,comdat
.align 2
.weak _ZNSt6vectorIS_IfSaIfEESaIS1_EED2Ev
.type _ZNSt6vectorIS_IfSaIfEESaIS1_EED2Ev, @function
_ZNSt6vectorIS_IfSaIfEESaIS1_EED2Ev:
.LFB5320:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movq 8(%rdi), %rbp
movq (%rdi), %rbx
cmpq %rbx, %rbp
jne .L19
.L17:
movq (%r12), %rdi
testq %rdi, %rdi
je .L16
movq 16(%r12), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
.L16:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
addq $24, %rbx
cmpq %rbx, %rbp
je .L17
.L19:
movq (%rbx), %rdi
testq %rdi, %rdi
je .L18
movq 16(%rbx), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
jmp .L18
.cfi_endproc
.LFE5320:
.size _ZNSt6vectorIS_IfSaIfEESaIS1_EED2Ev, .-_ZNSt6vectorIS_IfSaIfEESaIS1_EED2Ev
.weak _ZNSt6vectorIS_IfSaIfEESaIS1_EED1Ev
.set _ZNSt6vectorIS_IfSaIfEESaIS1_EED1Ev,_ZNSt6vectorIS_IfSaIfEESaIS1_EED2Ev
.text
.globl _Z7memInitv
.type _Z7memInitv, @function
_Z7memInitv:
.LFB4242:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movslq typeSize(%rip), %rsi
leaq dev_a1(%rip), %rdi
call cudaMalloc@PLT
movslq typeSize(%rip), %rsi
leaq dev_b1(%rip), %rdi
call cudaMalloc@PLT
movslq typeSize(%rip), %rsi
leaq dev_c1(%rip), %rdi
call cudaMalloc@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4242:
.size _Z7memInitv, .-_Z7memInitv
.globl _Z7memFreev
.type _Z7memFreev, @function
_Z7memFreev:
.LFB4243:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq dev_a1(%rip), %rdi
call cudaFree@PLT
movq dev_b1(%rip), %rdi
call cudaFree@PLT
movq dev_c1(%rip), %rdi
call cudaFree@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4243:
.size _Z7memFreev, .-_Z7memFreev
.globl _Z27__device_stub__Z3addPfS_S_iPfS_S_i
.type _Z27__device_stub__Z3addPfS_S_iPfS_S_i, @function
_Z27__device_stub__Z3addPfS_S_iPfS_S_i:
.LFB4270:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4270:
.size _Z27__device_stub__Z3addPfS_S_iPfS_S_i, .-_Z27__device_stub__Z3addPfS_S_iPfS_S_i
.globl _Z3addPfS_S_i
.type _Z3addPfS_S_i, @function
_Z3addPfS_S_i:
.LFB4271:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4271:
.size _Z3addPfS_S_i, .-_Z3addPfS_S_i
.globl _Z15addMatrixKernelPv
.type _Z15addMatrixKernelPv, @function
_Z15addMatrixKernelPv:
.LFB4244:
.cfi_startproc
endbr64
movl (%rdi), %eax
cmpl N(%rip), %eax
jl .L42
ret
.L42:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $32, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbx
movslq typeSize(%rip), %rdx
cltq
leaq (%rax,%rax,2), %rax
salq $3, %rax
addq firstMatrix(%rip), %rax
movq (%rax), %rsi
movl $0, %r8d
movl $1, %ecx
movq dev_a1(%rip), %rdi
call cudaMemcpyAsync@PLT
movslq typeSize(%rip), %rdx
movslq (%rbx), %rax
leaq (%rax,%rax,2), %rax
salq $3, %rax
addq secondVectorMatrix(%rip), %rax
movq (%rax), %rsi
movl $0, %r8d
movl $1, %ecx
movq dev_b1(%rip), %rdi
call cudaMemcpyAsync@PLT
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl N(%rip), %edx
leal 2046(%rdx), %eax
addl $1023, %edx
cmovns %edx, %eax
sarl $10, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L37:
movslq typeSize(%rip), %rdx
movslq (%rbx), %rax
leaq (%rax,%rax,2), %rax
salq $3, %rax
addq resultsGPUMatrix(%rip), %rax
movq (%rax), %rdi
movl $0, %r8d
movl $2, %ecx
movq dev_c1(%rip), %rsi
call cudaMemcpyAsync@PLT
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
movl N(%rip), %ecx
movq dev_c1(%rip), %rdx
movq dev_b1(%rip), %rsi
movq dev_a1(%rip), %rdi
call _Z27__device_stub__Z3addPfS_S_iPfS_S_i
jmp .L37
.cfi_endproc
.LFE4244:
.size _Z15addMatrixKernelPv, .-_Z15addMatrixKernelPv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3addPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4273:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4273:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .rodata._ZNSt6vectorIS_IfSaIfEESaIS1_EEC2EmRKS2_.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "cannot create std::vector larger than max_size()"
.section .text._ZNSt6vectorIS_IfSaIfEESaIS1_EEC2EmRKS2_,"axG",@progbits,_ZNSt6vectorIS_IfSaIfEESaIS1_EEC5EmRKS2_,comdat
.align 2
.weak _ZNSt6vectorIS_IfSaIfEESaIS1_EEC2EmRKS2_
.type _ZNSt6vectorIS_IfSaIfEESaIS1_EEC2EmRKS2_, @function
_ZNSt6vectorIS_IfSaIfEESaIS1_EEC2EmRKS2_:
.LFB4610:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movabsq $384307168202282325, %rax
cmpq %rsi, %rax
jb .L54
movq %rdi, %rbp
movq $0, (%rdi)
movq $0, 8(%rdi)
movq $0, 16(%rdi)
testq %rsi, %rsi
je .L48
leaq (%rsi,%rsi,2), %rdx
leaq 0(,%rdx,8), %rbx
movq %rbx, %rdi
call _Znwm@PLT
movq %rax, 0(%rbp)
leaq (%rax,%rbx), %rdx
movq %rdx, 16(%rbp)
.L49:
movq $0, (%rax)
movq $0, 8(%rax)
movq $0, 16(%rax)
addq $24, %rax
cmpq %rdx, %rax
jne .L49
.L50:
movq %rdx, 8(%rbp)
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
leaq .LC1(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L48:
movq $0, (%rdi)
movq $0, 16(%rdi)
movl $0, %edx
jmp .L50
.cfi_endproc
.LFE4610:
.size _ZNSt6vectorIS_IfSaIfEESaIS1_EEC2EmRKS2_, .-_ZNSt6vectorIS_IfSaIfEESaIS1_EEC2EmRKS2_
.weak _ZNSt6vectorIS_IfSaIfEESaIS1_EEC1EmRKS2_
.set _ZNSt6vectorIS_IfSaIfEESaIS1_EEC1EmRKS2_,_ZNSt6vectorIS_IfSaIfEESaIS1_EEC2EmRKS2_
.section .text._ZNSt6vectorIfSaIfEEC2EmRKfRKS0_,"axG",@progbits,_ZNSt6vectorIfSaIfEEC5EmRKfRKS0_,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEEC2EmRKfRKS0_
.type _ZNSt6vectorIfSaIfEEC2EmRKfRKS0_, @function
_ZNSt6vectorIfSaIfEEC2EmRKfRKS0_:
.LFB4619:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rsi, %rax
shrq $61, %rax
jne .L63
movq %rdi, %rbx
movq %rdx, %rbp
movq $0, (%rdi)
movq $0, 8(%rdi)
movq $0, 16(%rdi)
testq %rsi, %rsi
je .L57
leaq 0(,%rsi,4), %r12
movq %r12, %rdi
call _Znwm@PLT
movq %rax, (%rbx)
leaq (%rax,%r12), %rdx
movq %rdx, 16(%rbx)
movss 0(%rbp), %xmm0
.L58:
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L58
.L59:
movq %rdx, 8(%rbx)
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
leaq .LC1(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L57:
movq $0, (%rdi)
movq $0, 16(%rdi)
movl $0, %edx
jmp .L59
.cfi_endproc
.LFE4619:
.size _ZNSt6vectorIfSaIfEEC2EmRKfRKS0_, .-_ZNSt6vectorIfSaIfEEC2EmRKfRKS0_
.weak _ZNSt6vectorIfSaIfEEC1EmRKfRKS0_
.set _ZNSt6vectorIfSaIfEEC1EmRKfRKS0_,_ZNSt6vectorIfSaIfEEC2EmRKfRKS0_
.section .text._ZNSt6vectorIfSaIfEEaSERKS1_,"axG",@progbits,_ZNSt6vectorIfSaIfEEaSERKS1_,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEEaSERKS1_
.type _ZNSt6vectorIfSaIfEEaSERKS1_, @function
_ZNSt6vectorIfSaIfEEaSERKS1_:
.LFB4626:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbx
cmpq %rdi, %rsi
je .L65
movq %rsi, %r12
movq (%rsi), %r13
movq 8(%rsi), %rbp
subq %r13, %rbp
movq (%rdi), %rdi
movq 16(%rbx), %rax
subq %rdi, %rax
cmpq %rbp, %rax
jb .L78
movq 8(%rbx), %rdx
subq %rdi, %rdx
cmpq %rbp, %rdx
jb .L72
cmpq $4, %rbp
jle .L73
movq %rbp, %rdx
movq %r13, %rsi
call memmove@PLT
jmp .L71
.L78:
movabsq $9223372036854775804, %rax
cmpq %rbp, %rax
jb .L79
movq %rbp, %rdi
call _Znwm@PLT
movq %rax, %r12
cmpq $4, %rbp
jle .L68
movq %rbp, %rdx
movq %r13, %rsi
movq %rax, %rdi
call memmove@PLT
.L69:
movq (%rbx), %rdi
testq %rdi, %rdi
je .L70
movq 16(%rbx), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
.L70:
movq %r12, (%rbx)
addq %rbp, %r12
movq %r12, 16(%rbx)
.L71:
addq (%rbx), %rbp
movq %rbp, 8(%rbx)
.L65:
movq %rbx, %rax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L79:
.cfi_restore_state
call _ZSt28__throw_bad_array_new_lengthv@PLT
.L68:
movss 0(%r13), %xmm0
movss %xmm0, (%rax)
jmp .L69
.L73:
jne .L71
movss 0(%r13), %xmm0
movss %xmm0, (%rdi)
jmp .L71
.L72:
cmpq $4, %rdx
jle .L74
movq %r13, %rsi
call memmove@PLT
.L75:
movq 8(%rbx), %rdi
movq %rdi, %rsi
subq (%rbx), %rsi
addq (%r12), %rsi
movq 8(%r12), %rdx
subq %rsi, %rdx
cmpq $4, %rdx
jle .L76
call memmove@PLT
jmp .L71
.L74:
jne .L75
movss 0(%r13), %xmm0
movss %xmm0, (%rdi)
jmp .L75
.L76:
jne .L71
movss (%rsi), %xmm0
movss %xmm0, (%rdi)
jmp .L71
.cfi_endproc
.LFE4626:
.size _ZNSt6vectorIfSaIfEEaSERKS1_, .-_ZNSt6vectorIfSaIfEEaSERKS1_
.section .text._ZNSt6vectorISt6threadSaIS0_EED2Ev,"axG",@progbits,_ZNSt6vectorISt6threadSaIS0_EED5Ev,comdat
.align 2
.weak _ZNSt6vectorISt6threadSaIS0_EED2Ev
.type _ZNSt6vectorISt6threadSaIS0_EED2Ev, @function
_ZNSt6vectorISt6threadSaIS0_EED2Ev:
.LFB4643:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 8(%rdi), %rdx
movq (%rdi), %rcx
cmpq %rcx, %rdx
je .L81
movq %rcx, %rax
.L83:
cmpq $0, (%rax)
jne .L87
addq $8, %rax
cmpq %rax, %rdx
jne .L83
.L81:
testq %rcx, %rcx
je .L80
movq 16(%rdi), %rsi
subq %rcx, %rsi
movq %rcx, %rdi
call _ZdlPvm@PLT
.L80:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L87:
.cfi_restore_state
call _ZSt9terminatev@PLT
.cfi_endproc
.LFE4643:
.size _ZNSt6vectorISt6threadSaIS0_EED2Ev, .-_ZNSt6vectorISt6threadSaIS0_EED2Ev
.weak _ZNSt6vectorISt6threadSaIS0_EED1Ev
.set _ZNSt6vectorISt6threadSaIS0_EED1Ev,_ZNSt6vectorISt6threadSaIS0_EED2Ev
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "sequential CPU calculations Host time: "
.section .rodata.str1.1
.LC4:
.string "[s] ( g++ )"
.section .rodata.str1.8
.align 8
.LC5:
.string "Async (single join() + trivial Optimalization) vec<vec<>> GPU time: "
.section .rodata.str1.1
.LC6:
.string "[s]"
.LC7:
.string "resultsHostMatrix[ "
.LC8:
.string " ][ 0 ]: "
.LC9:
.string "resultsGPUMatrix[ "
.text
.globl main
.type main, @function
main:
.LFB4245:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4245
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
cmpl $0, N(%rip)
jle .L89
movl $0, %ebx
movl $0, %ebp
leaq firstVector(%rip), %r15
leaq secondVector(%rip), %r14
leaq wynikGPU(%rip), %r13
leaq resultsHost(%rip), %r12
.L90:
movq %rbx, %rdi
addq firstMatrix(%rip), %rdi
movq %r15, %rsi
.LEHB0:
call _ZNSt6vectorIfSaIfEEaSERKS1_
movq %rbx, %rdi
addq secondVectorMatrix(%rip), %rdi
movq %r14, %rsi
call _ZNSt6vectorIfSaIfEEaSERKS1_
movq %rbx, %rdi
addq resultsGPUMatrix(%rip), %rdi
movq %r13, %rsi
call _ZNSt6vectorIfSaIfEEaSERKS1_
movq %rbx, %rdi
addq resultsHostMatrix(%rip), %rdi
movq %r12, %rsi
call _ZNSt6vectorIfSaIfEEaSERKS1_
addl $1, %ebp
addq $24, %rbx
cmpl %ebp, N(%rip)
jg .L90
.L89:
call clock@PLT
movq %rax, %rbx
movl $0, %edx
movl $0, %r8d
cmpl $0, N(%rip)
jg .L91
.L92:
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbp
call clock@PLT
subq %rbx, %rax
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
divss .LC3(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call clock@PLT
movq %rax, %r15
call _Z7memInitv
movslq N(%rip), %rax
movq %rax, %rbx
shrq $60, %rbx
jne .L133
testq %rax, %rax
je .L97
leaq 0(,%rax,8), %rbp
movq %rbp, %rdi
call _Znwm@PLT
movq %rax, %rbx
movq %rax, 32(%rsp)
leaq (%rax,%rbp), %rdx
movq %rdx, 48(%rsp)
.L98:
movq $0, (%rax)
addq $8, %rax
cmpq %rdx, %rax
jne .L98
.L117:
movq %rdx, 40(%rsp)
movl $0, 12(%rsp)
cmpl $0, N(%rip)
jle .L99
leaq 16+_ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE(%rip), %rbp
leaq 12(%rsp), %r12
leaq _Z15addMatrixKernelPv(%rip), %r13
leaq _ZNSt6thread24_M_thread_deps_never_runEv(%rip), %r14
jmp .L106
.L93:
movq resultsHostMatrix(%rip), %rcx
movq (%rcx,%rdx), %rcx
movq firstMatrix(%rip), %rsi
movq (%rsi,%rdx), %rdi
movq secondVectorMatrix(%rip), %rsi
movq (%rsi,%rdx), %rsi
movss (%rdi,%rax,4), %xmm0
addss (%rsi,%rax,4), %xmm0
movss %xmm0, (%rcx,%rax,4)
addq $1, %rax
cmpl %eax, N(%rip)
jg .L93
.L94:
addl $1, %r8d
addq $24, %rdx
cmpl %r8d, N(%rip)
jle .L92
.L91:
movl $0, %eax
cmpl $0, N(%rip)
jg .L93
jmp .L94
.L133:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L134
leaq .LC1(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.LEHE0:
.L134:
call __stack_chk_fail@PLT
.L137:
movq %rbp, (%rax)
movq %r12, 8(%rax)
movq %r13, 16(%rax)
movq %rax, 24(%rsp)
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdi
movq %r14, %rdx
.LEHB1:
call _ZNSt6thread15_M_start_threadESt10unique_ptrINS_6_StateESt14default_deleteIS1_EEPFvvE@PLT
.LEHE1:
movq 24(%rsp), %rdi
testq %rdi, %rdi
je .L100
movq (%rdi), %rax
call *8(%rax)
.L100:
movslq 12(%rsp), %rax
leaq (%rbx,%rax,8), %rdi
cmpq $0, (%rdi)
jne .L135
movq 16(%rsp), %rax
movq %rax, (%rdi)
.LEHB2:
call _ZNSt6thread4joinEv@PLT
jmp .L136
.L122:
endbr64
movq %rax, %rbx
movq 24(%rsp), %rdi
testq %rdi, %rdi
je .L105
movq (%rdi), %rax
call *8(%rax)
.L105:
leaq 32(%rsp), %rdi
call _ZNSt6vectorISt6threadSaIS0_EED1Ev
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L116
call __stack_chk_fail@PLT
.L135:
call _ZSt9terminatev@PLT
.L136:
movl 12(%rsp), %eax
addl $1, %eax
movl %eax, 12(%rsp)
cmpl N(%rip), %eax
jge .L99
.L106:
movq $0, 16(%rsp)
movl $24, %edi
call _Znwm@PLT
jmp .L137
.L99:
call _Z7memFreev
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbx
call clock@PLT
subq %r15, %rax
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
divss .LC3(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call clock@PLT
movl $0, %r12d
movl $0, %ebp
leaq _ZSt4cout(%rip), %r13
leaq .LC8(%rip), %r14
jmp .L115
.L145:
movl %ebp, %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movl $9, %edx
movq %r14, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq resultsHostMatrix(%rip), %rax
movq (%rax,%r12), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r15
testq %r15, %r15
je .L138
cmpb $0, 56(%r15)
je .L109
movzbl 67(%r15), %esi
.L110:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
jmp .L139
.L138:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L140
call _ZSt16__throw_bad_castv@PLT
.L121:
endbr64
movq %rax, %rbx
jmp .L105
.L140:
call __stack_chk_fail@PLT
.L109:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L110
.L139:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $18, %edx
leaq .LC9(%rip), %rsi
movq %r13, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebp, %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movl $9, %edx
movq %r14, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq resultsGPUMatrix(%rip), %rax
movq (%rax,%r12), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r15
testq %r15, %r15
je .L141
cmpb $0, 56(%r15)
je .L113
movzbl 67(%r15), %esi
.L114:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
jmp .L142
.L141:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L143
call _ZSt16__throw_bad_castv@PLT
.L143:
call __stack_chk_fail@PLT
.L113:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L114
.L142:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addl $1, %ebp
addq $24, %r12
cmpl $2, %ebp
je .L144
.L115:
movl $19, %edx
leaq .LC7(%rip), %rsi
movq %r13, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
jmp .L145
.L144:
call cudaDeviceReset@PLT
.LEHE2:
leaq 32(%rsp), %rdi
call _ZNSt6vectorISt6threadSaIS0_EED1Ev
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L146
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L116:
.cfi_restore_state
movq %rbx, %rdi
.LEHB3:
call _Unwind_Resume@PLT
.LEHE3:
.L97:
movq $0, 32(%rsp)
movq $0, 48(%rsp)
movl $0, %ebx
movl $0, %edx
jmp .L117
.L146:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4245:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4245:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4245-.LLSDACSB4245
.LLSDACSB4245:
.uleb128 .LEHB0-.LFB4245
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB4245
.uleb128 .LEHE1-.LEHB1
.uleb128 .L122-.LFB4245
.uleb128 0
.uleb128 .LEHB2-.LFB4245
.uleb128 .LEHE2-.LEHB2
.uleb128 .L121-.LFB4245
.uleb128 0
.uleb128 .LEHB3-.LFB4245
.uleb128 .LEHE3-.LEHB3
.uleb128 0
.uleb128 0
.LLSDACSE4245:
.text
.size main, .-main
.type _GLOBAL__sub_I_N, @function
_GLOBAL__sub_I_N:
.LFB5339:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $24, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
leaq 4(%rsp), %rbp
movslq N(%rip), %rsi
movq %rbp, %rdx
leaq firstMatrix(%rip), %r12
movq %r12, %rdi
call _ZNSt6vectorIS_IfSaIfEESaIS1_EEC1EmRKS2_
leaq __dso_handle(%rip), %rbx
movq %rbx, %rdx
movq %r12, %rsi
leaq _ZNSt6vectorIS_IfSaIfEESaIS1_EED1Ev(%rip), %r12
movq %r12, %rdi
call __cxa_atexit@PLT
movslq N(%rip), %rsi
movq %rbp, %rdx
leaq secondVectorMatrix(%rip), %r13
movq %r13, %rdi
call _ZNSt6vectorIS_IfSaIfEESaIS1_EEC1EmRKS2_
movq %rbx, %rdx
movq %r13, %rsi
movq %r12, %rdi
call __cxa_atexit@PLT
movslq N(%rip), %rsi
movq %rbp, %rdx
leaq resultsHostMatrix(%rip), %r13
movq %r13, %rdi
call _ZNSt6vectorIS_IfSaIfEESaIS1_EEC1EmRKS2_
movq %rbx, %rdx
movq %r13, %rsi
movq %r12, %rdi
call __cxa_atexit@PLT
movslq N(%rip), %rsi
movq %rbp, %rdx
leaq resultsGPUMatrix(%rip), %r13
movq %r13, %rdi
call _ZNSt6vectorIS_IfSaIfEESaIS1_EEC1EmRKS2_
movq %rbx, %rdx
movq %r13, %rsi
movq %r12, %rdi
call __cxa_atexit@PLT
movl $0x4048f5c3, 4(%rsp)
leaq 3(%rsp), %r13
movslq N(%rip), %rsi
movq %r13, %rcx
movq %rbp, %rdx
leaq firstVector(%rip), %r12
movq %r12, %rdi
call _ZNSt6vectorIfSaIfEEC1EmRKfRKS0_
movq %rbx, %rdx
movq %r12, %rsi
leaq _ZNSt6vectorIfSaIfEED1Ev(%rip), %r12
movq %r12, %rdi
call __cxa_atexit@PLT
movl $0x402e147b, 4(%rsp)
movslq N(%rip), %rsi
movq %r13, %rcx
movq %rbp, %rdx
leaq secondVector(%rip), %rbp
movq %rbp, %rdi
call _ZNSt6vectorIfSaIfEEC1EmRKfRKS0_
movq %rbx, %rdx
movq %rbp, %rsi
movq %r12, %rdi
call __cxa_atexit@PLT
movslq N(%rip), %rbp
movq %rbp, %rax
shrq $61, %rax
jne .L158
movq $0, resultsHost(%rip)
movq $0, 8+resultsHost(%rip)
movq $0, 16+resultsHost(%rip)
testq %rbp, %rbp
je .L150
leaq 0(,%rbp,4), %rbx
movq %rbx, %rdi
call _Znwm@PLT
movq %rax, resultsHost(%rip)
leaq (%rax,%rbx), %rdx
movq %rdx, 16+resultsHost(%rip)
movl $0x00000000, (%rax)
addq $4, %rax
cmpq $1, %rbp
je .L154
cmpq %rdx, %rax
je .L151
.L152:
movl $0x00000000, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L152
jmp .L151
.L158:
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L159
leaq .LC1(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L159:
call __stack_chk_fail@PLT
.L154:
movq %rax, %rdx
jmp .L151
.L150:
movq $0, resultsHost(%rip)
movq $0, 16+resultsHost(%rip)
movl $0, %edx
.L151:
movq %rdx, 8+resultsHost(%rip)
leaq __dso_handle(%rip), %r12
movq %r12, %rdx
leaq resultsHost(%rip), %rsi
leaq _ZNSt6vectorIfSaIfEED1Ev(%rip), %rbx
movq %rbx, %rdi
call __cxa_atexit@PLT
movl $0x00000000, 4(%rsp)
leaq 3(%rsp), %rcx
leaq 4(%rsp), %rdx
movslq N(%rip), %rsi
leaq wynikGPU(%rip), %rbp
movq %rbp, %rdi
call _ZNSt6vectorIfSaIfEEC1EmRKfRKS0_
movq %r12, %rdx
movq %rbp, %rsi
movq %rbx, %rdi
call __cxa_atexit@PLT
movl N(%rip), %eax
sall $2, %eax
movl %eax, typeSize(%rip)
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L160
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L160:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE5339:
.size _GLOBAL__sub_I_N, .-_GLOBAL__sub_I_N
.section .init_array
.align 8
.quad _GLOBAL__sub_I_N
.weak _ZTSNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE
.section .rodata._ZTSNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE,"aG",@progbits,_ZTSNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE,comdat
.align 32
.type _ZTSNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE, @object
.size _ZTSNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE, 62
_ZTSNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE:
.string "NSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE"
.weak _ZTINSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE
.section .data.rel.ro._ZTINSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE,"awG",@progbits,_ZTINSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE,comdat
.align 8
.type _ZTINSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE, @object
.size _ZTINSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE, 24
_ZTINSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE:
.quad _ZTVN10__cxxabiv120__si_class_type_infoE+16
.quad _ZTSNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE
.quad _ZTINSt6thread6_StateE
.weak _ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE
.section .data.rel.ro.local._ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE,"awG",@progbits,_ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE,comdat
.align 8
.type _ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE, @object
.size _ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE, 40
_ZTVNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE:
.quad 0
.quad _ZTINSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEEE
.quad _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED1Ev
.quad _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEED0Ev
.quad _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPiEEEEE6_M_runEv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl typeSize
.bss
.align 4
.type typeSize, @object
.size typeSize, 4
typeSize:
.zero 4
.globl dev_c1
.align 8
.type dev_c1, @object
.size dev_c1, 8
dev_c1:
.zero 8
.globl dev_b1
.align 8
.type dev_b1, @object
.size dev_b1, 8
dev_b1:
.zero 8
.globl dev_a1
.align 8
.type dev_a1, @object
.size dev_a1, 8
dev_a1:
.zero 8
.globl wynikGPU
.align 16
.type wynikGPU, @object
.size wynikGPU, 24
wynikGPU:
.zero 24
.globl resultsHost
.align 16
.type resultsHost, @object
.size resultsHost, 24
resultsHost:
.zero 24
.globl secondVector
.align 16
.type secondVector, @object
.size secondVector, 24
secondVector:
.zero 24
.globl firstVector
.align 16
.type firstVector, @object
.size firstVector, 24
firstVector:
.zero 24
.globl resultsGPUMatrix
.align 16
.type resultsGPUMatrix, @object
.size resultsGPUMatrix, 24
resultsGPUMatrix:
.zero 24
.globl resultsHostMatrix
.align 16
.type resultsHostMatrix, @object
.size resultsHostMatrix, 24
resultsHostMatrix:
.zero 24
.globl secondVectorMatrix
.align 16
.type secondVectorMatrix, @object
.size secondVectorMatrix, 24
secondVectorMatrix:
.zero 24
.globl firstMatrix
.align 16
.type firstMatrix, @object
.size firstMatrix, 24
firstMatrix:
.zero 24
.globl N
.data
.align 4
.type N, @object
.size N, 4
N:
.long 20000
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1232348160
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.hidden __dso_handle
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //STL
#include <iostream>
#include <vector>
#include <thread>
#include <time.h>
using namespace std;
////////////////////////////////////////// HOST //////////////////////////////////////////////////////
int N = 20000; //GPU calculations are effective for N > 8k
vector < vector < float > > firstMatrix( N );
vector < vector < float > > secondVectorMatrix( N );
vector < vector < float > > resultsHostMatrix( N );
vector < vector< float > > resultsGPUMatrix( N );
vector < float > firstVector( N, 3.14 );
vector < float > secondVector( N, 2.72 );
vector < float > resultsHost( N );
vector < float > wynikGPU( N, 0 );
////////////////////////////////////////// GPU ////////////////////////////////////////////////////////
float *dev_a1 = 0; float *dev_b1 = 0; float *dev_c1 = 0;
int typeSize = N * sizeof( float );
__global__ void add( float *a, float *b, float *c, int N ) //GPU
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while ( tid < N )
{
c[ tid ] = a[ tid ] + b[ tid ];
tid += blockDim.x * gridDim.x;
}
}
void memInit()
{
//GPU memory allocation
cudaMalloc( ( void** )&dev_a1, typeSize );
cudaMalloc( ( void** )&dev_b1, typeSize );
cudaMalloc( ( void** )&dev_c1, typeSize );
}
void memFree()
{
//free GPU objects
cudaFree( dev_a1 );
cudaFree( dev_b1 );
cudaFree( dev_c1 );
}
void addMatrixKernel( void *iter )
{
if ( *( int * )iter < N )
{
const int nThreads = 1024;
//copy / download data in direction HostToDevice
cudaMemcpyAsync( dev_a1, &firstMatrix[ *( int * ) iter ][ 0 ], typeSize, cudaMemcpyHostToDevice );
cudaMemcpyAsync( dev_b1, &secondVectorMatrix[ *( int * ) iter ][ 0 ], typeSize, cudaMemcpyHostToDevice );
//calculate vectors sum, using max. number of possible 1D Threads per Block
add<<< ( N + nThreads - 1 ) / nThreads, nThreads >>> ( dev_a1, dev_b1, dev_c1, N );
//copy / upload results data c[] in direction DeviceToHost
cudaMemcpyAsync( &resultsGPUMatrix[ *( int * ) iter ][ 0 ], dev_c1, typeSize, cudaMemcpyDeviceToHost );
}
}
////////////////////////////////////////// MAIN ///////////////////////////////////////////////////////
int main ()
{
for ( int i = 0; i < N; i++ ) //basic data processing on Host CPU
{
firstMatrix[ i ] = firstVector;
secondVectorMatrix[ i ] = secondVector;
resultsGPUMatrix[ i ] = wynikGPU;
resultsHostMatrix[ i ] = resultsHost;
}
clock_t t;
t = clock();
for ( int j = 0; j < N; j++ )
for ( int i = 0; i < N; i++ )
resultsHostMatrix[ j ][ i ] = firstMatrix[ j ][ i ] + secondVectorMatrix[ j ][ i ];
cout << "sequential CPU calculations Host time: " << ((float)(clock() - t))/CLOCKS_PER_SEC << "[s] ( g++ )" << endl;
t = clock();
memInit();
vector < thread > gpuAsync3( N );
for ( int i = 0; i < N; i++ )
{
int *iPtr = &( i );
gpuAsync3[ i ] = thread( addMatrixKernel, iPtr );
gpuAsync3[ i ].join();
}
memFree();
cout << "Async (single join() + trivial Optimalization) vec<vec<>> GPU time: " << ((float)(clock() - t))/CLOCKS_PER_SEC << "[s]" << endl;
t = clock();
for ( int i = 0; i < 2; i++ )
{
cout << "resultsHostMatrix[ " << i << " ][ 0 ]: " << resultsHostMatrix[ i ][ 0 ] << endl;
cout << "resultsGPUMatrix[ " << i << " ][ 0 ]: " << resultsGPUMatrix[ i ][ 0 ] << endl;
}
cudaDeviceReset();
return 0;
} | //STL
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <thread>
#include <time.h>
using namespace std;
////////////////////////////////////////// HOST //////////////////////////////////////////////////////
int N = 20000; //GPU calculations are effective for N > 8k
vector < vector < float > > firstMatrix( N );
vector < vector < float > > secondVectorMatrix( N );
vector < vector < float > > resultsHostMatrix( N );
vector < vector< float > > resultsGPUMatrix( N );
vector < float > firstVector( N, 3.14 );
vector < float > secondVector( N, 2.72 );
vector < float > resultsHost( N );
vector < float > wynikGPU( N, 0 );
////////////////////////////////////////// GPU ////////////////////////////////////////////////////////
float *dev_a1 = 0; float *dev_b1 = 0; float *dev_c1 = 0;
int typeSize = N * sizeof( float );
__global__ void add( float *a, float *b, float *c, int N ) //GPU
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while ( tid < N )
{
c[ tid ] = a[ tid ] + b[ tid ];
tid += blockDim.x * gridDim.x;
}
}
void memInit()
{
//GPU memory allocation
hipMalloc( ( void** )&dev_a1, typeSize );
hipMalloc( ( void** )&dev_b1, typeSize );
hipMalloc( ( void** )&dev_c1, typeSize );
}
void memFree()
{
//free GPU objects
hipFree( dev_a1 );
hipFree( dev_b1 );
hipFree( dev_c1 );
}
void addMatrixKernel( void *iter )
{
if ( *( int * )iter < N )
{
const int nThreads = 1024;
//copy / download data in direction HostToDevice
hipMemcpyAsync( dev_a1, &firstMatrix[ *( int * ) iter ][ 0 ], typeSize, hipMemcpyHostToDevice );
hipMemcpyAsync( dev_b1, &secondVectorMatrix[ *( int * ) iter ][ 0 ], typeSize, hipMemcpyHostToDevice );
//calculate vectors sum, using max. number of possible 1D Threads per Block
add<<< ( N + nThreads - 1 ) / nThreads, nThreads >>> ( dev_a1, dev_b1, dev_c1, N );
//copy / upload results data c[] in direction DeviceToHost
hipMemcpyAsync( &resultsGPUMatrix[ *( int * ) iter ][ 0 ], dev_c1, typeSize, hipMemcpyDeviceToHost );
}
}
////////////////////////////////////////// MAIN ///////////////////////////////////////////////////////
int main ()
{
for ( int i = 0; i < N; i++ ) //basic data processing on Host CPU
{
firstMatrix[ i ] = firstVector;
secondVectorMatrix[ i ] = secondVector;
resultsGPUMatrix[ i ] = wynikGPU;
resultsHostMatrix[ i ] = resultsHost;
}
clock_t t;
t = clock();
for ( int j = 0; j < N; j++ )
for ( int i = 0; i < N; i++ )
resultsHostMatrix[ j ][ i ] = firstMatrix[ j ][ i ] + secondVectorMatrix[ j ][ i ];
cout << "sequential CPU calculations Host time: " << ((float)(clock() - t))/CLOCKS_PER_SEC << "[s] ( g++ )" << endl;
t = clock();
memInit();
vector < thread > gpuAsync3( N );
for ( int i = 0; i < N; i++ )
{
int *iPtr = &( i );
gpuAsync3[ i ] = thread( addMatrixKernel, iPtr );
gpuAsync3[ i ].join();
}
memFree();
cout << "Async (single join() + trivial Optimalization) vec<vec<>> GPU time: " << ((float)(clock() - t))/CLOCKS_PER_SEC << "[s]" << endl;
t = clock();
for ( int i = 0; i < 2; i++ )
{
cout << "resultsHostMatrix[ " << i << " ][ 0 ]: " << resultsHostMatrix[ i ][ 0 ] << endl;
cout << "resultsGPUMatrix[ " << i << " ][ 0 ]: " << resultsGPUMatrix[ i ][ 0 ] << endl;
}
hipDeviceReset();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //STL
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <thread>
#include <time.h>
using namespace std;
////////////////////////////////////////// HOST //////////////////////////////////////////////////////
int N = 20000; //GPU calculations are effective for N > 8k
vector < vector < float > > firstMatrix( N );
vector < vector < float > > secondVectorMatrix( N );
vector < vector < float > > resultsHostMatrix( N );
vector < vector< float > > resultsGPUMatrix( N );
vector < float > firstVector( N, 3.14 );
vector < float > secondVector( N, 2.72 );
vector < float > resultsHost( N );
vector < float > wynikGPU( N, 0 );
////////////////////////////////////////// GPU ////////////////////////////////////////////////////////
float *dev_a1 = 0; float *dev_b1 = 0; float *dev_c1 = 0;
int typeSize = N * sizeof( float );
// Element-wise vector addition on the device: c[i] = a[i] + b[i] for i in [0, N).
// Written as a grid-stride loop, so any <<<grid, block>>> configuration covers
// all N elements regardless of how many threads were launched.
__global__ void add( float *a, float *b, float *c, int N ) //GPU
{
    const int stride = blockDim.x * gridDim.x;
    for ( int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride )
    {
        c[ idx ] = a[ idx ] + b[ idx ];
    }
}
// Allocate the three global device buffers (typeSize bytes each) used by
// addMatrixKernel: dev_a1/dev_b1 receive the input rows, dev_c1 holds the result.
// NOTE(review): the hipMalloc return codes are ignored — a failed allocation
// leaves the corresponding dev_* pointer NULL (their initial value) and every
// later copy/launch fails without diagnosis; consider checking hipError_t here.
void memInit()
{
//GPU memory allocation
hipMalloc( ( void** )&dev_a1, typeSize );
hipMalloc( ( void** )&dev_b1, typeSize );
hipMalloc( ( void** )&dev_c1, typeSize );
}
// Release the device buffers allocated by memInit().
// The three frees are independent; order is irrelevant.
void memFree()
{
    hipFree( dev_c1 );
    hipFree( dev_b1 );
    hipFree( dev_a1 );
}
// Host-side worker run on its own std::thread from main(); *iter is an int row
// index into the global matrices. Copies one row pair to the device, launches
// the add kernel over it, and copies the result row back.
// NOTE(review): every invocation shares the single global buffers
// dev_a1/dev_b1/dev_c1 and the default stream — truly concurrent calls would
// race on them; main() currently join()s each thread immediately, which
// serializes the work. Confirm before reusing this function concurrently.
// NOTE(review): the hipMemcpyAsync calls and the kernel launch are unchecked;
// errors would only surface (or be lost) at a later synchronizing call.
void addMatrixKernel( void *iter )
{
if ( *( int * )iter < N )
{
const int nThreads = 1024; // 1D block size; ceil-div below sizes the grid for N elements
//copy / download data in direction HostToDevice
hipMemcpyAsync( dev_a1, &firstMatrix[ *( int * ) iter ][ 0 ], typeSize, hipMemcpyHostToDevice );
hipMemcpyAsync( dev_b1, &secondVectorMatrix[ *( int * ) iter ][ 0 ], typeSize, hipMemcpyHostToDevice );
//calculate vectors sum, using max. number of possible 1D Threads per Block
add<<< ( N + nThreads - 1 ) / nThreads, nThreads >>> ( dev_a1, dev_b1, dev_c1, N );
//copy / upload results data c[] in direction DeviceToHost
hipMemcpyAsync( &resultsGPUMatrix[ *( int * ) iter ][ 0 ], dev_c1, typeSize, hipMemcpyDeviceToHost );
}
}
////////////////////////////////////////// MAIN ///////////////////////////////////////////////////////
// Benchmark driver: compares a sequential CPU element-wise matrix addition
// against per-row GPU kernel launches driven from short-lived std::threads,
// then prints the first element of the first two result rows from each path.
int main ()
{
for ( int i = 0; i < N; i++ ) //basic data processing on Host CPU
{
// Expand each row to N elements (the global matrices start as N empty rows).
firstMatrix[ i ] = firstVector;
secondVectorMatrix[ i ] = secondVector;
resultsGPUMatrix[ i ] = wynikGPU;
resultsHostMatrix[ i ] = resultsHost;
}
clock_t t;
t = clock();
// CPU reference: element-wise sum of the two N x N matrices.
for ( int j = 0; j < N; j++ )
for ( int i = 0; i < N; i++ )
resultsHostMatrix[ j ][ i ] = firstMatrix[ j ][ i ] + secondVectorMatrix[ j ][ i ];
cout << "sequential CPU calculations Host time: " << ((float)(clock() - t))/CLOCKS_PER_SEC << "[s] ( g++ )" << endl;
t = clock();
memInit();
vector < thread > gpuAsync3( N );
for ( int i = 0; i < N; i++ )
{
// iPtr aliases the loop variable; this is only safe because the thread is
// join()ed immediately below, before i changes — do not separate these steps.
int *iPtr = &( i );
gpuAsync3[ i ] = thread( addMatrixKernel, iPtr );
// Joining right away serializes the "async" work one row at a time.
gpuAsync3[ i ].join();
}
memFree();
// NOTE(review): there is no explicit hipDeviceSynchronize() before the GPU
// results are read below — the printed resultsGPUMatrix values rely on the
// device-to-host copies in addMatrixKernel having completed; confirm.
cout << "Async (single join() + trivial Optimalization) vec<vec<>> GPU time: " << ((float)(clock() - t))/CLOCKS_PER_SEC << "[s]" << endl;
t = clock();
for ( int i = 0; i < 2; i++ )
{
cout << "resultsHostMatrix[ " << i << " ][ 0 ]: " << resultsHostMatrix[ i ][ 0 ] << endl;
cout << "resultsGPUMatrix[ " << i << " ][ 0 ]: " << resultsGPUMatrix[ i ][ 0 ] << endl;
}
hipDeviceReset();
return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPfS_S_i
.globl _Z3addPfS_S_i
.p2align 8
.type _Z3addPfS_S_i,@function
_Z3addPfS_S_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b32 s10, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s10, s9
s_mov_b32 s9, 0
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, s0, s2, v2
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[6:7], off
v_add_nc_u32_e32 v1, s1, v1
v_add_co_ci_u32_e64 v3, s0, s3, v3, s0
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s8, v1
global_store_b32 v[2:3], v0, off
s_or_b32 s9, vcc_lo, s9
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPfS_S_i, .Lfunc_end0-_Z3addPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x001fd400000001ff */
/*0080*/ IMAD.WIDE R2, R0, R7, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R4, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*00d0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x000fca0000000f00 */
/*00e0*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */
/* 0x000fca00078e0200 */
/*00f0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0100*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0110*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0001ee000c101904 */
/*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPfS_S_i
.globl _Z3addPfS_S_i
.p2align 8
.type _Z3addPfS_S_i,@function
_Z3addPfS_S_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b32 s10, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s10, s9
s_mov_b32 s9, 0
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, s0, s2, v2
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[6:7], off
v_add_nc_u32_e32 v1, s1, v1
v_add_co_ci_u32_e64 v3, s0, s3, v3, s0
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s8, v1
global_store_b32 v[2:3], v0, off
s_or_b32 s9, vcc_lo, s9
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPfS_S_i, .Lfunc_end0-_Z3addPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
int main(int argc, char **argv)
{
printf("Hallo World from CPU!\n");
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
int main(int argc, char **argv)
{
printf("Hallo World from CPU!\n");
} | .file "tmpxft_0002e493_00000000-6_hello.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hallo World from CPU!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
int main(int argc, char **argv)
{
printf("Hallo World from CPU!\n");
} | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv)
{
printf("Hallo World from CPU!\n");
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv)
{
printf("Hallo World from CPU!\n");
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv)
{
printf("Hallo World from CPU!\n");
} | .text
.file "hello.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hallo World from CPU!"
.size .Lstr, 22
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002e493_00000000-6_hello.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hallo World from CPU!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hello.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hallo World from CPU!"
.size .Lstr, 22
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
/*
* file name: mm_omp_vs_cuda.cu
*
* mm_omp_vs_cuda.cu contains the code that realize some common used matrix operations in CUDA, and
* an implementation of matrix multiplication speedup via openmp, this is a practice to compare the
* of performance of cuda and openmp, as well as a trail of using cuda and openmp in the same program
*
* this is a toy program for learning CUDA, some functions are reusable in other project
* note:
* compile: nvcc -Xcompiler \-fopenmp -lgomp mm_omp_vs_cuda.cu
*/
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU device pointer to a n X n matrix (A)
&b CPU device pointer to a n X n matrix (B)
&c CPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU host pointer to a m X n matrix (A)
&b CPU host pointer to a n X k matrix (B)
&c CPU host output purpose pointer to a m X k matrix (C)
to store the result
return: none
*********************************************************************
*/
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if( col < k && row < m)
{
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
} | code for sm_80
Function : _Z15gpu_matrix_multPiS_S_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R4, c[0x0][0x17c] ; /* 0x00005f0000047a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R24, -RZ, RZ, 0, 0 ; /* 0x00000000ff187435 */
/* 0x000fe400000001ff */
/*00d0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fda0003f06270 */
/*00e0*/ @!P0 BRA 0xc30 ; /* 0x00000b4000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R2, R4.reuse, -0x1, RZ ; /* 0xffffffff04027810 */
/* 0x040fe40007ffe0ff */
/*0100*/ LOP3.LUT R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fe400078ec0ff */
/*0110*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f06070 */
/*0120*/ MOV R2, RZ ; /* 0x000000ff00027202 */
/* 0x000fe40000000f00 */
/*0130*/ MOV R24, RZ ; /* 0x000000ff00187202 */
/* 0x000fd20000000f00 */
/*0140*/ @!P0 BRA 0xb20 ; /* 0x000009d000008947 */
/* 0x000fea0003800000 */
/*0150*/ IADD3 R5, -R4, c[0x0][0x17c], RZ ; /* 0x00005f0004057a10 */
/* 0x000fe20007ffe1ff */
/*0160*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0170*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0180*/ IMAD R6, R3, c[0x0][0x17c], RZ ; /* 0x00005f0003067a24 */
/* 0x000fe200078e02ff */
/*0190*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f04270 */
/*01a0*/ MOV R2, RZ ; /* 0x000000ff00027202 */
/* 0x000fca0000000f00 */
/*01b0*/ IMAD.WIDE R8, R0, R9, c[0x0][0x168] ; /* 0x00005a0000087625 */
/* 0x000fcc00078e0209 */
/*01c0*/ @!P0 BRA 0x980 ; /* 0x000007b000008947 */
/* 0x000fea0003800000 */
/*01d0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fe40003f24270 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01f0*/ @!P1 BRA 0x6b0 ; /* 0x000004b000009947 */
/* 0x000fea0003800000 */
/*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R21, [R8.64] ; /* 0x0000000408157981 */
/* 0x0000a2000c1e1900 */
/*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0240*/ IMAD.WIDE R12, R6, 0x4, R12 ; /* 0x00000004060c7825 */
/* 0x000fca00078e020c */
/*0250*/ LDG.E R20, [R12.64] ; /* 0x000000040c147981 */
/* 0x000ea2000c1e1900 */
/*0260*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */
/* 0x000fc60000000f00 */
/*0270*/ LDG.E R14, [R12.64+0x4] ; /* 0x000004040c0e7981 */
/* 0x000ee4000c1e1900 */
/*0280*/ IMAD.WIDE R10, R7.reuse, 0x4, R8 ; /* 0x00000004070a7825 */
/* 0x040fe400078e0208 */
/*0290*/ LDG.E R27, [R12.64+0x8] ; /* 0x000008040c1b7981 */
/* 0x000f28000c1e1900 */
/*02a0*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x0002e2000c1e1900 */
/*02b0*/ IMAD.WIDE R22, R7, 0x4, R10 ; /* 0x0000000407167825 */
/* 0x000fc600078e020a */
/*02c0*/ LDG.E R18, [R12.64+0xc] ; /* 0x00000c040c127981 */
/* 0x000f66000c1e1900 */
/*02d0*/ IMAD.WIDE R28, R7.reuse, 0x4, R22 ; /* 0x00000004071c7825 */
/* 0x040fe200078e0216 */
/*02e0*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x000328000c1e1900 */
/*02f0*/ LDG.E R19, [R28.64] ; /* 0x000000041c137981 */
/* 0x000362000c1e1900 */
/*0300*/ IMAD.WIDE R16, R7, 0x4, R28 ; /* 0x0000000407107825 */
/* 0x000fc600078e021c */
/*0310*/ LDG.E R8, [R12.64+0x10] ; /* 0x000010040c087981 */
/* 0x001f68000c1e1900 */
/*0320*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */
/* 0x000168000c1e1900 */
/*0330*/ LDG.E R10, [R12.64+0x14] ; /* 0x000014040c0a7981 */
/* 0x002f68000c1e1900 */
/*0340*/ LDG.E R28, [R12.64+0x1c] ; /* 0x00001c040c1c7981 */
/* 0x000f62000c1e1900 */
/*0350*/ IMAD.WIDE R16, R7, 0x4, R16 ; /* 0x0000000407107825 */
/* 0x001fca00078e0210 */
/*0360*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x000562000c1e1900 */
/*0370*/ IMAD.WIDE R22, R7, 0x4, R16 ; /* 0x0000000407167825 */
/* 0x000fc800078e0210 */
/*0380*/ IMAD R16, R21, R20, R24 ; /* 0x0000001415107224 */
/* 0x004fe400078e0218 */
/*0390*/ LDG.E R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000ea2000c1e1900 */
/*03a0*/ IMAD.WIDE R24, R7, 0x4, R22 ; /* 0x0000000407187825 */
/* 0x000fc600078e0216 */
/*03b0*/ LDG.E R21, [R22.64] ; /* 0x0000000416157981 */
/* 0x0000a8000c1e1900 */
/*03c0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0002a2000c1e1900 */
/*03d0*/ IMAD R16, R15, R14, R16 ; /* 0x0000000e0f107224 */
/* 0x008fe400078e0210 */
/*03e0*/ IMAD.WIDE R14, R7.reuse, 0x4, R24 ; /* 0x00000004070e7825 */
/* 0x040fe200078e0218 */
/*03f0*/ LDG.E R23, [R12.64+0x20] ; /* 0x000020040c177981 */
/* 0x001ee6000c1e1900 */
/*0400*/ IMAD R26, R26, R27, R16 ; /* 0x0000001b1a1a7224 */
/* 0x010fe200078e0210 */
/*0410*/ LDG.E R25, [R12.64+0x24] ; /* 0x000024040c197981 */
/* 0x002f22000c1e1900 */
/*0420*/ IMAD.WIDE R16, R7, 0x4, R14 ; /* 0x0000000407107825 */
/* 0x000fc600078e020e */
/*0430*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0000e2000c1e1900 */
/*0440*/ IMAD R26, R19, R18, R26 ; /* 0x00000012131a7224 */
/* 0x020fe400078e021a */
/*0450*/ IMAD.WIDE R18, R7, 0x4, R16 ; /* 0x0000000407127825 */
/* 0x000fe200078e0210 */
/*0460*/ LDG.E R22, [R12.64+0x28] ; /* 0x000028040c167981 */
/* 0x000f66000c1e1900 */
/*0470*/ IMAD R26, R9, R8, R26 ; /* 0x00000008091a7224 */
/* 0x000fe200078e021a */
/*0480*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000322000c1e1900 */
/*0490*/ IMAD.WIDE R8, R7, 0x4, R18 ; /* 0x0000000407087825 */
/* 0x000fc600078e0212 */
/*04a0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000368000c1e1900 */
/*04b0*/ LDG.E R24, [R8.64] ; /* 0x0000000408187981 */
/* 0x000568000c1e1900 */
/*04c0*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */
/* 0x001f62000c1e1900 */
/*04d0*/ IMAD R26, R11, R10, R26 ; /* 0x0000000a0b1a7224 */
/* 0x000fe400078e021a */
/*04e0*/ IMAD.WIDE R10, R7, 0x4, R8 ; /* 0x00000004070a7825 */
/* 0x000fe200078e0208 */
/*04f0*/ LDG.E R17, [R12.64+0x30] ; /* 0x000030040c117981 */
/* 0x002f66000c1e1900 */
/*0500*/ IMAD R26, R21, R20, R26 ; /* 0x00000014151a7224 */
/* 0x004fc400078e021a */
/*0510*/ IMAD.WIDE R20, R7, 0x4, R10 ; /* 0x0000000407147825 */
/* 0x000fe400078e020a */
/*0520*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0000a4000c1e1900 */
/*0530*/ IMAD R28, R29, R28, R26 ; /* 0x0000001c1d1c7224 */
/* 0x000fe400078e021a */
/*0540*/ IMAD.WIDE R26, R7.reuse, 0x4, R20 ; /* 0x00000004071a7825 */
/* 0x040fe200078e0214 */
/*0550*/ LDG.E R29, [R12.64+0x34] ; /* 0x000034040c1d7981 */
/* 0x000ea8000c1e1900 */
/*0560*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x0002a2000c1e1900 */
/*0570*/ IMAD.WIDE R8, R7, 0x4, R26 ; /* 0x0000000407087825 */
/* 0x000fc600078e021a */
/*0580*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x0006a8000c1e1900 */
/*0590*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0010a8000c1e1900 */
/*05a0*/ LDG.E R21, [R12.64+0x38] ; /* 0x000038040c157981 */
/* 0x002ea8000c1e1900 */
/*05b0*/ LDG.E R26, [R12.64+0x3c] ; /* 0x00003c040c1a7981 */
/* 0x008ee2000c1e1900 */
/*05c0*/ IMAD R14, R14, R23, R28 ; /* 0x000000170e0e7224 */
/* 0x000fc800078e021c */
/*05d0*/ IMAD R25, R16, R25, R14 ; /* 0x0000001910197224 */
/* 0x010fe200078e020e */
/*05e0*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */
/* 0x000fc60007ffe0ff */
/*05f0*/ IMAD R18, R18, R22, R25 ; /* 0x0000001612127224 */
/* 0x020fe200078e0219 */
/*0600*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fc60003f24270 */
/*0610*/ IMAD R15, R24, R15, R18 ; /* 0x0000000f180f7224 */
/* 0x000fe200078e0212 */
/*0620*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0630*/ IMAD.WIDE R8, R7, 0x4, R8 ; /* 0x0000000407087825 */
/* 0x001fc600078e0208 */
/*0640*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0650*/ IADD3 R2, R2, 0x10, RZ ; /* 0x0000001002027810 */
/* 0x000fe20007ffe0ff */
/*0660*/ IMAD R10, R10, R17, R15 ; /* 0x000000110a0a7224 */
/* 0x004fc800078e020f */
/*0670*/ IMAD R10, R20, R29, R10 ; /* 0x0000001d140a7224 */
/* 0x000fc800078e020a */
/*0680*/ IMAD R10, R19, R21, R10 ; /* 0x00000015130a7224 */
/* 0x000fc800078e020a */
/*0690*/ IMAD R24, R11, R26, R10 ; /* 0x0000001a0b187224 */
/* 0x008fe200078e020a */
/*06a0*/ @P1 BRA 0x210 ; /* 0xfffffb6000001947 */
/* 0x000fea000383ffff */
/*06b0*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */
/* 0x000fda0003f24270 */
/*06c0*/ @!P1 BRA 0x960 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*06d0*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */
/* 0x000fe20000000f00 */
/*06e0*/ LDG.E R23, [R8.64] ; /* 0x0000000408177981 */
/* 0x0000a2000c1e1900 */
/*06f0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe40008000f00 */
/*0700*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*0710*/ IMAD.WIDE R16, R7, 0x4, R8 ; /* 0x0000000407107825 */
/* 0x000fc800078e0208 */
/*0720*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */
/* 0x000fc800078e020a */
/*0730*/ IMAD.WIDE R12, R7.reuse, 0x4, R16 ; /* 0x00000004070c7825 */
/* 0x040fe200078e0210 */
/*0740*/ LDG.E R22, [R10.64] ; /* 0x000000040a167981 */
/* 0x000ea8000c1e1900 */
/*0750*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0002e2000c1e1900 */
/*0760*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */
/* 0x000fc600078e020c */
/*0770*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*0780*/ IMAD.WIDE R18, R7.reuse, 0x4, R14 ; /* 0x0000000407127825 */
/* 0x040fe200078e020e */
/*0790*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */
/* 0x000968000c1e1900 */
/*07a0*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */
/* 0x000f62000c1e1900 */
/*07b0*/ IMAD.WIDE R20, R7, 0x4, R18 ; /* 0x0000000407147825 */
/* 0x000fc600078e0212 */
/*07c0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07d0*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */
/* 0x000f62000c1e1900 */
/*07e0*/ IMAD.WIDE R8, R7, 0x4, R20 ; /* 0x0000000407087825 */
/* 0x001fc600078e0214 */
/*07f0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000168000c1e1900 */
/*0800*/ LDG.E R28, [R10.64+0x10] ; /* 0x000010040a1c7981 */
/* 0x000f62000c1e1900 */
/*0810*/ IMAD.WIDE R12, R7, 0x4, R8 ; /* 0x00000004070c7825 */
/* 0x010fc600078e0208 */
/*0820*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000968000c1e1900 */
/*0830*/ LDG.E R15, [R10.64+0x14] ; /* 0x000014040a0f7981 */
/* 0x002f68000c1e1900 */
/*0840*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */
/* 0x000368000c1e1900 */
/*0850*/ LDG.E R21, [R10.64+0x1c] ; /* 0x00001c040a157981 */
/* 0x010f28000c1e1900 */
/*0860*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */
/* 0x001f28000c1e1900 */
/*0870*/ LDG.E R8, [R10.64+0x18] ; /* 0x000018040a087981 */
/* 0x002f22000c1e1900 */
/*0880*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0890*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*08a0*/ IADD3 R2, R2, 0x8, RZ ; /* 0x0000000802027810 */
/* 0x000fe40007ffe0ff */
/*08b0*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */
/* 0x000fe20007ffe0ff */
/*08c0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08d0*/ IMAD R22, R23, R22, R24 ; /* 0x0000001617167224 */
/* 0x004fc800078e0218 */
/*08e0*/ IMAD R16, R16, R25, R22 ; /* 0x0000001910107224 */
/* 0x008fc800078e0216 */
/*08f0*/ IMAD R16, R26, R27, R16 ; /* 0x0000001b1a107224 */
/* 0x020fc800078e0210 */
/*0900*/ IMAD R29, R14, R29, R16 ; /* 0x0000001d0e1d7224 */
/* 0x000fc800078e0210 */
/*0910*/ IMAD R18, R18, R28, R29 ; /* 0x0000001c12127224 */
/* 0x000fc800078e021d */
/*0920*/ IMAD R15, R20, R15, R18 ; /* 0x0000000f140f7224 */
/* 0x000fc800078e0212 */
/*0930*/ IMAD R24, R17, R8, R15 ; /* 0x0000000811187224 */
/* 0x010fe400078e020f */
/*0940*/ IMAD.WIDE R8, R7, 0x4, R12 ; /* 0x0000000407087825 */
/* 0x000fc800078e020c */
/*0950*/ IMAD R24, R19, R21, R24 ; /* 0x0000001513187224 */
/* 0x000fe400078e0218 */
/*0960*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */
/* 0x000fda0000705670 */
/*0970*/ @!P0 BRA 0xb20 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0980*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe40008000f00 */
/*0990*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe40008000f00 */
/*09a0*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */
/* 0x000fc60000000f00 */
/*09b0*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */
/* 0x000fc800078e020a */
/*09c0*/ IMAD.WIDE R16, R7.reuse, 0x4, R8 ; /* 0x0000000407107825 */
/* 0x040fe200078e0208 */
/*09d0*/ LDG.E R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea8000c1e1900 */
/*09e0*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */
/* 0x000ea2000c1e1900 */
/*09f0*/ IMAD.WIDE R12, R7, 0x4, R16 ; /* 0x00000004070c7825 */
/* 0x000fc600078e0210 */
/*0a00*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ee8000c1e1900 */
/*0a10*/ LDG.E R19, [R10.64+0x4] ; /* 0x000004040a137981 */
/* 0x000ee2000c1e1900 */
/*0a20*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */
/* 0x000fc600078e020c */
/*0a30*/ LDG.E R21, [R12.64] ; /* 0x000000040c157981 */
/* 0x000f28000c1e1900 */
/*0a40*/ LDG.E R20, [R10.64+0x8] ; /* 0x000008040a147981 */
/* 0x000f28000c1e1900 */
/*0a50*/ LDG.E R22, [R10.64+0xc] ; /* 0x00000c040a167981 */
/* 0x000f68000c1e1900 */
/*0a60*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */
/* 0x000f62000c1e1900 */
/*0a70*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fc80007ffe0ff */
/*0a80*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0a90*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0aa0*/ IADD3 R2, R2, 0x4, RZ ; /* 0x0000000402027810 */
/* 0x000fc60007ffe0ff */
/*0ab0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0ac0*/ IMAD R18, R9, R18, R24 ; /* 0x0000001209127224 */
/* 0x004fc800078e0218 */
/*0ad0*/ IMAD R18, R17, R19, R18 ; /* 0x0000001311127224 */
/* 0x008fe400078e0212 */
/*0ae0*/ IMAD.WIDE R8, R7, 0x4, R14 ; /* 0x0000000407087825 */
/* 0x000fc800078e020e */
/*0af0*/ IMAD R18, R21, R20, R18 ; /* 0x0000001415127224 */
/* 0x010fc800078e0212 */
/*0b00*/ IMAD R24, R23, R22, R18 ; /* 0x0000001617187224 */
/* 0x020fe200078e0212 */
/*0b10*/ @P0 BRA 0x980 ; /* 0xfffffe6000000947 */
/* 0x000fea000383ffff */
/*0b20*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fda0003f05270 */
/*0b30*/ @!P0 BRA 0xc30 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0b40*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b50*/ IMAD R6, R3, c[0x0][0x17c], R2 ; /* 0x00005f0003067a24 */
/* 0x000fe400078e0202 */
/*0b60*/ IMAD R2, R2, c[0x0][0x180], R0 ; /* 0x0000600002027a24 */
/* 0x000fce00078e0200 */
/*0b70*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0b80*/ IMAD.WIDE R8, R2, R9, c[0x0][0x168] ; /* 0x00005a0002087625 */
/* 0x000fca00078e0209 */
/*0b90*/ LDG.E R5, [R8.64] ; /* 0x0000000408057981 */
/* 0x0000a8000c1e1900 */
/*0ba0*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x0002a2000c1e1900 */
/*0bb0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fe40007ffe0ff */
/*0bc0*/ MOV R11, c[0x0][0x180] ; /* 0x00006000000b7a02 */
/* 0x000fe40000000f00 */
/*0bd0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc60003f05270 */
/*0be0*/ IMAD.WIDE R8, R11, 0x4, R8 ; /* 0x000000040b087825 */
/* 0x001fe200078e0208 */
/*0bf0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0c00*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0c10*/ IMAD R24, R5, R2, R24 ; /* 0x0000000205187224 */
/* 0x004fc800078e0218 */
/*0c20*/ @P0 BRA 0xb90 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0c30*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fe20000000f00 */
/*0c40*/ IMAD R3, R3, c[0x0][0x180], R0 ; /* 0x0000600003037a24 */
/* 0x000fc800078e0200 */
/*0c50*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0c60*/ STG.E [R2.64], R24 ; /* 0x0000001802007986 */
/* 0x000fe2000c101904 */
/*0c70*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c80*/ BRA 0xc80; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
/*
* file name: mm_omp_vs_cuda.cu
*
* mm_omp_vs_cuda.cu contains the code that realize some common used matrix operations in CUDA, and
* an implementation of matrix multiplication speedup via openmp, this is a practice to compare the
* of performance of cuda and openmp, as well as a trail of using cuda and openmp in the same program
*
* this is a toy program for learning CUDA, some functions are reusable in other project
* note:
* compile: nvcc -Xcompiler \-fopenmp -lgomp mm_omp_vs_cuda.cu
*/
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU device pointer to a n X n matrix (A)
&b CPU device pointer to a n X n matrix (B)
&c CPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU host pointer to a m X n matrix (A)
&b CPU host pointer to a n X k matrix (B)
&c CPU host output purpose pointer to a m X k matrix (C)
to store the result
return: none
*********************************************************************
*/
// Naive integer matrix multiply on the GPU: c = a * b.
// a is m x n, b is n x k, c is m x k; all matrices are flat row-major
// int arrays, and each thread computes exactly one element of c.
// Per the file header, launch with a 2-D grid of 2-D blocks covering
// k columns (x dimension) and m rows (y dimension).
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
    // Global row/column of the output element owned by this thread.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    // Bounds guard: the grid may overhang the matrix at the right/bottom edges.
    if( col < k && row < m)
    {
        // Dot product of row `row` of a with column `col` of b.
        for(int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
} | .file "tmpxft_00189d7e_00000000-6_gpu_matrix_mult.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii
.type _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii, @function
_Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15gpu_matrix_multPiS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii, .-_Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii
.globl _Z15gpu_matrix_multPiS_S_iii
.type _Z15gpu_matrix_multPiS_S_iii, @function
_Z15gpu_matrix_multPiS_S_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15gpu_matrix_multPiS_S_iii, .-_Z15gpu_matrix_multPiS_S_iii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z15gpu_matrix_multPiS_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15gpu_matrix_multPiS_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/*
* file name: mm_omp_vs_cuda.cu
*
* mm_omp_vs_cuda.cu contains the code that realize some common used matrix operations in CUDA, and
* an implementation of matrix multiplication speedup via openmp; this is a practice to compare the
* performance of cuda and openmp, as well as a trial of using cuda and openmp in the same program
*
* this is a toy program for learning CUDA, some functions are reusable in other project
* note:
* compile: nvcc -Xcompiler \-fopenmp -lgomp mm_omp_vs_cuda.cu
*/
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU device pointer to a n X n matrix (A)
&b CPU device pointer to a n X n matrix (B)
&c CPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU host pointer to a m X n matrix (A)
&b CPU host pointer to a n X k matrix (B)
&c CPU host output purpose pointer to a m X k matrix (C)
to store the result
return: none
*********************************************************************
*/
// Naive integer matrix multiply on the GPU: c = a * b.
// a is m x n, b is n x k, c is m x k; all matrices are flat row-major
// int arrays, and each thread computes exactly one element of c.
// Per the file header, launch with a 2-D grid of 2-D blocks covering
// k columns (x dimension) and m rows (y dimension).
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
    // Global row/column of the output element owned by this thread.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    // Bounds guard: the grid may overhang the matrix at the right/bottom edges.
    if( col < k && row < m)
    {
        // Dot product of row `row` of a with column `col` of b.
        for(int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
} | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* file name: mm_omp_vs_cuda.cu
*
* mm_omp_vs_cuda.cu contains the code that realize some common used matrix operations in CUDA, and
* an implementation of matrix multiplication speedup via openmp; this is a practice to compare the
* performance of cuda and openmp, as well as a trial of using cuda and openmp in the same program
*
* this is a toy program for learning CUDA, some functions are reusable in other project
* note:
* compile: nvcc -Xcompiler \-fopenmp -lgomp mm_omp_vs_cuda.cu
*/
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU device pointer to a n X n matrix (A)
&b CPU device pointer to a n X n matrix (B)
&c CPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU host pointer to a m X n matrix (A)
&b CPU host pointer to a n X k matrix (B)
&c CPU host output purpose pointer to a m X k matrix (C)
to store the result
return: none
*********************************************************************
*/
// Naive integer matrix multiply on the GPU: c = a * b.
// a is m x n, b is n x k, c is m x k; all matrices are flat row-major
// int arrays, and each thread computes exactly one element of c.
// Per the file header, launch with a 2-D grid of 2-D blocks covering
// k columns (x dimension) and m rows (y dimension).
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
    // Global row/column of the output element owned by this thread.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    // Bounds guard: the grid may overhang the matrix at the right/bottom edges.
    if( col < k && row < m)
    {
        // Dot product of row `row` of a with column `col` of b.
        for(int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* file name: mm_omp_vs_cuda.cu
*
* mm_omp_vs_cuda.cu contains the code that realize some common used matrix operations in CUDA, and
* an implementation of matrix multiplication speedup via openmp; this is a practice to compare the
* performance of cuda and openmp, as well as a trial of using cuda and openmp in the same program
*
* this is a toy program for learning CUDA, some functions are reusable in other project
* note:
* compile: nvcc -Xcompiler \-fopenmp -lgomp mm_omp_vs_cuda.cu
*/
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU device pointer to a n X n matrix (A)
&b CPU device pointer to a n X n matrix (B)
&c CPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU host pointer to a m X n matrix (A)
&b CPU host pointer to a n X k matrix (B)
&c CPU host output purpose pointer to a m X k matrix (C)
to store the result
return: none
*********************************************************************
*/
// Naive integer matrix multiply on the GPU: c = a * b.
// a is m x n, b is n x k, c is m x k; all matrices are flat row-major
// int arrays, and each thread computes exactly one element of c.
// Per the file header, launch with a 2-D grid of 2-D blocks covering
// k columns (x dimension) and m rows (y dimension).
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
    // Global row/column of the output element owned by this thread.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    // Bounds guard: the grid may overhang the matrix at the right/bottom edges.
    if( col < k && row < m)
    {
        // Dot product of row `row` of a with column `col` of b.
        for(int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15gpu_matrix_multPiS_S_iii
.globl _Z15gpu_matrix_multPiS_S_iii
.p2align 8
.type _Z15gpu_matrix_multPiS_S_iii,@function
_Z15gpu_matrix_multPiS_S_iii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x20
s_load_b32 s4, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, s14, s2, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s5, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s3, v0
v_cmp_gt_i32_e64 s2, s4, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_6
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v1, s2
v_mov_b32_e32 v5, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v6, 31, v5
s_add_i32 s2, s2, -1
s_cmp_eq_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[5:6]
v_add_co_u32 v6, vcc_lo, s6, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_load_b32 v8, v[3:4], off
global_load_b32 v9, v[6:7], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[6:7], null, v9, v8, v[2:3]
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v5, s3, v5
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x10
v_mad_u64_u32 v[3:4], null, v1, s3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15gpu_matrix_multPiS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15gpu_matrix_multPiS_S_iii, .Lfunc_end0-_Z15gpu_matrix_multPiS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15gpu_matrix_multPiS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15gpu_matrix_multPiS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* file name: mm_omp_vs_cuda.cu
*
* mm_omp_vs_cuda.cu contains the code that realize some common used matrix operations in CUDA, and
* an implementation of matrix multiplication speedup via openmp; this is a practice to compare the
* performance of cuda and openmp, as well as a trial of using cuda and openmp in the same program
*
* this is a toy program for learning CUDA, some functions are reusable in other project
* note:
* compile: nvcc -Xcompiler \-fopenmp -lgomp mm_omp_vs_cuda.cu
*/
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU device pointer to a n X n matrix (A)
&b CPU device pointer to a n X n matrix (B)
&c CPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU host pointer to a m X n matrix (A)
&b CPU host pointer to a n X k matrix (B)
&c CPU host output purpose pointer to a m X k matrix (C)
to store the result
return: none
*********************************************************************
*/
// Naive integer matrix multiply on the GPU: c = a * b.
// a is m x n, b is n x k, c is m x k; all matrices are flat row-major
// int arrays, and each thread computes exactly one element of c.
// Per the file header, launch with a 2-D grid of 2-D blocks covering
// k columns (x dimension) and m rows (y dimension).
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
    // Global row/column of the output element owned by this thread.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    // Bounds guard: the grid may overhang the matrix at the right/bottom edges.
    if( col < k && row < m)
    {
        // Dot product of row `row` of a with column `col` of b.
        for(int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
} | .text
.file "gpu_matrix_mult.hip"
.globl _Z30__device_stub__gpu_matrix_multPiS_S_iii # -- Begin function _Z30__device_stub__gpu_matrix_multPiS_S_iii
.p2align 4, 0x90
.type _Z30__device_stub__gpu_matrix_multPiS_S_iii,@function
_Z30__device_stub__gpu_matrix_multPiS_S_iii: # @_Z30__device_stub__gpu_matrix_multPiS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15gpu_matrix_multPiS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z30__device_stub__gpu_matrix_multPiS_S_iii, .Lfunc_end0-_Z30__device_stub__gpu_matrix_multPiS_S_iii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15gpu_matrix_multPiS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15gpu_matrix_multPiS_S_iii,@object # @_Z15gpu_matrix_multPiS_S_iii
.section .rodata,"a",@progbits
.globl _Z15gpu_matrix_multPiS_S_iii
.p2align 3, 0x0
_Z15gpu_matrix_multPiS_S_iii:
.quad _Z30__device_stub__gpu_matrix_multPiS_S_iii
.size _Z15gpu_matrix_multPiS_S_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15gpu_matrix_multPiS_S_iii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__gpu_matrix_multPiS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15gpu_matrix_multPiS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z15gpu_matrix_multPiS_S_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R4, c[0x0][0x17c] ; /* 0x00005f0000047a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R24, -RZ, RZ, 0, 0 ; /* 0x00000000ff187435 */
/* 0x000fe400000001ff */
/*00d0*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fda0003f06270 */
/*00e0*/ @!P0 BRA 0xc30 ; /* 0x00000b4000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R2, R4.reuse, -0x1, RZ ; /* 0xffffffff04027810 */
/* 0x040fe40007ffe0ff */
/*0100*/ LOP3.LUT R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fe400078ec0ff */
/*0110*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f06070 */
/*0120*/ MOV R2, RZ ; /* 0x000000ff00027202 */
/* 0x000fe40000000f00 */
/*0130*/ MOV R24, RZ ; /* 0x000000ff00187202 */
/* 0x000fd20000000f00 */
/*0140*/ @!P0 BRA 0xb20 ; /* 0x000009d000008947 */
/* 0x000fea0003800000 */
/*0150*/ IADD3 R5, -R4, c[0x0][0x17c], RZ ; /* 0x00005f0004057a10 */
/* 0x000fe20007ffe1ff */
/*0160*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0170*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0180*/ IMAD R6, R3, c[0x0][0x17c], RZ ; /* 0x00005f0003067a24 */
/* 0x000fe200078e02ff */
/*0190*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f04270 */
/*01a0*/ MOV R2, RZ ; /* 0x000000ff00027202 */
/* 0x000fca0000000f00 */
/*01b0*/ IMAD.WIDE R8, R0, R9, c[0x0][0x168] ; /* 0x00005a0000087625 */
/* 0x000fcc00078e0209 */
/*01c0*/ @!P0 BRA 0x980 ; /* 0x000007b000008947 */
/* 0x000fea0003800000 */
/*01d0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fe40003f24270 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01f0*/ @!P1 BRA 0x6b0 ; /* 0x000004b000009947 */
/* 0x000fea0003800000 */
/*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R21, [R8.64] ; /* 0x0000000408157981 */
/* 0x0000a2000c1e1900 */
/*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0240*/ IMAD.WIDE R12, R6, 0x4, R12 ; /* 0x00000004060c7825 */
/* 0x000fca00078e020c */
/*0250*/ LDG.E R20, [R12.64] ; /* 0x000000040c147981 */
/* 0x000ea2000c1e1900 */
/*0260*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */
/* 0x000fc60000000f00 */
/*0270*/ LDG.E R14, [R12.64+0x4] ; /* 0x000004040c0e7981 */
/* 0x000ee4000c1e1900 */
/*0280*/ IMAD.WIDE R10, R7.reuse, 0x4, R8 ; /* 0x00000004070a7825 */
/* 0x040fe400078e0208 */
/*0290*/ LDG.E R27, [R12.64+0x8] ; /* 0x000008040c1b7981 */
/* 0x000f28000c1e1900 */
/*02a0*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x0002e2000c1e1900 */
/*02b0*/ IMAD.WIDE R22, R7, 0x4, R10 ; /* 0x0000000407167825 */
/* 0x000fc600078e020a */
/*02c0*/ LDG.E R18, [R12.64+0xc] ; /* 0x00000c040c127981 */
/* 0x000f66000c1e1900 */
/*02d0*/ IMAD.WIDE R28, R7.reuse, 0x4, R22 ; /* 0x00000004071c7825 */
/* 0x040fe200078e0216 */
/*02e0*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x000328000c1e1900 */
/*02f0*/ LDG.E R19, [R28.64] ; /* 0x000000041c137981 */
/* 0x000362000c1e1900 */
/*0300*/ IMAD.WIDE R16, R7, 0x4, R28 ; /* 0x0000000407107825 */
/* 0x000fc600078e021c */
/*0310*/ LDG.E R8, [R12.64+0x10] ; /* 0x000010040c087981 */
/* 0x001f68000c1e1900 */
/*0320*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */
/* 0x000168000c1e1900 */
/*0330*/ LDG.E R10, [R12.64+0x14] ; /* 0x000014040c0a7981 */
/* 0x002f68000c1e1900 */
/*0340*/ LDG.E R28, [R12.64+0x1c] ; /* 0x00001c040c1c7981 */
/* 0x000f62000c1e1900 */
/*0350*/ IMAD.WIDE R16, R7, 0x4, R16 ; /* 0x0000000407107825 */
/* 0x001fca00078e0210 */
/*0360*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x000562000c1e1900 */
/*0370*/ IMAD.WIDE R22, R7, 0x4, R16 ; /* 0x0000000407167825 */
/* 0x000fc800078e0210 */
/*0380*/ IMAD R16, R21, R20, R24 ; /* 0x0000001415107224 */
/* 0x004fe400078e0218 */
/*0390*/ LDG.E R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000ea2000c1e1900 */
/*03a0*/ IMAD.WIDE R24, R7, 0x4, R22 ; /* 0x0000000407187825 */
/* 0x000fc600078e0216 */
/*03b0*/ LDG.E R21, [R22.64] ; /* 0x0000000416157981 */
/* 0x0000a8000c1e1900 */
/*03c0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0002a2000c1e1900 */
/*03d0*/ IMAD R16, R15, R14, R16 ; /* 0x0000000e0f107224 */
/* 0x008fe400078e0210 */
/*03e0*/ IMAD.WIDE R14, R7.reuse, 0x4, R24 ; /* 0x00000004070e7825 */
/* 0x040fe200078e0218 */
/*03f0*/ LDG.E R23, [R12.64+0x20] ; /* 0x000020040c177981 */
/* 0x001ee6000c1e1900 */
/*0400*/ IMAD R26, R26, R27, R16 ; /* 0x0000001b1a1a7224 */
/* 0x010fe200078e0210 */
/*0410*/ LDG.E R25, [R12.64+0x24] ; /* 0x000024040c197981 */
/* 0x002f22000c1e1900 */
/*0420*/ IMAD.WIDE R16, R7, 0x4, R14 ; /* 0x0000000407107825 */
/* 0x000fc600078e020e */
/*0430*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0000e2000c1e1900 */
/*0440*/ IMAD R26, R19, R18, R26 ; /* 0x00000012131a7224 */
/* 0x020fe400078e021a */
/*0450*/ IMAD.WIDE R18, R7, 0x4, R16 ; /* 0x0000000407127825 */
/* 0x000fe200078e0210 */
/*0460*/ LDG.E R22, [R12.64+0x28] ; /* 0x000028040c167981 */
/* 0x000f66000c1e1900 */
/*0470*/ IMAD R26, R9, R8, R26 ; /* 0x00000008091a7224 */
/* 0x000fe200078e021a */
/*0480*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000322000c1e1900 */
/*0490*/ IMAD.WIDE R8, R7, 0x4, R18 ; /* 0x0000000407087825 */
/* 0x000fc600078e0212 */
/*04a0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000368000c1e1900 */
/*04b0*/ LDG.E R24, [R8.64] ; /* 0x0000000408187981 */
/* 0x000568000c1e1900 */
/*04c0*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */
/* 0x001f62000c1e1900 */
/*04d0*/ IMAD R26, R11, R10, R26 ; /* 0x0000000a0b1a7224 */
/* 0x000fe400078e021a */
/*04e0*/ IMAD.WIDE R10, R7, 0x4, R8 ; /* 0x00000004070a7825 */
/* 0x000fe200078e0208 */
/*04f0*/ LDG.E R17, [R12.64+0x30] ; /* 0x000030040c117981 */
/* 0x002f66000c1e1900 */
/*0500*/ IMAD R26, R21, R20, R26 ; /* 0x00000014151a7224 */
/* 0x004fc400078e021a */
/*0510*/ IMAD.WIDE R20, R7, 0x4, R10 ; /* 0x0000000407147825 */
/* 0x000fe400078e020a */
/*0520*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0000a4000c1e1900 */
/*0530*/ IMAD R28, R29, R28, R26 ; /* 0x0000001c1d1c7224 */
/* 0x000fe400078e021a */
/*0540*/ IMAD.WIDE R26, R7.reuse, 0x4, R20 ; /* 0x00000004071a7825 */
/* 0x040fe200078e0214 */
/*0550*/ LDG.E R29, [R12.64+0x34] ; /* 0x000034040c1d7981 */
/* 0x000ea8000c1e1900 */
/*0560*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x0002a2000c1e1900 */
/*0570*/ IMAD.WIDE R8, R7, 0x4, R26 ; /* 0x0000000407087825 */
/* 0x000fc600078e021a */
/*0580*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x0006a8000c1e1900 */
/*0590*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0010a8000c1e1900 */
/*05a0*/ LDG.E R21, [R12.64+0x38] ; /* 0x000038040c157981 */
/* 0x002ea8000c1e1900 */
/*05b0*/ LDG.E R26, [R12.64+0x3c] ; /* 0x00003c040c1a7981 */
/* 0x008ee2000c1e1900 */
/*05c0*/ IMAD R14, R14, R23, R28 ; /* 0x000000170e0e7224 */
/* 0x000fc800078e021c */
/*05d0*/ IMAD R25, R16, R25, R14 ; /* 0x0000001910197224 */
/* 0x010fe200078e020e */
/*05e0*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */
/* 0x000fc60007ffe0ff */
/*05f0*/ IMAD R18, R18, R22, R25 ; /* 0x0000001612127224 */
/* 0x020fe200078e0219 */
/*0600*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fc60003f24270 */
/*0610*/ IMAD R15, R24, R15, R18 ; /* 0x0000000f180f7224 */
/* 0x000fe200078e0212 */
/*0620*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0630*/ IMAD.WIDE R8, R7, 0x4, R8 ; /* 0x0000000407087825 */
/* 0x001fc600078e0208 */
/*0640*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0650*/ IADD3 R2, R2, 0x10, RZ ; /* 0x0000001002027810 */
/* 0x000fe20007ffe0ff */
/*0660*/ IMAD R10, R10, R17, R15 ; /* 0x000000110a0a7224 */
/* 0x004fc800078e020f */
/*0670*/ IMAD R10, R20, R29, R10 ; /* 0x0000001d140a7224 */
/* 0x000fc800078e020a */
/*0680*/ IMAD R10, R19, R21, R10 ; /* 0x00000015130a7224 */
/* 0x000fc800078e020a */
/*0690*/ IMAD R24, R11, R26, R10 ; /* 0x0000001a0b187224 */
/* 0x008fe200078e020a */
/*06a0*/ @P1 BRA 0x210 ; /* 0xfffffb6000001947 */
/* 0x000fea000383ffff */
/*06b0*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */
/* 0x000fda0003f24270 */
/*06c0*/ @!P1 BRA 0x960 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*06d0*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */
/* 0x000fe20000000f00 */
/*06e0*/ LDG.E R23, [R8.64] ; /* 0x0000000408177981 */
/* 0x0000a2000c1e1900 */
/*06f0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe40008000f00 */
/*0700*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*0710*/ IMAD.WIDE R16, R7, 0x4, R8 ; /* 0x0000000407107825 */
/* 0x000fc800078e0208 */
/*0720*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */
/* 0x000fc800078e020a */
/*0730*/ IMAD.WIDE R12, R7.reuse, 0x4, R16 ; /* 0x00000004070c7825 */
/* 0x040fe200078e0210 */
/*0740*/ LDG.E R22, [R10.64] ; /* 0x000000040a167981 */
/* 0x000ea8000c1e1900 */
/*0750*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0002e2000c1e1900 */
/*0760*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */
/* 0x000fc600078e020c */
/*0770*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*0780*/ IMAD.WIDE R18, R7.reuse, 0x4, R14 ; /* 0x0000000407127825 */
/* 0x040fe200078e020e */
/*0790*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */
/* 0x000968000c1e1900 */
/*07a0*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */
/* 0x000f62000c1e1900 */
/*07b0*/ IMAD.WIDE R20, R7, 0x4, R18 ; /* 0x0000000407147825 */
/* 0x000fc600078e0212 */
/*07c0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07d0*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */
/* 0x000f62000c1e1900 */
/*07e0*/ IMAD.WIDE R8, R7, 0x4, R20 ; /* 0x0000000407087825 */
/* 0x001fc600078e0214 */
/*07f0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000168000c1e1900 */
/*0800*/ LDG.E R28, [R10.64+0x10] ; /* 0x000010040a1c7981 */
/* 0x000f62000c1e1900 */
/*0810*/ IMAD.WIDE R12, R7, 0x4, R8 ; /* 0x00000004070c7825 */
/* 0x010fc600078e0208 */
/*0820*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000968000c1e1900 */
/*0830*/ LDG.E R15, [R10.64+0x14] ; /* 0x000014040a0f7981 */
/* 0x002f68000c1e1900 */
/*0840*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */
/* 0x000368000c1e1900 */
/*0850*/ LDG.E R21, [R10.64+0x1c] ; /* 0x00001c040a157981 */
/* 0x010f28000c1e1900 */
/*0860*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */
/* 0x001f28000c1e1900 */
/*0870*/ LDG.E R8, [R10.64+0x18] ; /* 0x000018040a087981 */
/* 0x002f22000c1e1900 */
/*0880*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0890*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*08a0*/ IADD3 R2, R2, 0x8, RZ ; /* 0x0000000802027810 */
/* 0x000fe40007ffe0ff */
/*08b0*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */
/* 0x000fe20007ffe0ff */
/*08c0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08d0*/ IMAD R22, R23, R22, R24 ; /* 0x0000001617167224 */
/* 0x004fc800078e0218 */
/*08e0*/ IMAD R16, R16, R25, R22 ; /* 0x0000001910107224 */
/* 0x008fc800078e0216 */
/*08f0*/ IMAD R16, R26, R27, R16 ; /* 0x0000001b1a107224 */
/* 0x020fc800078e0210 */
/*0900*/ IMAD R29, R14, R29, R16 ; /* 0x0000001d0e1d7224 */
/* 0x000fc800078e0210 */
/*0910*/ IMAD R18, R18, R28, R29 ; /* 0x0000001c12127224 */
/* 0x000fc800078e021d */
/*0920*/ IMAD R15, R20, R15, R18 ; /* 0x0000000f140f7224 */
/* 0x000fc800078e0212 */
/*0930*/ IMAD R24, R17, R8, R15 ; /* 0x0000000811187224 */
/* 0x010fe400078e020f */
/*0940*/ IMAD.WIDE R8, R7, 0x4, R12 ; /* 0x0000000407087825 */
/* 0x000fc800078e020c */
/*0950*/ IMAD R24, R19, R21, R24 ; /* 0x0000001513187224 */
/* 0x000fe400078e0218 */
/*0960*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */
/* 0x000fda0000705670 */
/*0970*/ @!P0 BRA 0xb20 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0980*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe40008000f00 */
/*0990*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe40008000f00 */
/*09a0*/ MOV R7, c[0x0][0x180] ; /* 0x0000600000077a02 */
/* 0x000fc60000000f00 */
/*09b0*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */
/* 0x000fc800078e020a */
/*09c0*/ IMAD.WIDE R16, R7.reuse, 0x4, R8 ; /* 0x0000000407107825 */
/* 0x040fe200078e0208 */
/*09d0*/ LDG.E R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea8000c1e1900 */
/*09e0*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */
/* 0x000ea2000c1e1900 */
/*09f0*/ IMAD.WIDE R12, R7, 0x4, R16 ; /* 0x00000004070c7825 */
/* 0x000fc600078e0210 */
/*0a00*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ee8000c1e1900 */
/*0a10*/ LDG.E R19, [R10.64+0x4] ; /* 0x000004040a137981 */
/* 0x000ee2000c1e1900 */
/*0a20*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */
/* 0x000fc600078e020c */
/*0a30*/ LDG.E R21, [R12.64] ; /* 0x000000040c157981 */
/* 0x000f28000c1e1900 */
/*0a40*/ LDG.E R20, [R10.64+0x8] ; /* 0x000008040a147981 */
/* 0x000f28000c1e1900 */
/*0a50*/ LDG.E R22, [R10.64+0xc] ; /* 0x00000c040a167981 */
/* 0x000f68000c1e1900 */
/*0a60*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */
/* 0x000f62000c1e1900 */
/*0a70*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fc80007ffe0ff */
/*0a80*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0a90*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0aa0*/ IADD3 R2, R2, 0x4, RZ ; /* 0x0000000402027810 */
/* 0x000fc60007ffe0ff */
/*0ab0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0ac0*/ IMAD R18, R9, R18, R24 ; /* 0x0000001209127224 */
/* 0x004fc800078e0218 */
/*0ad0*/ IMAD R18, R17, R19, R18 ; /* 0x0000001311127224 */
/* 0x008fe400078e0212 */
/*0ae0*/ IMAD.WIDE R8, R7, 0x4, R14 ; /* 0x0000000407087825 */
/* 0x000fc800078e020e */
/*0af0*/ IMAD R18, R21, R20, R18 ; /* 0x0000001415127224 */
/* 0x010fc800078e0212 */
/*0b00*/ IMAD R24, R23, R22, R18 ; /* 0x0000001617187224 */
/* 0x020fe200078e0212 */
/*0b10*/ @P0 BRA 0x980 ; /* 0xfffffe6000000947 */
/* 0x000fea000383ffff */
/*0b20*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fda0003f05270 */
/*0b30*/ @!P0 BRA 0xc30 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0b40*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b50*/ IMAD R6, R3, c[0x0][0x17c], R2 ; /* 0x00005f0003067a24 */
/* 0x000fe400078e0202 */
/*0b60*/ IMAD R2, R2, c[0x0][0x180], R0 ; /* 0x0000600002027a24 */
/* 0x000fce00078e0200 */
/*0b70*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0b80*/ IMAD.WIDE R8, R2, R9, c[0x0][0x168] ; /* 0x00005a0002087625 */
/* 0x000fca00078e0209 */
/*0b90*/ LDG.E R5, [R8.64] ; /* 0x0000000408057981 */
/* 0x0000a8000c1e1900 */
/*0ba0*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x0002a2000c1e1900 */
/*0bb0*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fe40007ffe0ff */
/*0bc0*/ MOV R11, c[0x0][0x180] ; /* 0x00006000000b7a02 */
/* 0x000fe40000000f00 */
/*0bd0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc60003f05270 */
/*0be0*/ IMAD.WIDE R8, R11, 0x4, R8 ; /* 0x000000040b087825 */
/* 0x001fe200078e0208 */
/*0bf0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0c00*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0c10*/ IMAD R24, R5, R2, R24 ; /* 0x0000000205187224 */
/* 0x004fc800078e0218 */
/*0c20*/ @P0 BRA 0xb90 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0c30*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fe20000000f00 */
/*0c40*/ IMAD R3, R3, c[0x0][0x180], R0 ; /* 0x0000600003037a24 */
/* 0x000fc800078e0200 */
/*0c50*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0c60*/ STG.E [R2.64], R24 ; /* 0x0000001802007986 */
/* 0x000fe2000c101904 */
/*0c70*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c80*/ BRA 0xc80; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15gpu_matrix_multPiS_S_iii
.globl _Z15gpu_matrix_multPiS_S_iii
.p2align 8
.type _Z15gpu_matrix_multPiS_S_iii,@function
_Z15gpu_matrix_multPiS_S_iii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x20
s_load_b32 s4, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, s14, s2, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s5, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s3, v0
v_cmp_gt_i32_e64 s2, s4, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_6
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v1, s2
v_mov_b32_e32 v5, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v6, 31, v5
s_add_i32 s2, s2, -1
s_cmp_eq_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[5:6]
v_add_co_u32 v6, vcc_lo, s6, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_load_b32 v8, v[3:4], off
global_load_b32 v9, v[6:7], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[6:7], null, v9, v8, v[2:3]
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v5, s3, v5
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x10
v_mad_u64_u32 v[3:4], null, v1, s3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15gpu_matrix_multPiS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15gpu_matrix_multPiS_S_iii, .Lfunc_end0-_Z15gpu_matrix_multPiS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15gpu_matrix_multPiS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15gpu_matrix_multPiS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00189d7e_00000000-6_gpu_matrix_mult.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii
.type _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii, @function
_Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15gpu_matrix_multPiS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii, .-_Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii
.globl _Z15gpu_matrix_multPiS_S_iii
.type _Z15gpu_matrix_multPiS_S_iii, @function
_Z15gpu_matrix_multPiS_S_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z15gpu_matrix_multPiS_S_iiiPiS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z15gpu_matrix_multPiS_S_iii, .-_Z15gpu_matrix_multPiS_S_iii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z15gpu_matrix_multPiS_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z15gpu_matrix_multPiS_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpu_matrix_mult.hip"
.globl _Z30__device_stub__gpu_matrix_multPiS_S_iii # -- Begin function _Z30__device_stub__gpu_matrix_multPiS_S_iii
.p2align 4, 0x90
.type _Z30__device_stub__gpu_matrix_multPiS_S_iii,@function
_Z30__device_stub__gpu_matrix_multPiS_S_iii: # @_Z30__device_stub__gpu_matrix_multPiS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z15gpu_matrix_multPiS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z30__device_stub__gpu_matrix_multPiS_S_iii, .Lfunc_end0-_Z30__device_stub__gpu_matrix_multPiS_S_iii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15gpu_matrix_multPiS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15gpu_matrix_multPiS_S_iii,@object # @_Z15gpu_matrix_multPiS_S_iii
.section .rodata,"a",@progbits
.globl _Z15gpu_matrix_multPiS_S_iii
.p2align 3, 0x0
_Z15gpu_matrix_multPiS_S_iii:
.quad _Z30__device_stub__gpu_matrix_multPiS_S_iii
.size _Z15gpu_matrix_multPiS_S_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z15gpu_matrix_multPiS_S_iii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__gpu_matrix_multPiS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15gpu_matrix_multPiS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.