system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
__global__ void addArrays(int* a, int* b, int* c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} | code for sm_80
Function : _Z9addArraysPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
__global__ void addArrays(int* a, int* b, int* c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} | .file "tmpxft_00009e3f_00000000-6_addArrays.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9addArraysPiS_S_PiS_S_
.type _Z32__device_stub__Z9addArraysPiS_S_PiS_S_, @function
_Z32__device_stub__Z9addArraysPiS_S_PiS_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9addArraysPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9addArraysPiS_S_PiS_S_, .-_Z32__device_stub__Z9addArraysPiS_S_PiS_S_
.globl _Z9addArraysPiS_S_
.type _Z9addArraysPiS_S_, @function
_Z9addArraysPiS_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9addArraysPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9addArraysPiS_S_, .-_Z9addArraysPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9addArraysPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9addArraysPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
__global__ void addArrays(int* a, int* b, int* c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
__global__ void addArrays(int* a, int* b, int* c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
__global__ void addArrays(int* a, int* b, int* c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9addArraysPiS_S_
.globl _Z9addArraysPiS_S_
.p2align 8
.type _Z9addArraysPiS_S_,@function
_Z9addArraysPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9addArraysPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9addArraysPiS_S_, .Lfunc_end0-_Z9addArraysPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9addArraysPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z9addArraysPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
__global__ void addArrays(int* a, int* b, int* c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} | .text
.file "addArrays.hip"
.globl _Z24__device_stub__addArraysPiS_S_ # -- Begin function _Z24__device_stub__addArraysPiS_S_
.p2align 4, 0x90
.type _Z24__device_stub__addArraysPiS_S_,@function
_Z24__device_stub__addArraysPiS_S_: # @_Z24__device_stub__addArraysPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addArraysPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__addArraysPiS_S_, .Lfunc_end0-_Z24__device_stub__addArraysPiS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9addArraysPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9addArraysPiS_S_,@object # @_Z9addArraysPiS_S_
.section .rodata,"a",@progbits
.globl _Z9addArraysPiS_S_
.p2align 3, 0x0
_Z9addArraysPiS_S_:
.quad _Z24__device_stub__addArraysPiS_S_
.size _Z9addArraysPiS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9addArraysPiS_S_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__addArraysPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9addArraysPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9addArraysPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9addArraysPiS_S_
.globl _Z9addArraysPiS_S_
.p2align 8
.type _Z9addArraysPiS_S_,@function
_Z9addArraysPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9addArraysPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9addArraysPiS_S_, .Lfunc_end0-_Z9addArraysPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9addArraysPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z9addArraysPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00009e3f_00000000-6_addArrays.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9addArraysPiS_S_PiS_S_
.type _Z32__device_stub__Z9addArraysPiS_S_PiS_S_, @function
_Z32__device_stub__Z9addArraysPiS_S_PiS_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9addArraysPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z9addArraysPiS_S_PiS_S_, .-_Z32__device_stub__Z9addArraysPiS_S_PiS_S_
.globl _Z9addArraysPiS_S_
.type _Z9addArraysPiS_S_, @function
_Z9addArraysPiS_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9addArraysPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9addArraysPiS_S_, .-_Z9addArraysPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9addArraysPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9addArraysPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "addArrays.hip"
.globl _Z24__device_stub__addArraysPiS_S_ # -- Begin function _Z24__device_stub__addArraysPiS_S_
.p2align 4, 0x90
.type _Z24__device_stub__addArraysPiS_S_,@function
_Z24__device_stub__addArraysPiS_S_: # @_Z24__device_stub__addArraysPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addArraysPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__addArraysPiS_S_, .Lfunc_end0-_Z24__device_stub__addArraysPiS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9addArraysPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9addArraysPiS_S_,@object # @_Z9addArraysPiS_S_
.section .rodata,"a",@progbits
.globl _Z9addArraysPiS_S_
.p2align 3, 0x0
_Z9addArraysPiS_S_:
.quad _Z24__device_stub__addArraysPiS_S_
.size _Z9addArraysPiS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9addArraysPiS_S_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__addArraysPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9addArraysPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__ void repeatedActivations(float* H, int K, int M, int r, float iterfac) {
/*
Avoid repeated activations with a maximum filter
:param H: An KxM matrix whose repeated activations will be suppressed row-wise
:param K, M: Dimensions
:param r: Width of repeated activation filter
:param iterfac: The shrinkage factor for non-maximum values in a neighborhood
*/
extern __shared__ float x[];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
//TODO: FINISH THIS
} | code for sm_80
Function : _Z19repeatedActivationsPfiiif
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__ void repeatedActivations(float* H, int K, int M, int r, float iterfac) {
/*
Avoid repeated activations with a maximum filter
:param H: An KxM matrix whose repeated activations will be suppressed row-wise
:param K, M: Dimensions
:param r: Width of repeated activation filter
:param iterfac: The shrinkage factor for non-maximum values in a neighborhood
*/
extern __shared__ float x[];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
//TODO: FINISH THIS
} | .file "tmpxft_0012e3fe_00000000-6_Driedger.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif
.type _Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif, @function
_Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19repeatedActivationsPfiiif(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif, .-_Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif
.globl _Z19repeatedActivationsPfiiif
.type _Z19repeatedActivationsPfiiif, @function
_Z19repeatedActivationsPfiiif:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19repeatedActivationsPfiiif, .-_Z19repeatedActivationsPfiiif
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z19repeatedActivationsPfiiif"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19repeatedActivationsPfiiif(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__ void repeatedActivations(float* H, int K, int M, int r, float iterfac) {
/*
Avoid repeated activations with a maximum filter
:param H: An KxM matrix whose repeated activations will be suppressed row-wise
:param K, M: Dimensions
:param r: Width of repeated activation filter
:param iterfac: The shrinkage factor for non-maximum values in a neighborhood
*/
extern __shared__ float x[];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
//TODO: FINISH THIS
} | #include <hip/hip_runtime.h>
__global__ void repeatedActivations(float* H, int K, int M, int r, float iterfac) {
/*
Avoid repeated activations with a maximum filter
:param H: An KxM matrix whose repeated activations will be suppressed row-wise
:param K, M: Dimensions
:param r: Width of repeated activation filter
:param iterfac: The shrinkage factor for non-maximum values in a neighborhood
*/
extern __shared__ float x[];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
//TODO: FINISH THIS
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void repeatedActivations(float* H, int K, int M, int r, float iterfac) {
/*
Avoid repeated activations with a maximum filter
:param H: An KxM matrix whose repeated activations will be suppressed row-wise
:param K, M: Dimensions
:param r: Width of repeated activation filter
:param iterfac: The shrinkage factor for non-maximum values in a neighborhood
*/
extern __shared__ float x[];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
//TODO: FINISH THIS
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19repeatedActivationsPfiiif
.globl _Z19repeatedActivationsPfiiif
.p2align 8
.type _Z19repeatedActivationsPfiiif,@function
_Z19repeatedActivationsPfiiif:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19repeatedActivationsPfiiif
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19repeatedActivationsPfiiif, .Lfunc_end0-_Z19repeatedActivationsPfiiif
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19repeatedActivationsPfiiif
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z19repeatedActivationsPfiiif.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void repeatedActivations(float* H, int K, int M, int r, float iterfac) {
/*
Avoid repeated activations with a maximum filter
:param H: An KxM matrix whose repeated activations will be suppressed row-wise
:param K, M: Dimensions
:param r: Width of repeated activation filter
:param iterfac: The shrinkage factor for non-maximum values in a neighborhood
*/
extern __shared__ float x[];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
//TODO: FINISH THIS
} | .text
.file "Driedger.hip"
.globl _Z34__device_stub__repeatedActivationsPfiiif # -- Begin function _Z34__device_stub__repeatedActivationsPfiiif
.p2align 4, 0x90
.type _Z34__device_stub__repeatedActivationsPfiiif,@function
_Z34__device_stub__repeatedActivationsPfiiif: # @_Z34__device_stub__repeatedActivationsPfiiif
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movss %xmm0, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19repeatedActivationsPfiiif, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z34__device_stub__repeatedActivationsPfiiif, .Lfunc_end0-_Z34__device_stub__repeatedActivationsPfiiif
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19repeatedActivationsPfiiif, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19repeatedActivationsPfiiif,@object # @_Z19repeatedActivationsPfiiif
.section .rodata,"a",@progbits
.globl _Z19repeatedActivationsPfiiif
.p2align 3, 0x0
_Z19repeatedActivationsPfiiif:
.quad _Z34__device_stub__repeatedActivationsPfiiif
.size _Z19repeatedActivationsPfiiif, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19repeatedActivationsPfiiif"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__repeatedActivationsPfiiif
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19repeatedActivationsPfiiif
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z19repeatedActivationsPfiiif
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z19repeatedActivationsPfiiif
.globl _Z19repeatedActivationsPfiiif
.p2align 8
.type _Z19repeatedActivationsPfiiif,@function
_Z19repeatedActivationsPfiiif:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19repeatedActivationsPfiiif
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z19repeatedActivationsPfiiif, .Lfunc_end0-_Z19repeatedActivationsPfiiif
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19repeatedActivationsPfiiif
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z19repeatedActivationsPfiiif.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012e3fe_00000000-6_Driedger.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif
.type _Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif, @function
_Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19repeatedActivationsPfiiif(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif, .-_Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif
.globl _Z19repeatedActivationsPfiiif
.type _Z19repeatedActivationsPfiiif, @function
_Z19repeatedActivationsPfiiif:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z19repeatedActivationsPfiiifPfiiif
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z19repeatedActivationsPfiiif, .-_Z19repeatedActivationsPfiiif
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z19repeatedActivationsPfiiif"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z19repeatedActivationsPfiiif(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Driedger.hip"
.globl _Z34__device_stub__repeatedActivationsPfiiif # -- Begin function _Z34__device_stub__repeatedActivationsPfiiif
.p2align 4, 0x90
.type _Z34__device_stub__repeatedActivationsPfiiif,@function
_Z34__device_stub__repeatedActivationsPfiiif: # @_Z34__device_stub__repeatedActivationsPfiiif
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movss %xmm0, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19repeatedActivationsPfiiif, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z34__device_stub__repeatedActivationsPfiiif, .Lfunc_end0-_Z34__device_stub__repeatedActivationsPfiiif
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19repeatedActivationsPfiiif, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z19repeatedActivationsPfiiif,@object # @_Z19repeatedActivationsPfiiif
.section .rodata,"a",@progbits
.globl _Z19repeatedActivationsPfiiif
.p2align 3, 0x0
_Z19repeatedActivationsPfiiif:
.quad _Z34__device_stub__repeatedActivationsPfiiif
.size _Z19repeatedActivationsPfiiif, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z19repeatedActivationsPfiiif"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__repeatedActivationsPfiiif
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z19repeatedActivationsPfiiif
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<string.h>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<fstream>
#include<sys/time.h>
#include<cuda.h>
#define INF 10000;
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
dim3 gridDimension;
dim3 blockDimension;
bool readInput(char *fileName, int &vCount, int &eCount, int *&vertex, int *&edge, int *&departure, int *&duration, int &source)
{
	// Read a temporal graph in CSR form from `fileName`.
	// File layout: "vCount eCount source", then vCount+1 row offsets,
	// then eCount edge targets, eCount departure times, eCount durations.
	// The four arrays are allocated here with new[]; the caller owns them.
	// Defect fixed: the open was never checked and the function always
	// returned true, so a missing/truncated file silently yielded garbage.
	std::ifstream fin(fileName);
	if(!fin)
		return false;
	fin>>vCount>>eCount>>source;
	if(fin.fail() || vCount < 0 || eCount < 0)
		return false;
	vertex = new int[vCount+1];
	edge = new int[eCount];
	departure = new int[eCount];
	duration = new int[eCount];
	for(int i=0; i<=vCount; i++)
		fin>>vertex[i];
	for(int i=0; i<=eCount-1; i++)
		fin>>edge[i];
	for(int i=0; i<=eCount-1; i++)
		fin>>departure[i];
	for(int i=0; i<=eCount-1; i++)
		fin>>duration[i];
	// False when any extraction above failed (e.g. file ended early).
	return !fin.fail();
}
bool printInput(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration)
{
	// Dump the CSR representation to "csr2.txt", one array per line
	// (row offsets, edge targets, departure times, durations), each
	// value followed by a single space. Always returns true.
	std::ofstream fout("csr2.txt");
	const int *rows[4] = { vertex, edge, departure, duration };
	const int lengths[4] = { vCount + 1, eCount, eCount, eCount };
	for(int r = 0; r < 4; r++)
	{
		for(int k = 0; k < lengths[r]; k++)
			fout << rows[r][k] << " ";
		fout << std::endl;
	}
	return true;
}
void initConfiguration(dim3 &grid, dim3 &block, int n)
{
	// Derive a 1-D launch configuration covering n threads: a single
	// block of n threads when n fits, otherwise full blocks of
	// MAX_THREADS_PER_BLOCK with enough blocks to cover all n
	// (ceiling division). y and z extents are always 1.
	int blocks = 1;
	int threadsPerBlock = n;
	if(n > MAX_THREADS_PER_BLOCK)
	{
		threadsPerBlock = MAX_THREADS_PER_BLOCK;
		blocks = (int)ceil(n/(double)MAX_THREADS_PER_BLOCK);
	}
	grid.x = blocks;
	grid.y = 1;
	grid.z = 1;
	block.x = threadsPerBlock;
	block.y = 1;
	block.z = 1;
}
//**should be initialized with specified time instead of zero
// Allocate an n-element host array into X and set every slot to the INF
// sentinel ("unreached"). NOTE(review): the INF macro expands with a
// trailing semicolon (#define INF 10000;), so it is only safe in
// statement position as used here.
void initArray(int *&X, int n)
{
	X = new int[n];
	int i = 0;
	while(i < n)
	{
		X[i] = INF;
		i++;
	}
}
void cudaCopyToDevice(int *X, int *&cX, int n)
{
	// Allocate an n-int buffer on the device and upload X into it.
	// cX receives the device pointer; the caller must cudaFree it.
	// Defect fixed: both CUDA calls previously had their error codes
	// silently discarded, so allocation/copy failures went unnoticed
	// and surfaced only as mysterious failures later.
	// (cudaSuccess == 0; compared literally to avoid extra symbols.)
	if(cudaMalloc((void**)&cX, n*sizeof(int)) != 0)
	{
		std::cerr << "cudaCopyToDevice: cudaMalloc of " << n*sizeof(int)
		          << " bytes failed" << std::endl;
		return;
	}
	if(cudaMemcpy(cX, X, n*sizeof(int), cudaMemcpyHostToDevice) != 0)
		std::cerr << "cudaCopyToDevice: host-to-device cudaMemcpy failed" << std::endl;
}
__global__
// Single relaxation of every edge treated as leaving the hard-coded source
// vertex u = 0: thread i handles edge i and improves the tentative
// earliest-arrival time of that edge's target when the edge is usable
// (earliestTime[0] <= its departure) and t+lambda beats the current value.
// NOTE(review): the only launch site (in computeEarliestTimes) is commented
// out; the i >= 1024*100 guard mirrors that commented <<<100, 1024>>>
// launch rather than the real edge count — confirm before re-enabling.
// NOTE(review): `vertex` (the CSR offset array) is accepted but never read.
// The earliestTime[v] update is non-atomic, so concurrent threads sharing a
// target v can race; a lost update is only recovered by later passes of
// processVertices.
void processVertex(int *vertex, int *edge, int *departure, int *duration, int *earliestTime,int *level)
{
    int i,u,v,t, lambda;
    i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; // flat thread id; assumes blockDim.x == MAX_THREADS_PER_BLOCK
    if(i>=1024*100) return; // bound of the commented-out <<<100, 1024>>> launch, not eCount
    u = 0; // every edge is relaxed as if leaving source vertex 0
    v = edge[i];          // edge target
    t = departure[i];     // departure time of edge i
    lambda = duration[i]; // travel time of edge i
    if(earliestTime[u]<=t && t+lambda < earliestTime[v])
    { //if(i==0){printf("first thread updating:after \n"); }
        earliestTime[v]= t + lambda; // non-atomic read-modify-write: racy
        level[v]=1; // mark v as reached in the first frontier
    }
}
__global__
// One relaxation pass of the earliest-arrival (temporal shortest path)
// computation: thread i owns vertex u = i and scans u's outgoing edges via
// the CSR arrays. An edge (u -> v) departing at t with travel time lambda is
// usable only if u is reachable by its departure (earliestTime[u] <= t);
// when the resulting arrival t+lambda improves earliestTime[v], the value is
// updated and *dContinue is raised so the host runs another pass.
// `iterations` and `level` are currently unused — the frontier/level pruning
// that consumed them is commented out below.
// Races: earliestTime[v] is read and written non-atomically by many threads;
// a lost update only delays convergence because the host loops until a pass
// makes no change (see computeEarliestTimes).
// Indexing assumes blockDim.x == MAX_THREADS_PER_BLOCK whenever more than
// one block is launched, which initConfiguration guarantees.
void processVertices(int iterations, int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, bool *dContinue, int *level)
{
    int i,j,u,v,t,lambda,degree;
    i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; // flat vertex index
    if(i >= vCount) return; // guard the ragged last block
    //if(iterations==0 && i!=0) return;
    //if(level[i] != iterations) return;
    u = i;
    degree = vertex[u+1] - vertex[u]; // out-degree from CSR row offsets
    for(j=1; j<=degree; j++)
    {
        v = edge[vertex[u]+j-1];          // target of u's (j-1)-th edge
        t = departure[vertex[u]+j-1];     // its departure time
        lambda = duration[vertex[u]+j-1]; // its travel time
        if(earliestTime[u]<=t && t+lambda < earliestTime[v])
        { //if(i==0){printf("first thread updating:after \n"); }
            earliestTime[v]= t + lambda; // non-atomic improvement (racy)
            *dContinue=true; // signal the host that another pass is needed
            //level[v]=iterations+1;
        }
    }
}
void computeEarliestTimes(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, int *level)
{
	// Host driver for the fixed-point relaxation: repeatedly launches
	// processVertices (one thread per vertex) until a full pass makes no
	// improvement. All array arguments are device pointers (allocated by
	// cudaCopyToDevice at the call site); `level` is forwarded unused while
	// the kernel's level machinery stays commented out.
	int iterations=1;
	bool hContinue;
	bool *dContinue; // device-side "any update happened this pass" flag
	cudaMalloc( (void**) &dContinue, sizeof(bool));
	//processVertex<<< 100, 1024>>>(vertex, edge, departure, duration, earliestTime,level);
	//Call the Kernel untill all the elements of Frontier are not false
	do
	{
		//if no thread changes this value then the loop stops
		hContinue=false;
		cudaMemcpy(dContinue, &hContinue, sizeof(bool), cudaMemcpyHostToDevice) ;
		// gridDimension/blockDimension are the file-level globals set by
		// initConfiguration for vCount threads.
		processVertices<<< gridDimension, blockDimension, 0 >>>(iterations,vCount, eCount, vertex, edge, departure, duration, earliestTime,dContinue,level);
		// The blocking device-to-host copy below also synchronizes with the
		// kernel on the default stream before hContinue is inspected.
		cudaMemcpy( &hContinue, dContinue, sizeof(bool), cudaMemcpyDeviceToHost) ;
		iterations++;
	}
	while(hContinue);
	// Leak fix: dContinue was previously never released, leaking one device
	// allocation per call.
	cudaFree(dContinue);
}
int main(int argc, char *argv[])
{
	// Driver: read a temporal graph in CSR form from argv[1], compute each
	// vertex's earliest-arrival time from the file's source vertex on the
	// GPU, then print "<vertex> <earliest time>" lines and the elapsed
	// wall-clock time in milliseconds.
	int vCount, eCount, source;
	int *edge, *vertex, *departure, *duration, *earliestTime, *level;
	int *cEdge, *cVertex, *cDeparture, *cDuration, *cEarliestTime, *cLevel;
	char fileName[100];
	struct timeval start,stop;
	double time;
	// Robustness fix: fail cleanly when the graph-file argument is missing,
	// and never overflow the fixed-size fileName buffer.
	if(argc < 2)
	{
		cout<<"usage: program <graph-file>"<<endl;
		return 1;
	}
	strncpy(fileName, argv[1], sizeof(fileName)-1);
	fileName[sizeof(fileName)-1] = '\0';
	readInput(fileName,vCount, eCount, vertex, edge, departure, duration, source);
	initConfiguration(gridDimension,blockDimension, vCount);
	// Bug fix: the CSR offset array holds vCount+1 entries; copying only
	// vCount of them left vertex[vCount] (the final row-end offset) as
	// garbage on the device, corrupting the last vertex's degree in
	// processVertices.
	cudaCopyToDevice(vertex,cVertex,vCount+1);
	cudaCopyToDevice(edge,cEdge,eCount);
	cudaCopyToDevice(departure,cDeparture,eCount);
	cudaCopyToDevice(duration,cDuration,eCount);
	initArray(earliestTime,vCount);
	earliestTime[source]=0; // the source is reachable at time 0
	cudaCopyToDevice(earliestTime,cEarliestTime,vCount);
	//initArray(level,vCount);
	// level[source]=0;
	// cudaCopyToDevice(level,cLevel,vCount);
	// NOTE(review): cLevel is passed below without ever being allocated (the
	// level machinery above is commented out); this is harmless only while
	// processVertices keeps its level[] writes commented out — confirm
	// before re-enabling either side.
	gettimeofday(&start,0);
	computeEarliestTimes(vCount,eCount,cVertex,cEdge,cDeparture,cDuration,cEarliestTime,cLevel);
	gettimeofday(&stop,0);
	time = (1000000.0*(stop.tv_sec-start.tv_sec) + stop.tv_usec-start.tv_usec)/1000.0;
	cudaMemcpy(earliestTime, cEarliestTime, vCount*sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(cEarliestTime);
	cudaFree(cEdge);
	cudaFree(cVertex);
	cudaFree(cDeparture);
	cudaFree(cDuration);
	//cout<<"Memory copied"<<endl;
	for(int i=0;i<=vCount-1;i++)
	{
		cout<<i<<" "<<earliestTime[i]<<endl;
		//fprintf(fp1,"Earliest time for %d is %d\n",i,earliest[i]);
	}
	cout<<"Time is "<<time<<endl;
	// Leak fix: release the host arrays allocated by readInput/initArray.
	delete[] vertex;
	delete[] edge;
	delete[] departure;
	delete[] duration;
	delete[] earliestTime;
	return 0;
} | code for sm_80
Function : _Z15processVerticesiiiPiS_S_S_S_PbS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, 0x400, R3 ; /* 0x0000040004047824 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x164], PT ; /* 0x0000590004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R9, c[0x0][0x170] ; /* 0x00005c0004027625 */
/* 0x000fca00078e0209 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R5, [R2.64+0x4] ; /* 0x0000040402057981 */
/* 0x000ea4000c1e1900 */
/*00b0*/ ISETP.GT.AND P0, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x004fda0003f04270 */
/*00c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00d0*/ LOP3.LUT R6, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff067212 */
/* 0x000fe200078e33ff */
/*00e0*/ IMAD.IADD R7, R5.reuse, 0x1, -R0 ; /* 0x0000000105077824 */
/* 0x040fe200078e0a00 */
/*00f0*/ BSSY B0, 0x8b0 ; /* 0x000007b000007945 */
/* 0x000fe60003800000 */
/*0100*/ IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105067824 */
/* 0x000fe200078e0206 */
/*0110*/ LOP3.LUT R0, R7, 0x3, RZ, 0xc0, !PT ; /* 0x0000000307007812 */
/* 0x000fe200078ec0ff */
/*0120*/ IMAD.WIDE R4, R4, R9, c[0x0][0x190] ; /* 0x0000640004047625 */
/* 0x000fc600078e0209 */
/*0130*/ ISETP.GE.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe20003f06070 */
/*0140*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fd800078e00ff */
/*0150*/ @!P0 BRA 0x8a0 ; /* 0x0000074000008947 */
/* 0x000fea0003800000 */
/*0160*/ IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107077824 */
/* 0x000fe400078e0a00 */
/*0170*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fe400078e00ff */
/*0180*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */
/* 0x001ea2000c1e1900 */
/*0190*/ IMAD.MOV.U32 R8, RZ, RZ, 0x4 ; /* 0x00000004ff087424 */
/* 0x000fc600078e00ff */
/*01a0*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ee2000c1e1900 */
/*01b0*/ IADD3 R15, R6, -0x1, R11 ; /* 0xffffffff060f7810 */
/* 0x004fca0007ffe00b */
/*01c0*/ IMAD.WIDE R12, R15, R8, c[0x0][0x180] ; /* 0x000060000f0c7625 */
/* 0x000fca00078e0208 */
/*01d0*/ LDG.E R14, [R12.64] ; /* 0x000000040c0e7981 */
/* 0x000ee2000c1e1900 */
/*01e0*/ BSSY B1, 0x360 ; /* 0x0000017000017945 */
/* 0x000fe20003800000 */
/*01f0*/ ISETP.GT.AND P0, PT, R9, R14, PT ; /* 0x0000000e0900720c */
/* 0x008fda0003f04270 */
/*0200*/ @P0 BRA 0x350 ; /* 0x0000014000000947 */
/* 0x000fea0003800000 */
/*0210*/ IMAD.SHL.U32 R18, R15, 0x4, RZ ; /* 0x000000040f127824 */
/* 0x000fe200078e00ff */
/*0220*/ SHF.R.S32.HI R10, RZ, 0x1f, R15 ; /* 0x0000001fff0a7819 */
/* 0x000fc8000001140f */
/*0230*/ SHF.L.U64.HI R10, R15, 0x2, R10 ; /* 0x000000020f0a7819 */
/* 0x000fe4000001020a */
/*0240*/ IADD3 R16, P0, R18, c[0x0][0x178], RZ ; /* 0x00005e0012107a10 */
/* 0x000fc80007f1e0ff */
/*0250*/ IADD3.X R17, R10, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000a117a10 */
/* 0x000fca00007fe4ff */
/*0260*/ LDG.E R13, [R16.64] ; /* 0x00000004100d7981 */
/* 0x000ea2000c1e1900 */
/*0270*/ IADD3 R18, P0, R18, c[0x0][0x188], RZ ; /* 0x0000620012127a10 */
/* 0x000fc80007f1e0ff */
/*0280*/ IADD3.X R19, R10, c[0x0][0x18c], RZ, P0, !PT ; /* 0x000063000a137a10 */
/* 0x000fca00007fe4ff */
/*0290*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ee2000c1e1900 */
/*02a0*/ IMAD.WIDE R12, R13, R8, c[0x0][0x190] ; /* 0x000064000d0c7625 */
/* 0x004fca00078e0208 */
/*02b0*/ LDG.E R10, [R12.64] ; /* 0x000000040c0a7981 */
/* 0x000ea2000c1e1900 */
/*02c0*/ IMAD.IADD R21, R14, 0x1, R18 ; /* 0x000000010e157824 */
/* 0x008fca00078e0212 */
/*02d0*/ ISETP.GE.AND P0, PT, R21, R10, PT ; /* 0x0000000a1500720c */
/* 0x004fe20003f06270 */
/*02e0*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fd800078e00ff */
/*02f0*/ @!P0 IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff0e8624 */
/* 0x000fe400078e00ff */
/*0300*/ @!P0 IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff0f8624 */
/* 0x000fe200078e00ff */
/*0310*/ @!P0 STG.E [R12.64], R21 ; /* 0x000000150c008986 */
/* 0x0001e8000c101904 */
/*0320*/ @!P0 STG.E.U8 [R14.64], R10 ; /* 0x0000000a0e008986 */
/* 0x0001e8000c101104 */
/*0330*/ @!P0 LDG.E R11, [R2.64] ; /* 0x00000004020b8981 */
/* 0x000168000c1e1900 */
/*0340*/ @!P0 LDG.E R9, [R4.64] ; /* 0x0000000404098981 */
/* 0x000164000c1e1900 */
/*0350*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0360*/ IMAD.IADD R15, R11, 0x1, R6 ; /* 0x000000010b0f7824 */
/* 0x021fc800078e0206 */
/*0370*/ IMAD.WIDE R12, R15, R8, c[0x0][0x180] ; /* 0x000060000f0c7625 */
/* 0x000fca00078e0208 */
/*0380*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000ea2000c1e1900 */
/*0390*/ BSSY B1, 0x510 ; /* 0x0000017000017945 */
/* 0x000fe20003800000 */
/*03a0*/ ISETP.GT.AND P0, PT, R9, R18, PT ; /* 0x000000120900720c */
/* 0x004fda0003f04270 */
/*03b0*/ @P0 BRA 0x500 ; /* 0x0000014000000947 */
/* 0x000fea0003800000 */
/*03c0*/ IMAD.SHL.U32 R16, R15, 0x4, RZ ; /* 0x000000040f107824 */
/* 0x000fe200078e00ff */
/*03d0*/ SHF.R.S32.HI R10, RZ, 0x1f, R15 ; /* 0x0000001fff0a7819 */
/* 0x000fc8000001140f */
/*03e0*/ SHF.L.U64.HI R10, R15, 0x2, R10 ; /* 0x000000020f0a7819 */
/* 0x000fe4000001020a */
/*03f0*/ IADD3 R14, P0, R16, c[0x0][0x178], RZ ; /* 0x00005e00100e7a10 */
/* 0x000fc80007f1e0ff */
/*0400*/ IADD3.X R15, R10, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000a0f7a10 */
/* 0x000fca00007fe4ff */
/*0410*/ LDG.E R13, [R14.64] ; /* 0x000000040e0d7981 */
/* 0x000ea2000c1e1900 */
/*0420*/ IADD3 R16, P0, R16, c[0x0][0x188], RZ ; /* 0x0000620010107a10 */
/* 0x000fc80007f1e0ff */
/*0430*/ IADD3.X R17, R10, c[0x0][0x18c], RZ, P0, !PT ; /* 0x000063000a117a10 */
/* 0x000fca00007fe4ff */
/*0440*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ee2000c1e1900 */
/*0450*/ IMAD.WIDE R12, R13, R8, c[0x0][0x190] ; /* 0x000064000d0c7625 */
/* 0x004fca00078e0208 */
/*0460*/ LDG.E R10, [R12.64] ; /* 0x000000040c0a7981 */
/* 0x000ea2000c1e1900 */
/*0470*/ IMAD.IADD R21, R18, 0x1, R16 ; /* 0x0000000112157824 */
/* 0x008fca00078e0210 */
/*0480*/ ISETP.GE.AND P0, PT, R21, R10, PT ; /* 0x0000000a1500720c */
/* 0x004fe20003f06270 */
/*0490*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fd800078e00ff */
/*04a0*/ @!P0 IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff128624 */
/* 0x000fe400078e00ff */
/*04b0*/ @!P0 IMAD.MOV.U32 R19, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff138624 */
/* 0x000fe200078e00ff */
/*04c0*/ @!P0 STG.E [R12.64], R21 ; /* 0x000000150c008986 */
/* 0x0001e8000c101904 */
/*04d0*/ @!P0 STG.E.U8 [R18.64], R10 ; /* 0x0000000a12008986 */
/* 0x0001e8000c101104 */
/*04e0*/ @!P0 LDG.E R11, [R2.64] ; /* 0x00000004020b8981 */
/* 0x000168000c1e1900 */
/*04f0*/ @!P0 LDG.E R9, [R4.64] ; /* 0x0000000404098981 */
/* 0x000164000c1e1900 */
/*0500*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0510*/ IADD3 R15, R6, 0x1, R11 ; /* 0x00000001060f7810 */
/* 0x020fca0007ffe00b */
/*0520*/ IMAD.WIDE R12, R15, R8, c[0x0][0x180] ; /* 0x000060000f0c7625 */
/* 0x001fca00078e0208 */
/*0530*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000ea2000c1e1900 */
/*0540*/ BSSY B1, 0x6c0 ; /* 0x0000017000017945 */
/* 0x000fe20003800000 */
/*0550*/ ISETP.GT.AND P0, PT, R9, R18, PT ; /* 0x000000120900720c */
/* 0x004fda0003f04270 */
/*0560*/ @P0 BRA 0x6b0 ; /* 0x0000014000000947 */
/* 0x000fea0003800000 */
/*0570*/ IMAD.SHL.U32 R16, R15, 0x4, RZ ; /* 0x000000040f107824 */
/* 0x000fe200078e00ff */
/*0580*/ SHF.R.S32.HI R10, RZ, 0x1f, R15 ; /* 0x0000001fff0a7819 */
/* 0x000fc8000001140f */
/*0590*/ SHF.L.U64.HI R10, R15, 0x2, R10 ; /* 0x000000020f0a7819 */
/* 0x000fe4000001020a */
/*05a0*/ IADD3 R14, P0, R16, c[0x0][0x178], RZ ; /* 0x00005e00100e7a10 */
/* 0x000fc80007f1e0ff */
/*05b0*/ IADD3.X R15, R10, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000a0f7a10 */
/* 0x000fca00007fe4ff */
/*05c0*/ LDG.E R13, [R14.64] ; /* 0x000000040e0d7981 */
/* 0x000ea2000c1e1900 */
/*05d0*/ IADD3 R16, P0, R16, c[0x0][0x188], RZ ; /* 0x0000620010107a10 */
/* 0x000fc80007f1e0ff */
/*05e0*/ IADD3.X R17, R10, c[0x0][0x18c], RZ, P0, !PT ; /* 0x000063000a117a10 */
/* 0x000fca00007fe4ff */
/*05f0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ee2000c1e1900 */
/*0600*/ IMAD.WIDE R12, R13, R8, c[0x0][0x190] ; /* 0x000064000d0c7625 */
/* 0x004fca00078e0208 */
/*0610*/ LDG.E R10, [R12.64] ; /* 0x000000040c0a7981 */
/* 0x000ea2000c1e1900 */
/*0620*/ IMAD.IADD R21, R18, 0x1, R16 ; /* 0x0000000112157824 */
/* 0x008fca00078e0210 */
/*0630*/ ISETP.GE.AND P0, PT, R21, R10, PT ; /* 0x0000000a1500720c */
/* 0x004fe20003f06270 */
/*0640*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fd800078e00ff */
/*0650*/ @!P0 IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff128624 */
/* 0x000fe400078e00ff */
/*0660*/ @!P0 IMAD.MOV.U32 R19, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff138624 */
/* 0x000fe200078e00ff */
/*0670*/ @!P0 STG.E [R12.64], R21 ; /* 0x000000150c008986 */
/* 0x0001e8000c101904 */
/*0680*/ @!P0 STG.E.U8 [R18.64], R10 ; /* 0x0000000a12008986 */
/* 0x0001e8000c101104 */
/*0690*/ @!P0 LDG.E R11, [R2.64] ; /* 0x00000004020b8981 */
/* 0x000168000c1e1900 */
/*06a0*/ @!P0 LDG.E R9, [R4.64] ; /* 0x0000000404098981 */
/* 0x000164000c1e1900 */
/*06b0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*06c0*/ IADD3 R13, R6, 0x2, R11 ; /* 0x00000002060d7810 */
/* 0x021fca0007ffe00b */
/*06d0*/ IMAD.WIDE R10, R13, R8, c[0x0][0x180] ; /* 0x000060000d0a7625 */
/* 0x000fca00078e0208 */
/*06e0*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x000ea2000c1e1900 */
/*06f0*/ IADD3 R7, R7, -0x4, RZ ; /* 0xfffffffc07077810 */
/* 0x000fe20007ffe0ff */
/*0700*/ BSSY B1, 0x880 ; /* 0x0000017000017945 */
/* 0x000fe60003800000 */
/*0710*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003f05270 */
/*0720*/ ISETP.GT.AND P1, PT, R9, R15, PT ; /* 0x0000000f0900720c */
/* 0x004fda0003f24270 */
/*0730*/ @P1 BRA 0x870 ; /* 0x0000013000001947 */
/* 0x000fea0003800000 */
/*0740*/ SHF.R.S32.HI R10, RZ, 0x1f, R13 ; /* 0x0000001fff0a7819 */
/* 0x000fe2000001140d */
/*0750*/ IMAD.SHL.U32 R12, R13, 0x4, RZ ; /* 0x000000040d0c7824 */
/* 0x000fc600078e00ff */
/*0760*/ SHF.L.U64.HI R13, R13, 0x2, R10 ; /* 0x000000020d0d7819 */
/* 0x000fe4000001020a */
/*0770*/ IADD3 R10, P1, R12, c[0x0][0x178], RZ ; /* 0x00005e000c0a7a10 */
/* 0x000fc80007f3e0ff */
/*0780*/ IADD3.X R11, R13, c[0x0][0x17c], RZ, P1, !PT ; /* 0x00005f000d0b7a10 */
/* 0x000fca0000ffe4ff */
/*0790*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */
/* 0x000ea2000c1e1900 */
/*07a0*/ IADD3 R12, P1, R12, c[0x0][0x188], RZ ; /* 0x000062000c0c7a10 */
/* 0x000fc80007f3e0ff */
/*07b0*/ IADD3.X R13, R13, c[0x0][0x18c], RZ, P1, !PT ; /* 0x000063000d0d7a10 */
/* 0x000fca0000ffe4ff */
/*07c0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000ee2000c1e1900 */
/*07d0*/ IMAD.WIDE R8, R9, R8, c[0x0][0x190] ; /* 0x0000640009087625 */
/* 0x004fca00078e0208 */
/*07e0*/ LDG.E R14, [R8.64] ; /* 0x00000004080e7981 */
/* 0x000ea2000c1e1900 */
/*07f0*/ IMAD.IADD R17, R15, 0x1, R12 ; /* 0x000000010f117824 */
/* 0x008fca00078e020c */
/*0800*/ ISETP.GE.AND P1, PT, R17, R14, PT ; /* 0x0000000e1100720c */
/* 0x004fe20003f26270 */
/*0810*/ IMAD.MOV.U32 R14, RZ, RZ, 0x1 ; /* 0x00000001ff0e7424 */
/* 0x000fd800078e00ff */
/*0820*/ @!P1 PRMT R11, R14, 0x7610, R11 ; /* 0x000076100e0b9816 */
/* 0x000fe2000000000b */
/*0830*/ @!P1 IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff0e9624 */
/* 0x000fe400078e00ff */
/*0840*/ @!P1 IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff0f9624 */
/* 0x000fe200078e00ff */
/*0850*/ @!P1 STG.E [R8.64], R17 ; /* 0x0000001108009986 */
/* 0x0001e8000c101904 */
/*0860*/ @!P1 STG.E.U8 [R14.64], R11 ; /* 0x0000000b0e009986 */
/* 0x0001e4000c101104 */
/*0870*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0880*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fe20007ffe0ff */
/*0890*/ @P0 BRA 0x180 ; /* 0xfffff8e000000947 */
/* 0x000fea000383ffff */
/*08a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*08b0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*08c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*08d0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe40007ffe0ff */
/*08e0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x001ea2000c1e1900 */
/*08f0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x4 ; /* 0x00000004ff0c7424 */
/* 0x000fe400078e00ff */
/*0900*/ IMAD.IADD R11, R7, 0x1, R6 ; /* 0x00000001070b7824 */
/* 0x005fe400078e0206 */
/*0910*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*0920*/ IMAD.WIDE R8, R11, R12, c[0x0][0x180] ; /* 0x000060000b087625 */
/* 0x000fca00078e020c */
/*0930*/ LDG.E R14, [R8.64] ; /* 0x00000004080e7981 */
/* 0x000ea2000c1e1900 */
/*0940*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fe20007ffe0ff */
/*0950*/ BSSY B0, 0xac0 ; /* 0x0000016000007945 */
/* 0x000fe60003800000 */
/*0960*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f05270 */
/*0970*/ ISETP.GT.AND P1, PT, R7, R14, PT ; /* 0x0000000e0700720c */
/* 0x004fda0003f24270 */
/*0980*/ @P1 BRA 0xab0 ; /* 0x0000012000001947 */
/* 0x000fea0003800000 */
/*0990*/ SHF.R.S32.HI R8, RZ, 0x1f, R11 ; /* 0x0000001fff087819 */
/* 0x000fe2000001140b */
/*09a0*/ IMAD.SHL.U32 R10, R11, 0x4, RZ ; /* 0x000000040b0a7824 */
/* 0x000fc600078e00ff */
/*09b0*/ SHF.L.U64.HI R7, R11, 0x2, R8 ; /* 0x000000020b077819 */
/* 0x000fe40000010208 */
/*09c0*/ IADD3 R8, P1, R10, c[0x0][0x178], RZ ; /* 0x00005e000a087a10 */
/* 0x000fc80007f3e0ff */
/*09d0*/ IADD3.X R9, R7, c[0x0][0x17c], RZ, P1, !PT ; /* 0x00005f0007097a10 */
/* 0x000fca0000ffe4ff */
/*09e0*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea2000c1e1900 */
/*09f0*/ IADD3 R10, P1, R10, c[0x0][0x188], RZ ; /* 0x000062000a0a7a10 */
/* 0x000fc80007f3e0ff */
/*0a00*/ IADD3.X R11, R7, c[0x0][0x18c], RZ, P1, !PT ; /* 0x00006300070b7a10 */
/* 0x000fca0000ffe4ff */
/*0a10*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000ee2000c1e1900 */
/*0a20*/ IMAD.WIDE R12, R8, R12, c[0x0][0x190] ; /* 0x00006400080c7625 */
/* 0x004fca00078e020c */
/*0a30*/ LDG.E R7, [R12.64] ; /* 0x000000040c077981 */
/* 0x000ea2000c1e1900 */
/*0a40*/ IMAD.IADD R16, R14, 0x1, R10 ; /* 0x000000010e107824 */
/* 0x008fca00078e020a */
/*0a50*/ ISETP.GE.AND P1, PT, R16, R7, PT ; /* 0x000000071000720c */
/* 0x004fe20003f26270 */
/*0a60*/ IMAD.MOV.U32 R7, RZ, RZ, 0x1 ; /* 0x00000001ff077424 */
/* 0x000fd800078e00ff */
/*0a70*/ @!P1 IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff0e9624 */
/* 0x000fe400078e00ff */
/*0a80*/ @!P1 IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff0f9624 */
/* 0x000fe200078e00ff */
/*0a90*/ @!P1 STG.E [R12.64], R16 ; /* 0x000000100c009986 */
/* 0x0001e8000c101904 */
/*0aa0*/ @!P1 STG.E.U8 [R14.64], R7 ; /* 0x000000070e009986 */
/* 0x0001e4000c101104 */
/*0ab0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0ac0*/ IADD3 R6, R6, 0x1, RZ ; /* 0x0000000106067810 */
/* 0x000fe20007ffe0ff */
/*0ad0*/ @P0 BRA 0x8e0 ; /* 0xfffffe0000000947 */
/* 0x000fea000383ffff */
/*0ae0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0af0*/ BRA 0xaf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0b00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13processVertexPiS_S_S_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, 0x400, R3 ; /* 0x0000040000007824 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0x18fff, PT ; /* 0x00018fff0000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R8, RZ, RZ, 0x4 ; /* 0x00000004ff087424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff047624 */
/* 0x000fe400078e00ff */
/*0090*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff057624 */
/* 0x000fe400078e00ff */
/*00a0*/ IMAD.WIDE R2, R0, R8, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fc600078e0208 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ea4000c1e1900 */
/*00d0*/ ISETP.GT.AND P0, PT, R4, R9, PT ; /* 0x000000090400720c */
/* 0x004fda0003f04270 */
/*00e0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00f0*/ IMAD.SHL.U32 R6, R0, 0x4, RZ ; /* 0x0000000400067824 */
/* 0x000fe200078e00ff */
/*0100*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fc80000011400 */
/*0110*/ SHF.L.U64.HI R3, R0, 0x2, R3 ; /* 0x0000000200037819 */
/* 0x000fe40000010203 */
/*0120*/ IADD3 R4, P0, R6, c[0x0][0x168], RZ ; /* 0x00005a0006047a10 */
/* 0x000fc80007f1e0ff */
/*0130*/ IADD3.X R5, R3, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0003057a10 */
/* 0x000fcc00007fe4ff */
/*0140*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0150*/ IADD3 R6, P0, R6, c[0x0][0x178], RZ ; /* 0x00005e0006067a10 */
/* 0x000fc80007f1e0ff */
/*0160*/ IADD3.X R7, R3, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f0003077a10 */
/* 0x000fca00007fe4ff */
/*0170*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*0180*/ IMAD.WIDE R2, R5, R8, c[0x0][0x180] ; /* 0x0000600005027625 */
/* 0x004fca00078e0208 */
/*0190*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea2000c1e1900 */
/*01a0*/ IMAD.IADD R9, R9, 0x1, R6 ; /* 0x0000000109097824 */
/* 0x008fca00078e0206 */
/*01b0*/ ISETP.GE.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x004fda0003f06270 */
/*01c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01d0*/ SHF.R.S32.HI R0, RZ, 0x1f, R5 ; /* 0x0000001fff007819 */
/* 0x000fe20000011405 */
/*01e0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x1 ; /* 0x00000001ff077424 */
/* 0x000fe200078e00ff */
/*01f0*/ LEA R4, P0, R5.reuse, c[0x0][0x188], 0x2 ; /* 0x0000620005047a11 */
/* 0x040fe200078010ff */
/*0200*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe6000c101904 */
/*0210*/ LEA.HI.X R5, R5, c[0x0][0x18c], R0, 0x2, P0 ; /* 0x0000630005057a11 */
/* 0x000fca00000f1400 */
/*0220*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0230*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0240*/ BRA 0x240; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<string.h>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<fstream>
#include<sys/time.h>
#include<cuda.h>
#define INF 10000;
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
dim3 gridDimension;
dim3 blockDimension;
bool readInput(char *fileName, int &vCount, int &eCount, int *&vertex, int *&edge, int *&departure, int *&duration, int &source)
{
ifstream fin;
fin.open(fileName);
fin>>vCount>>eCount>>source;
vertex = new int[vCount+1];
edge = new int[eCount];
departure = new int[eCount];
duration = new int[eCount];
for(int i=0; i<=vCount; i++)
fin>>vertex[i];
for(int i=0; i<=eCount-1; i++)
fin>>edge[i];
for(int i=0; i<=eCount-1; i++)
fin>>departure[i];
for(int i=0; i<=eCount-1; i++)
fin>>duration[i];
//cout<<"reading the input is over"<<endl;
return true;
}
bool printInput(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration)
{
ofstream fout;
fout.open("csr2.txt");
for(int i=0; i<=vCount; i++)
fout<<vertex[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<edge[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<departure[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<duration[i]<<" ";
fout<<endl;
return true;
}
void initConfiguration(dim3 &grid, dim3 &block, int n)
{
int num_of_blocks = 1;
int num_of_threads_per_block = n;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(n>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(n/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
grid.x = num_of_blocks; grid.y=1; grid.z=1;
block.x = num_of_threads_per_block; block.y=1; block.z=1;
}
//**should be initialized with specified time instead of zero
void initArray(int *&X, int n)
{
X = new int[n];
for(int i=0; i<=n-1; i++)
{
X[i] = INF;
}
}
void cudaCopyToDevice(int *X, int *&cX, int n)
{
cudaMalloc((void**)&cX, n*sizeof(int));
cudaMemcpy( cX, X, n*sizeof(int), cudaMemcpyHostToDevice);
}
__global__
void processVertex(int *vertex, int *edge, int *departure, int *duration, int *earliestTime,int *level)
{
int i,u,v,t, lambda;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i>=1024*100) return;
u = 0;
v = edge[i];
t = departure[i];
lambda = duration[i];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
level[v]=1;
}
}
__global__
void processVertices(int iterations, int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, bool *dContinue, int *level)
{
int i,j,u,v,t,lambda,degree;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i >= vCount) return;
//if(iterations==0 && i!=0) return;
//if(level[i] != iterations) return;
u = i;
degree = vertex[u+1] - vertex[u];
for(j=1; j<=degree; j++)
{
v = edge[vertex[u]+j-1];
t = departure[vertex[u]+j-1];
lambda = duration[vertex[u]+j-1];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
*dContinue=true;
//level[v]=iterations+1;
}
}
}
void computeEarliestTimes(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, int *level)
{
int iterations=0;
bool hContinue;
bool *dContinue;
cudaMalloc( (void**) &dContinue, sizeof(bool));
//processVertex<<< 100, 1024>>>(vertex, edge, departure, duration, earliestTime,level);
iterations=1;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
hContinue=false;
cudaMemcpy(dContinue, &hContinue, sizeof(bool), cudaMemcpyHostToDevice) ;
processVertices<<< gridDimension, blockDimension, 0 >>>(iterations,vCount, eCount, vertex, edge, departure, duration, earliestTime,dContinue,level);
// check if kernel execution generated and error
//Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated and error
cudaMemcpy( &hContinue, dContinue, sizeof(bool), cudaMemcpyDeviceToHost) ;
iterations++;
}
while(hContinue);
}
int main(int argc, char *argv[])
{
int vCount, eCount, source;
int *edge, *vertex, *departure, *duration, *earliestTime, *level;
int *cEdge, *cVertex, *cDeparture, *cDuration, *cEarliestTime, *cLevel;
char fileName[100];
struct timeval start,stop;
double time;
strcpy(fileName, argv[1]);
readInput(fileName,vCount, eCount, vertex, edge, departure, duration, source);
initConfiguration(gridDimension,blockDimension, vCount);
cudaCopyToDevice(vertex,cVertex,vCount);
cudaCopyToDevice(edge,cEdge,eCount);
cudaCopyToDevice(departure,cDeparture,eCount);
cudaCopyToDevice(duration,cDuration,eCount);
initArray(earliestTime,vCount);
earliestTime[source]=0; // starting time
cudaCopyToDevice(earliestTime,cEarliestTime,vCount);
//initArray(level,vCount);
// level[source]=0;
// cudaCopyToDevice(level,cLevel,vCount);
gettimeofday(&start,0);
computeEarliestTimes(vCount,eCount,cVertex,cEdge,cDeparture,cDuration,cEarliestTime,cLevel);
gettimeofday(&stop,0);
time = (1000000.0*(stop.tv_sec-start.tv_sec) + stop.tv_usec-start.tv_usec)/1000.0;
cudaMemcpy(earliestTime, cEarliestTime, vCount*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(cEarliestTime);
cudaFree(cEdge);
cudaFree(cVertex);
cudaFree(cDeparture);
cudaFree(cDuration);
//cout<<"Memory copied"<<endl;
for(int i=0;i<=vCount-1;i++)
{
cout<<i<<" "<<earliestTime[i]<<endl;
//fprintf(fp1,"Earliest time for %d is %d\n",i,earliest[i]);
}
cout<<"Time is "<<time<<endl;
return 0;
} | .file "tmpxft_00120e62_00000000-6_node1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3809:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3809:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z17initConfigurationR4dim3S0_i
.type _Z17initConfigurationR4dim3S0_i, @function
_Z17initConfigurationR4dim3S0_i:
.LFB3802:
.cfi_startproc
endbr64
movl $1, %eax
cmpl $1024, %edx
jle .L4
pxor %xmm0, %xmm0
cvtsi2sdl %edx, %xmm0
mulsd .LC0(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC4(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC1(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L5
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC3(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L5:
cvttsd2sil %xmm3, %eax
movl $1024, %edx
.L4:
movl %eax, (%rdi)
movl $1, 4(%rdi)
movl $1, 8(%rdi)
movl %edx, (%rsi)
movl $1, 4(%rsi)
movl $1, 8(%rsi)
ret
.cfi_endproc
.LFE3802:
.size _Z17initConfigurationR4dim3S0_i, .-_Z17initConfigurationR4dim3S0_i
.globl _Z9initArrayRPii
.type _Z9initArrayRPii, @function
_Z9initArrayRPii:
.LFB3803:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movslq %esi, %rbx
movabsq $2305843009213693950, %rax
cmpq %rbx, %rax
jb .L8
movq %rdi, %rbp
movl %esi, %r12d
salq $2, %rbx
movq %rbx, %rdi
call _Znam@PLT
movq %rax, 0(%rbp)
movl $0, %eax
testl %r12d, %r12d
jle .L7
.L9:
movq 0(%rbp), %rdx
movl $10000, (%rdx,%rax)
addq $4, %rax
cmpq %rax, %rbx
jne .L9
.L7:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore_state
call __cxa_throw_bad_array_new_length@PLT
.cfi_endproc
.LFE3803:
.size _Z9initArrayRPii, .-_Z9initArrayRPii
.globl _Z16cudaCopyToDevicePiRS_i
.type _Z16cudaCopyToDevicePiRS_i, @function
_Z16cudaCopyToDevicePiRS_i:
.LFB3804:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movq %rsi, %rbp
movslq %edx, %rbx
salq $2, %rbx
movq %rbx, %rsi
movq %rbp, %rdi
call cudaMalloc@PLT
movq 0(%rbp), %rdi
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
call cudaMemcpy@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3804:
.size _Z16cudaCopyToDevicePiRS_i, .-_Z16cudaCopyToDevicePiRS_i
.globl _Z43__device_stub__Z13processVertexPiS_S_S_S_S_PiS_S_S_S_S_
.type _Z43__device_stub__Z13processVertexPiS_S_S_S_S_PiS_S_S_S_S_, @function
_Z43__device_stub__Z13processVertexPiS_S_S_S_S_PiS_S_S_S_S_:
.LFB3831:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z13processVertexPiS_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3831:
.size _Z43__device_stub__Z13processVertexPiS_S_S_S_S_PiS_S_S_S_S_, .-_Z43__device_stub__Z13processVertexPiS_S_S_S_S_PiS_S_S_S_S_
.globl _Z13processVertexPiS_S_S_S_S_
.type _Z13processVertexPiS_S_S_S_S_, @function
_Z13processVertexPiS_S_S_S_S_:
.LFB3832:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z13processVertexPiS_S_S_S_S_PiS_S_S_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3832:
.size _Z13processVertexPiS_S_S_S_S_, .-_Z13processVertexPiS_S_S_S_S_
.globl _Z50__device_stub__Z15processVerticesiiiPiS_S_S_S_PbS_iiiPiS_S_S_S_PbS_
.type _Z50__device_stub__Z15processVerticesiiiPiS_S_S_S_PbS_iiiPiS_S_S_S_PbS_, @function
_Z50__device_stub__Z15processVerticesiiiPiS_S_S_S_PbS_iiiPiS_S_S_S_PbS_:
.LFB3833:
.cfi_startproc
endbr64
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movl %edx, 68(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 48(%rsp)
movq %r9, 40(%rsp)
movq 256(%rsp), %rax
movq %rax, 32(%rsp)
movq 264(%rsp), %rax
movq %rax, 24(%rsp)
movq 272(%rsp), %rax
movq %rax, 16(%rsp)
movq 280(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 68(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
leaq 40(%rsp), %rax
movq %rax, 184(%rsp)
leaq 32(%rsp), %rax
movq %rax, 192(%rsp)
leaq 24(%rsp), %rax
movq %rax, 200(%rsp)
leaq 16(%rsp), %rax
movq %rax, 208(%rsp)
leaq 8(%rsp), %rax
movq %rax, 216(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L29
.L25:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 264
pushq 88(%rsp)
.cfi_def_cfa_offset 272
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z15processVerticesiiiPiS_S_S_S_PbS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L25
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3833:
.size _Z50__device_stub__Z15processVerticesiiiPiS_S_S_S_PbS_iiiPiS_S_S_S_PbS_, .-_Z50__device_stub__Z15processVerticesiiiPiS_S_S_S_PbS_iiiPiS_S_S_S_PbS_
.globl _Z15processVerticesiiiPiS_S_S_S_PbS_
.type _Z15processVerticesiiiPiS_S_S_S_PbS_, @function
_Z15processVerticesiiiPiS_S_S_S_PbS_:
.LFB3834:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 40(%rsp)
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z50__device_stub__Z15processVerticesiiiPiS_S_S_S_PbS_iiiPiS_S_S_S_PbS_
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3834:
.size _Z15processVerticesiiiPiS_S_S_S_PbS_, .-_Z15processVerticesiiiPiS_S_S_S_PbS_
.globl _Z20computeEarliestTimesiiPiS_S_S_S_S_
.type _Z20computeEarliestTimesiiPiS_S_S_S_S_, @function
_Z20computeEarliestTimesiiPiS_S_S_S_S_:
.LFB3805:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movl %edi, %r13d
movl %esi, %r12d
movq %rdx, (%rsp)
movq %rcx, 8(%rsp)
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq 128(%rsp), %r15
movq 136(%rsp), %r14
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 48(%rsp), %rdi
movl $1, %esi
call cudaMalloc@PLT
movl $1, %ebx
leaq 47(%rsp), %rbp
jmp .L35
.L34:
movl $2, %ecx
movl $1, %edx
movq 48(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
addl $1, %ebx
cmpb $0, 47(%rsp)
je .L39
.L35:
movb $0, 47(%rsp)
movl $1, %ecx
movl $1, %edx
movq %rbp, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl 8+blockDimension(%rip), %ecx
movl $0, %r9d
movl $0, %r8d
movq blockDimension(%rip), %rdx
movq gridDimension(%rip), %rdi
movl 8+gridDimension(%rip), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L34
pushq %r14
.cfi_def_cfa_offset 136
pushq 56(%rsp)
.cfi_def_cfa_offset 144
pushq %r15
.cfi_def_cfa_offset 152
pushq 48(%rsp)
.cfi_def_cfa_offset 160
movq 48(%rsp), %r9
movq 40(%rsp), %r8
movq 32(%rsp), %rcx
movl %r12d, %edx
movl %r13d, %esi
movl %ebx, %edi
call _Z50__device_stub__Z15processVerticesiiiPiS_S_S_S_PbS_iiiPiS_S_S_S_PbS_
addq $32, %rsp
.cfi_def_cfa_offset 128
jmp .L34
.L39:
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3805:
.size _Z20computeEarliestTimesiiPiS_S_S_S_S_, .-_Z20computeEarliestTimesiiPiS_S_S_S_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "_Z15processVerticesiiiPiS_S_S_S_PbS_"
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "_Z13processVertexPiS_S_S_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3836:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z15processVerticesiiiPiS_S_S_S_PbS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z13processVertexPiS_S_S_S_S_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3836:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.text
.globl _Z9readInputPcRiS0_RPiS2_S2_S2_S0_
.type _Z9readInputPcRiS0_RPiS2_S2_S2_S0_, @function
_Z9readInputPcRiS0_RPiS2_S2_S2_S0_:
.LFB3800:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA3800
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $568, %rsp
.cfi_def_cfa_offset 624
movq %rdi, 16(%rsp)
movq %rsi, 8(%rsp)
movq %rdx, %rbp
movq %rcx, %r14
movq %r8, %r13
movq %r9, %r12
movq 624(%rsp), %r15
movq 632(%rsp), %rax
movq %rax, 24(%rsp)
movq %fs:40, %rax
movq %rax, 552(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rbx
leaq 288(%rsp), %rdi
call _ZNSt8ios_baseC2Ev@PLT
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 288(%rsp)
movq $0, 504(%rsp)
movb $0, 512(%rsp)
movb $0, 513(%rsp)
movq $0, 520(%rsp)
movq $0, 528(%rsp)
movq $0, 536(%rsp)
movq $0, 544(%rsp)
movq 8+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 32(%rsp)
movq -24(%rax), %rax
movq 16+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rcx
movq %rcx, 32(%rsp,%rax)
movq $0, 40(%rsp)
movq 32(%rsp), %rax
movq %rbx, %rdi
addq -24(%rax), %rdi
movl $0, %esi
.LEHB0:
call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT
.LEHE0:
leaq 24+_ZTVSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 32(%rsp)
leaq 40(%rax), %rax
movq %rax, 288(%rsp)
leaq 48(%rsp), %rdi
.LEHB1:
call _ZNSt13basic_filebufIcSt11char_traitsIcEEC1Ev@PLT
.LEHE1:
leaq 48(%rsp), %rsi
leaq 288(%rsp), %rdi
.LEHB2:
call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT
.LEHE2:
jmp .L91
.L78:
endbr64
movq %rax, %rbx
leaq 48(%rsp), %rdi
call _ZNSt13basic_filebufIcSt11char_traitsIcEED1Ev@PLT
.L46:
movq 8+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 32(%rsp)
movq -24(%rax), %rax
movq 16+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rcx
movq %rcx, 32(%rsp,%rax)
movq $0, 40(%rsp)
.L47:
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 288(%rsp)
leaq 288(%rsp), %rdi
call _ZNSt8ios_baseD2Ev@PLT
movq 552(%rsp), %rax
subq %fs:40, %rax
je .L48
call __stack_chk_fail@PLT
.L77:
endbr64
movq %rax, %rbx
jmp .L46
.L76:
endbr64
movq %rax, %rbx
jmp .L47
.L48:
movq %rbx, %rdi
.LEHB3:
call _Unwind_Resume@PLT
.LEHE3:
.L91:
leaq 48(%rsp), %rdi
movl $8, %edx
movq 16(%rsp), %rsi
.LEHB4:
call _ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@PLT
testq %rax, %rax
je .L92
movq 32(%rsp), %rax
movq -24(%rax), %rax
leaq 32(%rsp,%rax), %rdi
movl $0, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L50
.L92:
movq 32(%rsp), %rax
movq -24(%rax), %rax
leaq 32(%rsp,%rax), %rdi
movl 32(%rdi), %esi
orl $4, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
.L50:
leaq 32(%rsp), %rdi
movq 8(%rsp), %rbx
movq %rbx, %rsi
call _ZNSirsERi@PLT
movq %rax, %rdi
movq %rbp, %rsi
call _ZNSirsERi@PLT
movq %rax, %rdi
movq 24(%rsp), %rsi
call _ZNSirsERi@PLT
movl (%rbx), %eax
movl %eax, 16(%rsp)
leal 1(%rax), %edi
movslq %edi, %rdi
movabsq $2305843009213693950, %rax
cmpq %rdi, %rax
jb .L51
salq $2, %rdi
call _Znam@PLT
movq %rax, (%r14)
movslq 0(%rbp), %rdi
movabsq $2305843009213693950, %rax
cmpq %rdi, %rax
jb .L93
salq $2, %rdi
call _Znam@PLT
jmp .L94
.L51:
movq 552(%rsp), %rax
subq %fs:40, %rax
je .L53
call __stack_chk_fail@PLT
.L53:
call __cxa_throw_bad_array_new_length@PLT
.L75:
endbr64
movq %rax, %rbx
leaq 32(%rsp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
movq 552(%rsp), %rax
subq %fs:40, %rax
je .L73
call __stack_chk_fail@PLT
.L94:
movq %rax, 0(%r13)
movslq 0(%rbp), %rdi
movabsq $2305843009213693950, %rax
cmpq %rdi, %rax
jb .L95
salq $2, %rdi
call _Znam@PLT
jmp .L96
.L93:
movq 552(%rsp), %rax
subq %fs:40, %rax
je .L56
call __stack_chk_fail@PLT
.L56:
call __cxa_throw_bad_array_new_length@PLT
.L96:
movq %rax, (%r12)
movslq 0(%rbp), %rdi
movabsq $2305843009213693950, %rax
cmpq %rdi, %rax
jb .L97
salq $2, %rdi
call _Znam@PLT
jmp .L98
.L95:
movq 552(%rsp), %rax
subq %fs:40, %rax
je .L59
call __stack_chk_fail@PLT
.L59:
call __cxa_throw_bad_array_new_length@PLT
.L98:
movq %rax, (%r15)
movq 8(%rsp), %rax
cmpl $0, (%rax)
js .L63
movl $0, %ebx
leaq 32(%rsp), %rax
movq %rax, 16(%rsp)
jmp .L64
.L97:
movq 552(%rsp), %rax
subq %fs:40, %rax
je .L62
call __stack_chk_fail@PLT
.L62:
call __cxa_throw_bad_array_new_length@PLT
.L99:
addq $1, %rbx
movq 8(%rsp), %rax
cmpl %ebx, (%rax)
jl .L63
.L64:
movq (%r14), %rax
leaq (%rax,%rbx,4), %rsi
movq 16(%rsp), %rdi
call _ZNSirsERi@PLT
jmp .L99
.L63:
cmpl $0, 0(%rbp)
jle .L68
movl $0, %ebx
leaq 32(%rsp), %r14
jmp .L66
.L101:
movl 0(%rbp), %eax
addq $1, %rbx
cmpl %ebx, %eax
jle .L100
.L66:
movq 0(%r13), %rax
leaq (%rax,%rbx,4), %rsi
movq %r14, %rdi
call _ZNSirsERi@PLT
jmp .L101
.L100:
testl %eax, %eax
jle .L68
movl $0, %ebx
leaq 32(%rsp), %r13
jmp .L67
.L103:
movl 0(%rbp), %eax
addq $1, %rbx
cmpl %ebx, %eax
jle .L102
.L67:
movq (%r12), %rax
leaq (%rax,%rbx,4), %rsi
movq %r13, %rdi
call _ZNSirsERi@PLT
jmp .L103
.L102:
testl %eax, %eax
jle .L68
movl $0, %ebx
leaq 32(%rsp), %r12
jmp .L69
.L104:
addq $1, %rbx
cmpl %ebx, 0(%rbp)
jle .L68
.L69:
movq (%r15), %rax
leaq (%rax,%rbx,4), %rsi
movq %r12, %rdi
call _ZNSirsERi@PLT
.LEHE4:
jmp .L104
.L68:
leaq 24+_ZTVSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 32(%rsp)
leaq 40(%rax), %rax
movq %rax, 288(%rsp)
leaq 16+_ZTVSt13basic_filebufIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 48(%rsp)
leaq 48(%rsp), %rdi
.LEHB5:
call _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv@PLT
.LEHE5:
jmp .L71
.L79:
endbr64
movq %rax, %rdi
call __cxa_begin_catch@PLT
call __cxa_end_catch@PLT
.L71:
leaq 152(%rsp), %rdi
call _ZNSt12__basic_fileIcED1Ev@PLT
leaq 16+_ZTVSt15basic_streambufIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 48(%rsp)
leaq 104(%rsp), %rdi
call _ZNSt6localeD1Ev@PLT
movq 8+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 32(%rsp)
movq -24(%rax), %rax
movq 16+_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE(%rip), %rdx
movq %rdx, 32(%rsp,%rax)
movq $0, 40(%rsp)
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 288(%rsp)
leaq 288(%rsp), %rdi
call _ZNSt8ios_baseD2Ev@PLT
movq 552(%rsp), %rax
subq %fs:40, %rax
jne .L105
movl $1, %eax
addq $568, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L73:
.cfi_restore_state
movq %rbx, %rdi
.LEHB6:
call _Unwind_Resume@PLT
.LEHE6:
.L105:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3800:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.align 4
.LLSDA3800:
.byte 0xff
.byte 0x9b
.uleb128 .LLSDATT3800-.LLSDATTD3800
.LLSDATTD3800:
.byte 0x1
.uleb128 .LLSDACSE3800-.LLSDACSB3800
.LLSDACSB3800:
.uleb128 .LEHB0-.LFB3800
.uleb128 .LEHE0-.LEHB0
.uleb128 .L76-.LFB3800
.uleb128 0
.uleb128 .LEHB1-.LFB3800
.uleb128 .LEHE1-.LEHB1
.uleb128 .L77-.LFB3800
.uleb128 0
.uleb128 .LEHB2-.LFB3800
.uleb128 .LEHE2-.LEHB2
.uleb128 .L78-.LFB3800
.uleb128 0
.uleb128 .LEHB3-.LFB3800
.uleb128 .LEHE3-.LEHB3
.uleb128 0
.uleb128 0
.uleb128 .LEHB4-.LFB3800
.uleb128 .LEHE4-.LEHB4
.uleb128 .L75-.LFB3800
.uleb128 0
.uleb128 .LEHB5-.LFB3800
.uleb128 .LEHE5-.LEHB5
.uleb128 .L79-.LFB3800
.uleb128 0x1
.uleb128 .LEHB6-.LFB3800
.uleb128 .LEHE6-.LEHB6
.uleb128 0
.uleb128 0
.LLSDACSE3800:
.byte 0x1
.byte 0
.align 4
.long 0
.LLSDATT3800:
.text
.size _Z9readInputPcRiS0_RPiS2_S2_S2_S0_, .-_Z9readInputPcRiS0_RPiS2_S2_S2_S0_
.section .rodata.str1.1
.LC9:
.string " "
.LC10:
.string "Time is "
.text
.globl main
.type main, @function
main:
.LFB3806:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $264, %rsp
.cfi_def_cfa_offset 320
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rsi
leaq 144(%rsp), %rbx
movl $100, %edx
movq %rbx, %rdi
call __strcpy_chk@PLT
leaq 40(%rsp), %rcx
leaq 24(%rsp), %rdx
leaq 20(%rsp), %rsi
leaq 28(%rsp), %rax
pushq %rax
.cfi_def_cfa_offset 328
leaq 64(%rsp), %rax
pushq %rax
.cfi_def_cfa_offset 336
leaq 64(%rsp), %r9
leaq 48(%rsp), %r8
movq %rbx, %rdi
call _Z9readInputPcRiS0_RPiS2_S2_S2_S0_
movl 36(%rsp), %ebx
movl %ebx, %edx
leaq blockDimension(%rip), %rsi
leaq gridDimension(%rip), %rdi
call _Z17initConfigurationR4dim3S0_i
addq $16, %rsp
.cfi_def_cfa_offset 320
leaq 80(%rsp), %rsi
movl %ebx, %edx
movq 40(%rsp), %rdi
call _Z16cudaCopyToDevicePiRS_i
leaq 72(%rsp), %rsi
movl 24(%rsp), %edx
movq 32(%rsp), %rdi
call _Z16cudaCopyToDevicePiRS_i
leaq 88(%rsp), %rsi
movl 24(%rsp), %edx
movq 48(%rsp), %rdi
call _Z16cudaCopyToDevicePiRS_i
leaq 96(%rsp), %rsi
movl 24(%rsp), %edx
movq 56(%rsp), %rdi
call _Z16cudaCopyToDevicePiRS_i
leaq 64(%rsp), %rdi
movl 20(%rsp), %esi
call _Z9initArrayRPii
movq 64(%rsp), %r13
movslq 28(%rsp), %rax
movl $0, 0(%r13,%rax,4)
leaq 104(%rsp), %rsi
movl 20(%rsp), %edx
movq %r13, %rdi
call _Z16cudaCopyToDevicePiRS_i
leaq 112(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
pushq $0
.cfi_def_cfa_offset 328
pushq 112(%rsp)
.cfi_def_cfa_offset 336
movq 112(%rsp), %r9
movq 104(%rsp), %r8
movq 88(%rsp), %rcx
movq 96(%rsp), %rdx
movl 40(%rsp), %esi
movl 36(%rsp), %edi
call _Z20computeEarliestTimesiiPiS_S_S_S_S_
leaq 144(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movq 144(%rsp), %rax
subq 128(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC7(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 152(%rsp), %xmm1
addsd %xmm1, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 136(%rsp), %xmm1
subsd %xmm1, %xmm0
divsd .LC8(%rip), %xmm0
movsd %xmm0, 24(%rsp)
addq $16, %rsp
.cfi_def_cfa_offset 320
movslq 20(%rsp), %rdx
salq $2, %rdx
movl $2, %ecx
movq 104(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movq 104(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 96(%rsp), %rdi
call cudaFree@PLT
cmpl $0, 20(%rsp)
jle .L107
movl $0, %ebp
leaq _ZSt4cout(%rip), %r15
leaq .LC9(%rip), %r14
jmp .L112
.L117:
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L116
call _ZSt16__throw_bad_castv@PLT
.L116:
call __stack_chk_fail@PLT
.L110:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
.L111:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $1, %rbp
cmpl %ebp, 20(%rsp)
jle .L107
.L112:
movl %ebp, %esi
movq %r15, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movl $1, %edx
movq %r14, %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 0(%r13,%rbp,4), %esi
movq %rbx, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r12
testq %r12, %r12
je .L117
cmpb $0, 56(%r12)
je .L110
movzbl 67(%r12), %esi
jmp .L111
.L107:
leaq .LC10(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L118
movl $0, %eax
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L118:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3806:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "csr2.txt"
.text
.globl _Z10printInputiiPiS_S_S_
.type _Z10printInputiiPiS_S_S_, @function
_Z10printInputiiPiS_S_S_:
.LFB3801:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA3801
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $568, %rsp
.cfi_def_cfa_offset 624
movl %edi, %ebp
movl %esi, %r13d
movq %rdx, %r12
movq %rcx, 8(%rsp)
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq %fs:40, %rax
movq %rax, 552(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rbx
leaq 280(%rsp), %rdi
call _ZNSt8ios_baseC2Ev@PLT
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 280(%rsp)
movq $0, 496(%rsp)
movb $0, 504(%rsp)
movb $0, 505(%rsp)
movq $0, 512(%rsp)
movq $0, 520(%rsp)
movq $0, 528(%rsp)
movq $0, 536(%rsp)
movq 8+_ZTTSt14basic_ofstreamIcSt11char_traitsIcEE(%rip), %r15
movq %r15, 32(%rsp)
movq -24(%r15), %rax
movq 16+_ZTTSt14basic_ofstreamIcSt11char_traitsIcEE(%rip), %rcx
movq %rcx, 32(%rsp,%rax)
movq 32(%rsp), %rax
addq -24(%rax), %rbx
movq %rbx, %rdi
movl $0, %esi
.LEHB7:
call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT
.LEHE7:
leaq 24+_ZTVSt14basic_ofstreamIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 32(%rsp)
leaq 40(%rax), %rax
movq %rax, 280(%rsp)
leaq 40(%rsp), %rdi
.LEHB8:
call _ZNSt13basic_filebufIcSt11char_traitsIcEEC1Ev@PLT
.LEHE8:
leaq 40(%rsp), %rsi
leaq 280(%rsp), %rdi
.LEHB9:
call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT
.LEHE9:
jmp .L170
.L160:
endbr64
movq %rax, %rbx
leaq 40(%rsp), %rdi
call _ZNSt13basic_filebufIcSt11char_traitsIcEED1Ev@PLT
.L122:
movq %r15, 32(%rsp)
movq -24(%r15), %rax
movq 16+_ZTTSt14basic_ofstreamIcSt11char_traitsIcEE(%rip), %rcx
movq %rcx, 32(%rsp,%rax)
.L123:
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 280(%rsp)
leaq 280(%rsp), %rdi
call _ZNSt8ios_baseD2Ev@PLT
movq 552(%rsp), %rax
subq %fs:40, %rax
je .L124
call __stack_chk_fail@PLT
.L159:
endbr64
movq %rax, %rbx
jmp .L122
.L158:
endbr64
movq %rax, %rbx
jmp .L123
.L124:
movq %rbx, %rdi
.LEHB10:
call _Unwind_Resume@PLT
.LEHE10:
.L170:
leaq 40(%rsp), %rdi
movl $16, %edx
leaq .LC11(%rip), %rsi
.LEHB11:
call _ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@PLT
testq %rax, %rax
je .L171
movq 32(%rsp), %rax
movq -24(%rax), %rax
leaq 32(%rsp,%rax), %rdi
movl $0, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
jmp .L128
.L171:
movq 32(%rsp), %rax
movq -24(%rax), %rax
leaq 32(%rsp,%rax), %rdi
movl 32(%rdi), %esi
orl $4, %esi
call _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@PLT
.L128:
movl $0, %ebx
leaq .LC9(%rip), %r14
testl %ebp, %ebp
jns .L126
.L127:
movq 32(%rsp), %rax
movq -24(%rax), %rax
movq 272(%rsp,%rax), %rbx
testq %rbx, %rbx
je .L172
cmpb $0, 56(%rbx)
je .L131
movzbl 67(%rbx), %esi
.L132:
movsbl %sil, %esi
leaq 32(%rsp), %rdi
call _ZNSo3putEc@PLT
jmp .L173
.L174:
movq %rax, %rdi
movl $1, %edx
movq %r14, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %rbx
cmpl %ebx, %ebp
jl .L127
.L126:
movl (%r12,%rbx,4), %esi
leaq 32(%rsp), %rdi
call _ZNSolsEi@PLT
jmp .L174
.L172:
movq 552(%rsp), %rax
subq %fs:40, %rax
jne .L175
call _ZSt16__throw_bad_castv@PLT
.L157:
endbr64
movq %rax, %rbx
leaq 32(%rsp), %rdi
call _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@PLT
movq 552(%rsp), %rax
subq %fs:40, %rax
je .L154
call __stack_chk_fail@PLT
.L175:
call __stack_chk_fail@PLT
.L131:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L132
.L173:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
testl %r13d, %r13d
jle .L133
movq 8(%rsp), %rcx
movq %rcx, %rbx
movslq %r13d, %rax
leaq (%rcx,%rax,4), %rbp
leaq .LC9(%rip), %r12
jmp .L134
.L176:
movq %rax, %rdi
movl $1, %edx
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbp, %rbx
je .L133
.L134:
movl (%rbx), %esi
leaq 32(%rsp), %rdi
call _ZNSolsEi@PLT
jmp .L176
.L133:
movq 32(%rsp), %rax
movq -24(%rax), %rax
movq 272(%rsp,%rax), %rbx
testq %rbx, %rbx
je .L177
cmpb $0, 56(%rbx)
je .L137
movzbl 67(%rbx), %esi
.L138:
movsbl %sil, %esi
leaq 32(%rsp), %rdi
call _ZNSo3putEc@PLT
jmp .L178
.L177:
movq 552(%rsp), %rax
subq %fs:40, %rax
jne .L179
call _ZSt16__throw_bad_castv@PLT
.L179:
call __stack_chk_fail@PLT
.L137:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L138
.L178:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
testl %r13d, %r13d
jle .L139
movq 16(%rsp), %rcx
movq %rcx, %rbx
movslq %r13d, %rax
leaq (%rcx,%rax,4), %rbp
leaq .LC9(%rip), %r12
jmp .L140
.L180:
movq %rax, %rdi
movl $1, %edx
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %rbp
je .L139
.L140:
movl (%rbx), %esi
leaq 32(%rsp), %rdi
call _ZNSolsEi@PLT
jmp .L180
.L139:
movq 32(%rsp), %rax
movq -24(%rax), %rax
movq 272(%rsp,%rax), %rbx
testq %rbx, %rbx
je .L181
cmpb $0, 56(%rbx)
je .L143
movzbl 67(%rbx), %esi
.L144:
movsbl %sil, %esi
leaq 32(%rsp), %rdi
call _ZNSo3putEc@PLT
jmp .L182
.L181:
movq 552(%rsp), %rax
subq %fs:40, %rax
jne .L183
call _ZSt16__throw_bad_castv@PLT
.L183:
call __stack_chk_fail@PLT
.L143:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L144
.L182:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
testl %r13d, %r13d
jle .L145
movq 24(%rsp), %rax
movq %rax, %rbx
movslq %r13d, %r13
leaq (%rax,%r13,4), %rbp
leaq .LC9(%rip), %r12
jmp .L146
.L184:
movq %rax, %rdi
movl $1, %edx
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbp, %rbx
je .L145
.L146:
movl (%rbx), %esi
leaq 32(%rsp), %rdi
call _ZNSolsEi@PLT
jmp .L184
.L145:
movq 32(%rsp), %rax
movq -24(%rax), %rax
movq 272(%rsp,%rax), %rbx
testq %rbx, %rbx
je .L185
cmpb $0, 56(%rbx)
je .L149
movzbl 67(%rbx), %esi
.L150:
movsbl %sil, %esi
leaq 32(%rsp), %rdi
call _ZNSo3putEc@PLT
jmp .L186
.L185:
movq 552(%rsp), %rax
subq %fs:40, %rax
jne .L187
call _ZSt16__throw_bad_castv@PLT
.L187:
call __stack_chk_fail@PLT
.L149:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L150
.L186:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.LEHE11:
leaq 24+_ZTVSt14basic_ofstreamIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 32(%rsp)
leaq 40(%rax), %rax
movq %rax, 280(%rsp)
leaq 16+_ZTVSt13basic_filebufIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rdi
.LEHB12:
call _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv@PLT
.LEHE12:
jmp .L152
.L161:
endbr64
movq %rax, %rdi
call __cxa_begin_catch@PLT
call __cxa_end_catch@PLT
.L152:
leaq 144(%rsp), %rdi
call _ZNSt12__basic_fileIcED1Ev@PLT
leaq 16+_ZTVSt15basic_streambufIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 40(%rsp)
leaq 96(%rsp), %rdi
call _ZNSt6localeD1Ev@PLT
movq %r15, 32(%rsp)
movq -24(%r15), %rax
movq 16+_ZTTSt14basic_ofstreamIcSt11char_traitsIcEE(%rip), %rcx
movq %rcx, 32(%rsp,%rax)
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 280(%rsp)
leaq 280(%rsp), %rdi
call _ZNSt8ios_baseD2Ev@PLT
movq 552(%rsp), %rax
subq %fs:40, %rax
jne .L188
movl $1, %eax
addq $568, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L154:
.cfi_restore_state
movq %rbx, %rdi
.LEHB13:
call _Unwind_Resume@PLT
.LEHE13:
.L188:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3801:
.section .gcc_except_table
.align 4
.LLSDA3801:
.byte 0xff
.byte 0x9b
.uleb128 .LLSDATT3801-.LLSDATTD3801
.LLSDATTD3801:
.byte 0x1
.uleb128 .LLSDACSE3801-.LLSDACSB3801
.LLSDACSB3801:
.uleb128 .LEHB7-.LFB3801
.uleb128 .LEHE7-.LEHB7
.uleb128 .L158-.LFB3801
.uleb128 0
.uleb128 .LEHB8-.LFB3801
.uleb128 .LEHE8-.LEHB8
.uleb128 .L159-.LFB3801
.uleb128 0
.uleb128 .LEHB9-.LFB3801
.uleb128 .LEHE9-.LEHB9
.uleb128 .L160-.LFB3801
.uleb128 0
.uleb128 .LEHB10-.LFB3801
.uleb128 .LEHE10-.LEHB10
.uleb128 0
.uleb128 0
.uleb128 .LEHB11-.LFB3801
.uleb128 .LEHE11-.LEHB11
.uleb128 .L157-.LFB3801
.uleb128 0
.uleb128 .LEHB12-.LFB3801
.uleb128 .LEHE12-.LEHB12
.uleb128 .L161-.LFB3801
.uleb128 0x1
.uleb128 .LEHB13-.LFB3801
.uleb128 .LEHE13-.LEHB13
.uleb128 0
.uleb128 0
.LLSDACSE3801:
.byte 0x1
.byte 0
.align 4
.long 0
.LLSDATT3801:
.text
.size _Z10printInputiiPiS_S_S_, .-_Z10printInputiiPiS_S_S_
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl blockDimension
.data
.align 8
.type blockDimension, @object
.size blockDimension, 12
blockDimension:
.long 1
.long 1
.long 1
.globl gridDimension
.align 8
.type gridDimension, @object
.size gridDimension, 12
gridDimension:
.long 1
.long 1
.long 1
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1062207488
.align 8
.LC1:
.long 0
.long 1127219200
.align 8
.LC3:
.long 0
.long 1072693248
.align 8
.LC4:
.long -1
.long 2147483647
.align 8
.LC7:
.long 0
.long 1093567616
.align 8
.LC8:
.long 0
.long 1083129856
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<string.h>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<fstream>
#include<sys/time.h>
#include<cuda.h>
#define INF 10000;
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
dim3 gridDimension;
dim3 blockDimension;
bool readInput(char *fileName, int &vCount, int &eCount, int *&vertex, int *&edge, int *&departure, int *&duration, int &source)
{
ifstream fin;
fin.open(fileName);
fin>>vCount>>eCount>>source;
vertex = new int[vCount+1];
edge = new int[eCount];
departure = new int[eCount];
duration = new int[eCount];
for(int i=0; i<=vCount; i++)
fin>>vertex[i];
for(int i=0; i<=eCount-1; i++)
fin>>edge[i];
for(int i=0; i<=eCount-1; i++)
fin>>departure[i];
for(int i=0; i<=eCount-1; i++)
fin>>duration[i];
//cout<<"reading the input is over"<<endl;
return true;
}
bool printInput(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration)
{
ofstream fout;
fout.open("csr2.txt");
for(int i=0; i<=vCount; i++)
fout<<vertex[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<edge[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<departure[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<duration[i]<<" ";
fout<<endl;
return true;
}
void initConfiguration(dim3 &grid, dim3 &block, int n)
{
int num_of_blocks = 1;
int num_of_threads_per_block = n;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(n>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(n/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
grid.x = num_of_blocks; grid.y=1; grid.z=1;
block.x = num_of_threads_per_block; block.y=1; block.z=1;
}
//**should be initialized with specified time instead of zero
void initArray(int *&X, int n)
{
X = new int[n];
for(int i=0; i<=n-1; i++)
{
X[i] = INF;
}
}
void cudaCopyToDevice(int *X, int *&cX, int n)
{
cudaMalloc((void**)&cX, n*sizeof(int));
cudaMemcpy( cX, X, n*sizeof(int), cudaMemcpyHostToDevice);
}
__global__
void processVertex(int *vertex, int *edge, int *departure, int *duration, int *earliestTime,int *level)
{
int i,u,v,t, lambda;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i>=1024*100) return;
u = 0;
v = edge[i];
t = departure[i];
lambda = duration[i];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
level[v]=1;
}
}
__global__
void processVertices(int iterations, int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, bool *dContinue, int *level)
{
int i,j,u,v,t,lambda,degree;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i >= vCount) return;
//if(iterations==0 && i!=0) return;
//if(level[i] != iterations) return;
u = i;
degree = vertex[u+1] - vertex[u];
for(j=1; j<=degree; j++)
{
v = edge[vertex[u]+j-1];
t = departure[vertex[u]+j-1];
lambda = duration[vertex[u]+j-1];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
*dContinue=true;
//level[v]=iterations+1;
}
}
}
void computeEarliestTimes(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, int *level)
{
int iterations=0;
bool hContinue;
bool *dContinue;
cudaMalloc( (void**) &dContinue, sizeof(bool));
//processVertex<<< 100, 1024>>>(vertex, edge, departure, duration, earliestTime,level);
iterations=1;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
hContinue=false;
cudaMemcpy(dContinue, &hContinue, sizeof(bool), cudaMemcpyHostToDevice) ;
processVertices<<< gridDimension, blockDimension, 0 >>>(iterations,vCount, eCount, vertex, edge, departure, duration, earliestTime,dContinue,level);
// check if kernel execution generated and error
//Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated and error
cudaMemcpy( &hContinue, dContinue, sizeof(bool), cudaMemcpyDeviceToHost) ;
iterations++;
}
while(hContinue);
}
int main(int argc, char *argv[])
{
int vCount, eCount, source;
int *edge, *vertex, *departure, *duration, *earliestTime, *level;
int *cEdge, *cVertex, *cDeparture, *cDuration, *cEarliestTime, *cLevel;
char fileName[100];
struct timeval start,stop;
double time;
strcpy(fileName, argv[1]);
readInput(fileName,vCount, eCount, vertex, edge, departure, duration, source);
initConfiguration(gridDimension,blockDimension, vCount);
cudaCopyToDevice(vertex,cVertex,vCount);
cudaCopyToDevice(edge,cEdge,eCount);
cudaCopyToDevice(departure,cDeparture,eCount);
cudaCopyToDevice(duration,cDuration,eCount);
initArray(earliestTime,vCount);
earliestTime[source]=0; // starting time
cudaCopyToDevice(earliestTime,cEarliestTime,vCount);
//initArray(level,vCount);
// level[source]=0;
// cudaCopyToDevice(level,cLevel,vCount);
gettimeofday(&start,0);
computeEarliestTimes(vCount,eCount,cVertex,cEdge,cDeparture,cDuration,cEarliestTime,cLevel);
gettimeofday(&stop,0);
time = (1000000.0*(stop.tv_sec-start.tv_sec) + stop.tv_usec-start.tv_usec)/1000.0;
cudaMemcpy(earliestTime, cEarliestTime, vCount*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(cEarliestTime);
cudaFree(cEdge);
cudaFree(cVertex);
cudaFree(cDeparture);
cudaFree(cDuration);
//cout<<"Memory copied"<<endl;
for(int i=0;i<=vCount-1;i++)
{
cout<<i<<" "<<earliestTime[i]<<endl;
//fprintf(fp1,"Earliest time for %d is %d\n",i,earliest[i]);
}
cout<<"Time is "<<time<<endl;
return 0;
} | #include<string.h>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<fstream>
#include<sys/time.h>
#include<hip/hip_runtime.h>
#define INF 10000;
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
dim3 gridDimension;
dim3 blockDimension;
bool readInput(char *fileName, int &vCount, int &eCount, int *&vertex, int *&edge, int *&departure, int *&duration, int &source)
{
ifstream fin;
fin.open(fileName);
fin>>vCount>>eCount>>source;
vertex = new int[vCount+1];
edge = new int[eCount];
departure = new int[eCount];
duration = new int[eCount];
for(int i=0; i<=vCount; i++)
fin>>vertex[i];
for(int i=0; i<=eCount-1; i++)
fin>>edge[i];
for(int i=0; i<=eCount-1; i++)
fin>>departure[i];
for(int i=0; i<=eCount-1; i++)
fin>>duration[i];
//cout<<"reading the input is over"<<endl;
return true;
}
bool printInput(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration)
{
ofstream fout;
fout.open("csr2.txt");
for(int i=0; i<=vCount; i++)
fout<<vertex[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<edge[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<departure[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<duration[i]<<" ";
fout<<endl;
return true;
}
void initConfiguration(dim3 &grid, dim3 &block, int n)
{
int num_of_blocks = 1;
int num_of_threads_per_block = n;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(n>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(n/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
grid.x = num_of_blocks; grid.y=1; grid.z=1;
block.x = num_of_threads_per_block; block.y=1; block.z=1;
}
//**should be initialized with specified time instead of zero
void initArray(int *&X, int n)
{
X = new int[n];
for(int i=0; i<=n-1; i++)
{
X[i] = INF;
}
}
void cudaCopyToDevice(int *X, int *&cX, int n)
{
hipMalloc((void**)&cX, n*sizeof(int));
hipMemcpy( cX, X, n*sizeof(int), hipMemcpyHostToDevice);
}
__global__
void processVertex(int *vertex, int *edge, int *departure, int *duration, int *earliestTime,int *level)
{
int i,u,v,t, lambda;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i>=1024*100) return;
u = 0;
v = edge[i];
t = departure[i];
lambda = duration[i];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
level[v]=1;
}
}
__global__
void processVertices(int iterations, int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, bool *dContinue, int *level)
{
int i,j,u,v,t,lambda,degree;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i >= vCount) return;
//if(iterations==0 && i!=0) return;
//if(level[i] != iterations) return;
u = i;
degree = vertex[u+1] - vertex[u];
for(j=1; j<=degree; j++)
{
v = edge[vertex[u]+j-1];
t = departure[vertex[u]+j-1];
lambda = duration[vertex[u]+j-1];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
*dContinue=true;
//level[v]=iterations+1;
}
}
}
void computeEarliestTimes(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, int *level)
{
int iterations=0;
bool hContinue;
bool *dContinue;
hipMalloc( (void**) &dContinue, sizeof(bool));
//processVertex<<< 100, 1024>>>(vertex, edge, departure, duration, earliestTime,level);
iterations=1;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
hContinue=false;
hipMemcpy(dContinue, &hContinue, sizeof(bool), hipMemcpyHostToDevice) ;
processVertices<<< gridDimension, blockDimension, 0 >>>(iterations,vCount, eCount, vertex, edge, departure, duration, earliestTime,dContinue,level);
// check if kernel execution generated and error
//Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated and error
hipMemcpy( &hContinue, dContinue, sizeof(bool), hipMemcpyDeviceToHost) ;
iterations++;
}
while(hContinue);
}
int main(int argc, char *argv[])
{
int vCount, eCount, source;
int *edge, *vertex, *departure, *duration, *earliestTime, *level;
int *cEdge, *cVertex, *cDeparture, *cDuration, *cEarliestTime, *cLevel;
char fileName[100];
struct timeval start,stop;
double time;
strcpy(fileName, argv[1]);
readInput(fileName,vCount, eCount, vertex, edge, departure, duration, source);
initConfiguration(gridDimension,blockDimension, vCount);
cudaCopyToDevice(vertex,cVertex,vCount);
cudaCopyToDevice(edge,cEdge,eCount);
cudaCopyToDevice(departure,cDeparture,eCount);
cudaCopyToDevice(duration,cDuration,eCount);
initArray(earliestTime,vCount);
earliestTime[source]=0; // starting time
cudaCopyToDevice(earliestTime,cEarliestTime,vCount);
//initArray(level,vCount);
// level[source]=0;
// cudaCopyToDevice(level,cLevel,vCount);
gettimeofday(&start,0);
computeEarliestTimes(vCount,eCount,cVertex,cEdge,cDeparture,cDuration,cEarliestTime,cLevel);
gettimeofday(&stop,0);
time = (1000000.0*(stop.tv_sec-start.tv_sec) + stop.tv_usec-start.tv_usec)/1000.0;
hipMemcpy(earliestTime, cEarliestTime, vCount*sizeof(int), hipMemcpyDeviceToHost);
hipFree(cEarliestTime);
hipFree(cEdge);
hipFree(cVertex);
hipFree(cDeparture);
hipFree(cDuration);
//cout<<"Memory copied"<<endl;
for(int i=0;i<=vCount-1;i++)
{
cout<<i<<" "<<earliestTime[i]<<endl;
//fprintf(fp1,"Earliest time for %d is %d\n",i,earliest[i]);
}
cout<<"Time is "<<time<<endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<string.h>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<fstream>
#include<sys/time.h>
#include<hip/hip_runtime.h>
#define INF 10000;
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
dim3 gridDimension;
dim3 blockDimension;
bool readInput(char *fileName, int &vCount, int &eCount, int *&vertex, int *&edge, int *&departure, int *&duration, int &source)
{
ifstream fin;
fin.open(fileName);
fin>>vCount>>eCount>>source;
vertex = new int[vCount+1];
edge = new int[eCount];
departure = new int[eCount];
duration = new int[eCount];
for(int i=0; i<=vCount; i++)
fin>>vertex[i];
for(int i=0; i<=eCount-1; i++)
fin>>edge[i];
for(int i=0; i<=eCount-1; i++)
fin>>departure[i];
for(int i=0; i<=eCount-1; i++)
fin>>duration[i];
//cout<<"reading the input is over"<<endl;
return true;
}
bool printInput(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration)
{
ofstream fout;
fout.open("csr2.txt");
for(int i=0; i<=vCount; i++)
fout<<vertex[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<edge[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<departure[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<duration[i]<<" ";
fout<<endl;
return true;
}
void initConfiguration(dim3 &grid, dim3 &block, int n)
{
int num_of_blocks = 1;
int num_of_threads_per_block = n;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(n>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(n/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
grid.x = num_of_blocks; grid.y=1; grid.z=1;
block.x = num_of_threads_per_block; block.y=1; block.z=1;
}
//**should be initialized with specified time instead of zero
void initArray(int *&X, int n)
{
X = new int[n];
for(int i=0; i<=n-1; i++)
{
X[i] = INF;
}
}
void cudaCopyToDevice(int *X, int *&cX, int n)
{
hipMalloc((void**)&cX, n*sizeof(int));
hipMemcpy( cX, X, n*sizeof(int), hipMemcpyHostToDevice);
}
__global__
void processVertex(int *vertex, int *edge, int *departure, int *duration, int *earliestTime,int *level)
{
int i,u,v,t, lambda;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i>=1024*100) return;
u = 0;
v = edge[i];
t = departure[i];
lambda = duration[i];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
level[v]=1;
}
}
__global__
void processVertices(int iterations, int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, bool *dContinue, int *level)
{
int i,j,u,v,t,lambda,degree;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i >= vCount) return;
//if(iterations==0 && i!=0) return;
//if(level[i] != iterations) return;
u = i;
degree = vertex[u+1] - vertex[u];
for(j=1; j<=degree; j++)
{
v = edge[vertex[u]+j-1];
t = departure[vertex[u]+j-1];
lambda = duration[vertex[u]+j-1];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
*dContinue=true;
//level[v]=iterations+1;
}
}
}
void computeEarliestTimes(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, int *level)
{
int iterations=0;
bool hContinue;
bool *dContinue;
hipMalloc( (void**) &dContinue, sizeof(bool));
//processVertex<<< 100, 1024>>>(vertex, edge, departure, duration, earliestTime,level);
iterations=1;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
hContinue=false;
hipMemcpy(dContinue, &hContinue, sizeof(bool), hipMemcpyHostToDevice) ;
processVertices<<< gridDimension, blockDimension, 0 >>>(iterations,vCount, eCount, vertex, edge, departure, duration, earliestTime,dContinue,level);
// check if kernel execution generated and error
//Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated and error
hipMemcpy( &hContinue, dContinue, sizeof(bool), hipMemcpyDeviceToHost) ;
iterations++;
}
while(hContinue);
}
int main(int argc, char *argv[])
{
int vCount, eCount, source;
int *edge, *vertex, *departure, *duration, *earliestTime, *level;
int *cEdge, *cVertex, *cDeparture, *cDuration, *cEarliestTime, *cLevel;
char fileName[100];
struct timeval start,stop;
double time;
strcpy(fileName, argv[1]);
readInput(fileName,vCount, eCount, vertex, edge, departure, duration, source);
initConfiguration(gridDimension,blockDimension, vCount);
cudaCopyToDevice(vertex,cVertex,vCount);
cudaCopyToDevice(edge,cEdge,eCount);
cudaCopyToDevice(departure,cDeparture,eCount);
cudaCopyToDevice(duration,cDuration,eCount);
initArray(earliestTime,vCount);
earliestTime[source]=0; // starting time
cudaCopyToDevice(earliestTime,cEarliestTime,vCount);
//initArray(level,vCount);
// level[source]=0;
// cudaCopyToDevice(level,cLevel,vCount);
gettimeofday(&start,0);
computeEarliestTimes(vCount,eCount,cVertex,cEdge,cDeparture,cDuration,cEarliestTime,cLevel);
gettimeofday(&stop,0);
time = (1000000.0*(stop.tv_sec-start.tv_sec) + stop.tv_usec-start.tv_usec)/1000.0;
hipMemcpy(earliestTime, cEarliestTime, vCount*sizeof(int), hipMemcpyDeviceToHost);
hipFree(cEarliestTime);
hipFree(cEdge);
hipFree(cVertex);
hipFree(cDeparture);
hipFree(cDuration);
//cout<<"Memory copied"<<endl;
for(int i=0;i<=vCount-1;i++)
{
cout<<i<<" "<<earliestTime[i]<<endl;
//fprintf(fp1,"Earliest time for %d is %d\n",i,earliest[i]);
}
cout<<"Time is "<<time<<endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13processVertexPiS_S_S_S_S_
.globl _Z13processVertexPiS_S_S_S_S_
.p2align 8
.type _Z13processVertexPiS_S_S_S_S_,@function
_Z13processVertexPiS_S_S_S_S_:
v_lshl_or_b32 v0, s15, 10, v0
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x19000, v0
s_cbranch_execz .LBB0_4
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x10
s_load_b64 s[2:3], s[0:1], 0x20
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
s_load_b32 s4, s[2:3], 0x0
global_load_b32 v4, v[2:3], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_le_i32_e32 vcc_lo, s4, v4
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_4
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b64 s[6:7], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v7, v[0:1], off
s_waitcnt vmcnt(1)
v_ashrrev_i32_e32 v3, 31, v2
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, v7, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[2:3]
v_add_co_u32 v0, vcc_lo, s2, v5
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v6, vcc_lo
global_load_b32 v5, v[0:1], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v4, v5
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_4
s_load_b64 s[0:1], s[0:1], 0x28
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_mov_b32_e32 v5, 1
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_store_b32 v[0:1], v4, off
global_store_b32 v[2:3], v5, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13processVertexPiS_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 48
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13processVertexPiS_S_S_S_S_, .Lfunc_end0-_Z13processVertexPiS_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z15processVerticesiiiPiS_S_S_S_PbS_
.globl _Z15processVerticesiiiPiS_S_S_S_PbS_
.p2align 8
.type _Z15processVerticesiiiPiS_S_S_S_PbS_,@function
_Z15processVerticesiiiPiS_S_S_S_PbS_:
s_load_b32 s2, s[0:1], 0x4
v_lshl_or_b32 v0, s15, 10, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB1_7
s_load_b64 s[2:3], s[0:1], 0x10
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v3, vcc_lo
s_mov_b32 s2, 0
global_load_b64 v[4:5], v[0:1], off
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v6, v5, v4
v_cmp_lt_i32_e32 vcc_lo, 0, v6
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_7
s_clause 0x1
s_load_b256 s[4:11], s[0:1], 0x18
s_load_b64 s[0:1], s[0:1], 0x38
v_mov_b32_e32 v7, 0
s_mov_b32 s3, 0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s10, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v3, vcc_lo
s_branch .LBB1_4
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s12
s_add_i32 s3, s3, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ge_i32_e32 vcc_lo, s3, v6
s_or_b32 s2, vcc_lo, s2
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB1_7
.LBB1_4:
global_load_b32 v4, v[0:1], off
s_mov_b32 s12, exec_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, s3, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v5, vcc_lo
global_load_b32 v8, v[8:9], off
global_load_b32 v9, v[2:3], off
s_waitcnt vmcnt(0)
v_cmpx_le_i32_e64 v9, v8
s_cbranch_execz .LBB1_3
v_add_co_u32 v9, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
global_load_b32 v9, v[9:10], off
global_load_b32 v11, v[4:5], off
s_waitcnt vmcnt(1)
v_ashrrev_i32_e32 v10, 31, v9
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v8, v11, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[9:10], 2, v[9:10]
v_add_co_u32 v4, vcc_lo, s10, v9
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v10, vcc_lo
global_load_b32 v9, v[4:5], off
s_waitcnt vmcnt(0)
v_cmp_lt_i32_e32 vcc_lo, v8, v9
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_3
v_mov_b32_e32 v9, 1
global_store_b32 v[4:5], v8, off
global_store_b8 v7, v9, s[0:1]
s_branch .LBB1_3
.LBB1_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15processVerticesiiiPiS_S_S_S_PbS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 72
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z15processVerticesiiiPiS_S_S_S_PbS_, .Lfunc_end1-_Z15processVerticesiiiPiS_S_S_S_PbS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 48
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13processVertexPiS_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13processVertexPiS_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 72
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15processVerticesiiiPiS_S_S_S_PbS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15processVerticesiiiPiS_S_S_S_PbS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<string.h>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<fstream>
#include<sys/time.h>
#include<hip/hip_runtime.h>
#define INF 10000;
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
dim3 gridDimension;
dim3 blockDimension;
bool readInput(char *fileName, int &vCount, int &eCount, int *&vertex, int *&edge, int *&departure, int *&duration, int &source)
{
ifstream fin;
fin.open(fileName);
fin>>vCount>>eCount>>source;
vertex = new int[vCount+1];
edge = new int[eCount];
departure = new int[eCount];
duration = new int[eCount];
for(int i=0; i<=vCount; i++)
fin>>vertex[i];
for(int i=0; i<=eCount-1; i++)
fin>>edge[i];
for(int i=0; i<=eCount-1; i++)
fin>>departure[i];
for(int i=0; i<=eCount-1; i++)
fin>>duration[i];
//cout<<"reading the input is over"<<endl;
return true;
}
bool printInput(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration)
{
ofstream fout;
fout.open("csr2.txt");
for(int i=0; i<=vCount; i++)
fout<<vertex[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<edge[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<departure[i]<<" ";
fout<<endl;
for(int i=0; i<=eCount-1; i++)
fout<<duration[i]<<" ";
fout<<endl;
return true;
}
void initConfiguration(dim3 &grid, dim3 &block, int n)
{
int num_of_blocks = 1;
int num_of_threads_per_block = n;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(n>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(n/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
grid.x = num_of_blocks; grid.y=1; grid.z=1;
block.x = num_of_threads_per_block; block.y=1; block.z=1;
}
//**should be initialized with specified time instead of zero
void initArray(int *&X, int n)
{
X = new int[n];
for(int i=0; i<=n-1; i++)
{
X[i] = INF;
}
}
void cudaCopyToDevice(int *X, int *&cX, int n)
{
hipMalloc((void**)&cX, n*sizeof(int));
hipMemcpy( cX, X, n*sizeof(int), hipMemcpyHostToDevice);
}
__global__
void processVertex(int *vertex, int *edge, int *departure, int *duration, int *earliestTime,int *level)
{
int i,u,v,t, lambda;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i>=1024*100) return;
u = 0;
v = edge[i];
t = departure[i];
lambda = duration[i];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
level[v]=1;
}
}
__global__
void processVertices(int iterations, int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, bool *dContinue, int *level)
{
int i,j,u,v,t,lambda,degree;
i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if(i >= vCount) return;
//if(iterations==0 && i!=0) return;
//if(level[i] != iterations) return;
u = i;
degree = vertex[u+1] - vertex[u];
for(j=1; j<=degree; j++)
{
v = edge[vertex[u]+j-1];
t = departure[vertex[u]+j-1];
lambda = duration[vertex[u]+j-1];
if(earliestTime[u]<=t && t+lambda < earliestTime[v])
{ //if(i==0){printf("first thread updating:after \n"); }
earliestTime[v]= t + lambda;
*dContinue=true;
//level[v]=iterations+1;
}
}
}
void computeEarliestTimes(int vCount, int eCount, int *vertex, int *edge, int *departure, int *duration, int *earliestTime, int *level)
{
int iterations=0;
bool hContinue;
bool *dContinue;
hipMalloc( (void**) &dContinue, sizeof(bool));
//processVertex<<< 100, 1024>>>(vertex, edge, departure, duration, earliestTime,level);
iterations=1;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
hContinue=false;
hipMemcpy(dContinue, &hContinue, sizeof(bool), hipMemcpyHostToDevice) ;
processVertices<<< gridDimension, blockDimension, 0 >>>(iterations,vCount, eCount, vertex, edge, departure, duration, earliestTime,dContinue,level);
// check if kernel execution generated and error
//Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated and error
hipMemcpy( &hContinue, dContinue, sizeof(bool), hipMemcpyDeviceToHost) ;
iterations++;
}
while(hContinue);
}
int main(int argc, char *argv[])
{
	// Host-side driver: read the temporal graph, copy it to the device,
	// run the earliest-arrival-time computation, and print the results.
	int vCount, eCount, source;
	int *edge, *vertex, *departure, *duration, *earliestTime;
	int *cEdge, *cVertex, *cDeparture, *cDuration, *cEarliestTime;
	int *cLevel = NULL; // level propagation is disabled below; pass a well-defined null instead of an indeterminate pointer
	char fileName[100];
	struct timeval start,stop;
	double time;
	// Guard against a missing file argument before touching argv[1].
	if (argc < 2)
	{
		cout<<"Usage: "<<argv[0]<<" <input-file>"<<endl;
		return 1;
	}
	// Bounded copy: fileName is a fixed 100-byte buffer.
	strncpy(fileName, argv[1], sizeof(fileName) - 1);
	fileName[sizeof(fileName) - 1] = '\0';
	readInput(fileName,vCount, eCount, vertex, edge, departure, duration, source);
	initConfiguration(gridDimension,blockDimension, vCount);
	cudaCopyToDevice(vertex,cVertex,vCount);
	cudaCopyToDevice(edge,cEdge,eCount);
	cudaCopyToDevice(departure,cDeparture,eCount);
	cudaCopyToDevice(duration,cDuration,eCount);
	initArray(earliestTime,vCount);
	earliestTime[source]=0; // the source vertex is reachable at time 0
	cudaCopyToDevice(earliestTime,cEarliestTime,vCount);
	//initArray(level,vCount);
	// level[source]=0;
	// cudaCopyToDevice(level,cLevel,vCount);
	gettimeofday(&start,0);
	computeEarliestTimes(vCount,eCount,cVertex,cEdge,cDeparture,cDuration,cEarliestTime,cLevel);
	gettimeofday(&stop,0);
	// Elapsed wall-clock time in milliseconds.
	time = (1000000.0*(stop.tv_sec-start.tv_sec) + stop.tv_usec-start.tv_usec)/1000.0;
	hipMemcpy(earliestTime, cEarliestTime, vCount*sizeof(int), hipMemcpyDeviceToHost);
	hipFree(cEarliestTime);
	hipFree(cEdge);
	hipFree(cVertex);
	hipFree(cDeparture);
	hipFree(cDuration);
	for(int i=0;i<=vCount-1;i++)
	{
		cout<<i<<" "<<earliestTime[i]<<endl;
		//fprintf(fp1,"Earliest time for %d is %d\n",i,earliest[i]);
	}
	cout<<"Time is "<<time<<endl;
	// Release the host arrays new[]-allocated by readInput()/initArray().
	delete[] vertex;
	delete[] edge;
	delete[] departure;
	delete[] duration;
	delete[] earliestTime;
	return 0;
}
.file "node1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
	.globl _Z9readInputPcRiS0_RPiS2_S2_S2_S0_ # -- Begin function _Z9readInputPcRiS0_RPiS2_S2_S2_S0_
	.p2align 4, 0x90
	.type _Z9readInputPcRiS0_RPiS2_S2_S2_S0_,@function
# readInput(char* fileName, int& vCount, int& eCount, int*& vertex, int*& edge,
#           int*& departure, int*& duration, int& source)
# Compiler-generated x86-64 (AT&T) host assembly from the HIP C++ source.
# Opens fileName with a std::ifstream, extracts vCount, eCount and source via
# operator>>, new[]-allocates the four output arrays, then extracts the vertex
# entries (loop runs for indices 0..vCount inclusive) and eCount entries each
# for edge, departure and duration.  The trailing GCC_except_table0 describes
# the landing pads that destroy the ifstream if operator>> or new[] throws.
# NOTE(review): do not hand-edit the instruction stream; regenerate from the
# C++ source instead.
_Z9readInputPcRiS0_RPiS2_S2_S2_S0_:     # @_Z9readInputPcRiS0_RPiS2_S2_S2_S0_
.Lfunc_begin0:
	.cfi_startproc
	.cfi_personality 3, __gxx_personality_v0
	.cfi_lsda 3, .Lexception0
# %bb.0:
	pushq %rbp
	.cfi_def_cfa_offset 16
	pushq %r15
	.cfi_def_cfa_offset 24
	pushq %r14
	.cfi_def_cfa_offset 32
	pushq %r13
	.cfi_def_cfa_offset 40
	pushq %r12
	.cfi_def_cfa_offset 48
	pushq %rbx
	.cfi_def_cfa_offset 56
	subq $536, %rsp # imm = 0x218
	.cfi_def_cfa_offset 592
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	movq %r9, (%rsp) # 8-byte Spill
	movq %r8, 8(%rsp) # 8-byte Spill
	movq %rcx, %r12
	movq %rdx, %rbx
	movq %rsi, %r13
	movq %rdi, %r14
# Construct the std::ifstream in the 536-byte stack frame at 16(%rsp).
	leaq 16(%rsp), %rbp
	movq %rbp, %rdi
	callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1Ev
	leaq 32(%rsp), %rdi
.Ltmp0:
	movq %r14, %rsi
	movl $8, %edx
	callq _ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode
.Ltmp1:
# %bb.1: # %.noexc
	movq 16(%rsp), %rcx
	addq -24(%rcx), %rbp
	xorl %esi, %esi
	testq %rax, %rax
	jne .LBB0_3
# %bb.2:
# open() failed: set failbit on the stream state.
	movl 32(%rbp), %esi
	orl $4, %esi
.LBB0_3: # %.invoke
.Ltmp2:
	movq %rbp, %rdi
	callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.Ltmp3:
# %bb.4: # %_ZNSt14basic_ifstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode.exit
.Ltmp4:
# Stream in vCount, eCount and source.
	leaq 16(%rsp), %rdi
	movq %r13, %rsi
	callq _ZNSirsERi
.Ltmp5:
# %bb.5:
.Ltmp6:
	movq %rax, %rdi
	movq %rbx, %rsi
	callq _ZNSirsERi
.Ltmp7:
# %bb.6:
.Ltmp8:
	movq 600(%rsp), %rsi
	movq %rax, %rdi
	callq _ZNSirsERi
.Ltmp9:
# %bb.7:
# new int[vCount + 1] for the vertex (CSR offset) array.
	movslq (%r13), %rax
	cmpq $-1, %rax
	leaq 4(,%rax,4), %rdi
	movq $-1, %r14
	cmovlq %r14, %rdi
.Ltmp10:
	callq _Znam
.Ltmp11:
# %bb.8:
	movq %rax, (%r12)
# new int[eCount] for edge, departure and duration.
	movslq (%rbx), %rax
	leaq (,%rax,4), %rbp
	testq %rax, %rax
	cmovsq %r14, %rbp
.Ltmp12:
	movq %rbp, %rdi
	callq _Znam
.Ltmp13:
# %bb.9:
	movq 8(%rsp), %rcx # 8-byte Reload
	movq %rax, (%rcx)
.Ltmp14:
	movq %rbp, %rdi
	callq _Znam
.Ltmp15:
# %bb.10:
	movq (%rsp), %rcx # 8-byte Reload
	movq %rax, (%rcx)
.Ltmp16:
	movq %rbp, %rdi
	callq _Znam
.Ltmp17:
# %bb.11:
	movq 592(%rsp), %rcx
	movq %rax, (%rcx)
	cmpl $0, (%r13)
	js .LBB0_15
# %bb.12: # %.lr.ph.preheader
	movq $-1, %r14
	xorl %r15d, %r15d
	leaq 16(%rsp), %rbp
	.p2align 4, 0x90
.LBB0_13: # %.lr.ph
	# =>This Inner Loop Header: Depth=1
	movq (%r12), %rsi
	addq %r15, %rsi
.Ltmp19:
	movq %rbp, %rdi
	callq _ZNSirsERi
.Ltmp20:
# %bb.14: # in Loop: Header=BB0_13 Depth=1
	movslq (%r13), %rax
	incq %r14
	addq $4, %r15
	cmpq %rax, %r14
	jl .LBB0_13
.LBB0_15: # %.preheader45
	cmpl $0, (%rbx)
	movq (%rsp), %r13 # 8-byte Reload
	movq 8(%rsp), %rbp # 8-byte Reload
	jle .LBB0_19
# %bb.16: # %.lr.ph50.preheader
	xorl %r14d, %r14d
	leaq 16(%rsp), %r12
	xorl %r15d, %r15d
	.p2align 4, 0x90
.LBB0_17: # %.lr.ph50
	# =>This Inner Loop Header: Depth=1
	movq (%rbp), %rsi
	addq %r14, %rsi
.Ltmp22:
	movq %r12, %rdi
	callq _ZNSirsERi
.Ltmp23:
# %bb.18: # in Loop: Header=BB0_17 Depth=1
	incq %r15
	movslq (%rbx), %rax
	addq $4, %r14
	cmpq %rax, %r15
	jl .LBB0_17
.LBB0_19: # %.preheader44
	cmpl $0, (%rbx)
	jle .LBB0_23
# %bb.20: # %.lr.ph53.preheader
	xorl %r14d, %r14d
	leaq 16(%rsp), %r15
	xorl %r12d, %r12d
	.p2align 4, 0x90
.LBB0_21: # %.lr.ph53
	# =>This Inner Loop Header: Depth=1
	movq (%r13), %rsi
	addq %r14, %rsi
.Ltmp25:
	movq %r15, %rdi
	callq _ZNSirsERi
.Ltmp26:
# %bb.22: # in Loop: Header=BB0_21 Depth=1
	incq %r12
	movslq (%rbx), %rax
	addq $4, %r14
	cmpq %rax, %r12
	jl .LBB0_21
.LBB0_23: # %.preheader
	cmpl $0, (%rbx)
	movq 592(%rsp), %r13
	jle .LBB0_27
# %bb.24: # %.lr.ph56.preheader
	xorl %r15d, %r15d
	leaq 16(%rsp), %r14
	xorl %r12d, %r12d
	.p2align 4, 0x90
.LBB0_25: # %.lr.ph56
	# =>This Inner Loop Header: Depth=1
	movq (%r13), %rsi
	addq %r15, %rsi
.Ltmp28:
	movq %r14, %rdi
	callq _ZNSirsERi
.Ltmp29:
# %bb.26: # in Loop: Header=BB0_25 Depth=1
	incq %r12
	movslq (%rbx), %rax
	addq $4, %r15
	cmpq %rax, %r12
	jl .LBB0_25
.LBB0_27: # %._crit_edge
# Normal path: destroy the ifstream and return.
	leaq 16(%rsp), %rdi
	movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
	callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
	leaq 272(%rsp), %rdi
	callq _ZNSt8ios_baseD2Ev
	movb $1, %al
	addq $536, %rsp # imm = 0x218
	.cfi_def_cfa_offset 56
	popq %rbx
	.cfi_def_cfa_offset 48
	popq %r12
	.cfi_def_cfa_offset 40
	popq %r13
	.cfi_def_cfa_offset 32
	popq %r14
	.cfi_def_cfa_offset 24
	popq %r15
	.cfi_def_cfa_offset 16
	popq %rbp
	.cfi_def_cfa_offset 8
	retq
.LBB0_28:
	.cfi_def_cfa_offset 592
.Ltmp18:
	jmp .LBB0_33
.LBB0_32:
.Ltmp30:
	jmp .LBB0_33
.LBB0_31:
.Ltmp27:
	jmp .LBB0_33
.LBB0_30:
.Ltmp24:
	jmp .LBB0_33
.LBB0_29:
.Ltmp21:
.LBB0_33:
# Exception cleanup: destroy the ifstream, then resume unwinding.
	movq %rax, %rbx
	leaq 16(%rsp), %rdi
	movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
	callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
	leaq 272(%rsp), %rdi
	callq _ZNSt8ios_baseD2Ev
	movq %rbx, %rdi
	callq _Unwind_Resume@PLT
.Lfunc_end0:
	.size _Z9readInputPcRiS0_RPiS2_S2_S2_S0_, .Lfunc_end0-_Z9readInputPcRiS0_RPiS2_S2_S2_S0_
	.cfi_endproc
# Itanium-ABI LSDA call-site table for the function above.
	.section .gcc_except_table,"a",@progbits
	.p2align 2, 0x0
GCC_except_table0:
.Lexception0:
	.byte 255 # @LPStart Encoding = omit
	.byte 255 # @TType Encoding = omit
	.byte 1 # Call site Encoding = uleb128
	.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
	.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
	.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
	.byte 0 # has no landing pad
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
	.uleb128 .Ltmp17-.Ltmp0 # Call between .Ltmp0 and .Ltmp17
	.uleb128 .Ltmp18-.Lfunc_begin0 # jumps to .Ltmp18
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp19-.Lfunc_begin0 # >> Call Site 3 <<
	.uleb128 .Ltmp20-.Ltmp19 # Call between .Ltmp19 and .Ltmp20
	.uleb128 .Ltmp21-.Lfunc_begin0 # jumps to .Ltmp21
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp22-.Lfunc_begin0 # >> Call Site 4 <<
	.uleb128 .Ltmp23-.Ltmp22 # Call between .Ltmp22 and .Ltmp23
	.uleb128 .Ltmp24-.Lfunc_begin0 # jumps to .Ltmp24
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp25-.Lfunc_begin0 # >> Call Site 5 <<
	.uleb128 .Ltmp26-.Ltmp25 # Call between .Ltmp25 and .Ltmp26
	.uleb128 .Ltmp27-.Lfunc_begin0 # jumps to .Ltmp27
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp28-.Lfunc_begin0 # >> Call Site 6 <<
	.uleb128 .Ltmp29-.Ltmp28 # Call between .Ltmp28 and .Ltmp29
	.uleb128 .Ltmp30-.Lfunc_begin0 # jumps to .Ltmp30
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp29-.Lfunc_begin0 # >> Call Site 7 <<
	.uleb128 .Lfunc_end0-.Ltmp29 # Call between .Ltmp29 and .Lfunc_end0
	.byte 0 # has no landing pad
	.byte 0 # On action: cleanup
.Lcst_end0:
	.p2align 2, 0x0
                                        # -- End function
.text
	.globl _Z10printInputiiPiS_S_S_ # -- Begin function _Z10printInputiiPiS_S_S_
	.p2align 4, 0x90
	.type _Z10printInputiiPiS_S_S_,@function
# printInput(int vCount, int eCount, int* vertex, int* edge, int* departure,
#            int* duration)
# Compiler-generated x86-64 (AT&T) host assembly.  Opens the hard-coded output
# file ".L.str" ("csr2.txt") with a std::ofstream and dumps the four arrays,
# space-separated, one array per line (vertex loop runs vCount+1 times; the
# other three loops run eCount times), with std::endl between arrays.  The
# inlined widen('\n') sequences come from std::endl; GCC_except_table1 holds
# the cleanup landing pads that destroy the ofstream on an exception.
# NOTE(review): do not hand-edit; regenerate from the C++ source instead.
_Z10printInputiiPiS_S_S_:               # @_Z10printInputiiPiS_S_S_
.Lfunc_begin1:
	.cfi_startproc
	.cfi_personality 3, __gxx_personality_v0
	.cfi_lsda 3, .Lexception1
# %bb.0:
	pushq %rbp
	.cfi_def_cfa_offset 16
	pushq %r15
	.cfi_def_cfa_offset 24
	pushq %r14
	.cfi_def_cfa_offset 32
	pushq %r13
	.cfi_def_cfa_offset 40
	pushq %r12
	.cfi_def_cfa_offset 48
	pushq %rbx
	.cfi_def_cfa_offset 56
	subq $536, %rsp # imm = 0x218
	.cfi_def_cfa_offset 592
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	movq %r9, 16(%rsp) # 8-byte Spill
	movq %r8, %r14
	movq %rcx, %r15
	movq %rdx, %r12
	movl %esi, 12(%rsp) # 4-byte Spill
	movl %edi, %r13d
# Construct the std::ofstream at 24(%rsp) and open "csr2.txt" (mode 16 = out).
	leaq 24(%rsp), %rbp
	movq %rbp, %rdi
	callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1Ev
	leaq 32(%rsp), %rdi
.Ltmp31:
	movl $.L.str, %esi
	movl $16, %edx
	callq _ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode
.Ltmp32:
# %bb.1: # %.noexc
	movq 24(%rsp), %rcx
	addq -24(%rcx), %rbp
	xorl %esi, %esi
	testq %rax, %rax
	jne .LBB1_3
# %bb.2:
	movl 32(%rbp), %esi
	orl $4, %esi
.LBB1_3: # %.invoke112
.Ltmp33:
	movq %rbp, %rdi
	callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.Ltmp34:
# %bb.4: # %_ZNSt14basic_ofstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode.exit
# First loop: write the vertex array (indices 0..vCount inclusive).
	testl %r13d, %r13d
	js .LBB1_9
# %bb.5: # %.lr.ph.preheader
	incl %r13d
	xorl %ebx, %ebx
	leaq 24(%rsp), %rbp
	.p2align 4, 0x90
.LBB1_6: # %.lr.ph
	# =>This Inner Loop Header: Depth=1
	movl (%r12,%rbx,4), %esi
.Ltmp35:
	movq %rbp, %rdi
	callq _ZNSolsEi
.Ltmp36:
# %bb.7: # in Loop: Header=BB1_6 Depth=1
.Ltmp37:
	movl $.L.str.1, %esi
	movl $1, %edx
	movq %rax, %rdi
	callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp38:
# %bb.8: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
	# in Loop: Header=BB1_6 Depth=1
	incq %rbx
	cmpq %rbx, %r13
	jne .LBB1_6
.LBB1_9: # %._crit_edge
# Inlined std::endl: widen('\n'), put, flush.
	movq 24(%rsp), %rax
	movq -24(%rax), %rax
	movq 264(%rsp,%rax), %r12
	testq %r12, %r12
	je .LBB1_46
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
	cmpb $0, 56(%r12)
	je .LBB1_12
# %bb.11:
	movzbl 67(%r12), %eax
	jmp .LBB1_14
.LBB1_12:
.Ltmp40:
	movq %r12, %rdi
	callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp41:
# %bb.13: # %.noexc48
	movq (%r12), %rax
.Ltmp42:
	movq %r12, %rdi
	movl $10, %esi
	callq *48(%rax)
.Ltmp43:
.LBB1_14: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i
.Ltmp44:
	movsbl %al, %esi
	leaq 24(%rsp), %rdi
	callq _ZNSo3putEc
.Ltmp45:
# %bb.15: # %.noexc50
.Ltmp46:
	movq %rax, %rdi
	callq _ZNSo5flushEv
.Ltmp47:
# %bb.16: # %_ZNSolsEPFRSoS_E.exit.preheader
# Second loop: write the edge array (eCount entries).
	movl 12(%rsp), %eax # 4-byte Reload
	movl %eax, %r13d
	testl %eax, %eax
	jle .LBB1_21
# %bb.17: # %.lr.ph89.preheader
	xorl %ebx, %ebx
	leaq 24(%rsp), %r12
	.p2align 4, 0x90
.LBB1_18: # %.lr.ph89
	# =>This Inner Loop Header: Depth=1
	movl (%r15,%rbx,4), %esi
.Ltmp48:
	movq %r12, %rdi
	callq _ZNSolsEi
.Ltmp49:
# %bb.19: # in Loop: Header=BB1_18 Depth=1
.Ltmp50:
	movl $.L.str.1, %esi
	movl $1, %edx
	movq %rax, %rdi
	callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp51:
# %bb.20: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit38
	# in Loop: Header=BB1_18 Depth=1
	incq %rbx
	cmpq %rbx, %r13
	jne .LBB1_18
.LBB1_21: # %_ZNSolsEPFRSoS_E.exit._crit_edge
	movq 24(%rsp), %rax
	movq -24(%rax), %rax
	movq 264(%rsp,%rax), %r15
	testq %r15, %r15
	je .LBB1_46
# %bb.22: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i53
	cmpb $0, 56(%r15)
	je .LBB1_24
# %bb.23:
	movzbl 67(%r15), %eax
	jmp .LBB1_26
.LBB1_24:
.Ltmp53:
	movq %r15, %rdi
	callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp54:
# %bb.25: # %.noexc58
	movq (%r15), %rax
.Ltmp55:
	movq %r15, %rdi
	movl $10, %esi
	callq *48(%rax)
.Ltmp56:
.LBB1_26: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i55
.Ltmp57:
	movsbl %al, %esi
	leaq 24(%rsp), %rdi
	callq _ZNSo3putEc
.Ltmp58:
# %bb.27: # %.noexc60
.Ltmp59:
	movq %rax, %rdi
	callq _ZNSo5flushEv
.Ltmp60:
# %bb.28: # %_ZNSolsEPFRSoS_E.exit36.preheader
# Third loop: write the departure array (eCount entries).
	cmpl $0, 12(%rsp) # 4-byte Folded Reload
	jle .LBB1_33
# %bb.29: # %.lr.ph92.preheader
	xorl %ebx, %ebx
	leaq 24(%rsp), %r15
	.p2align 4, 0x90
.LBB1_30: # %.lr.ph92
	# =>This Inner Loop Header: Depth=1
	movl (%r14,%rbx,4), %esi
.Ltmp61:
	movq %r15, %rdi
	callq _ZNSolsEi
.Ltmp62:
# %bb.31: # in Loop: Header=BB1_30 Depth=1
.Ltmp63:
	movl $.L.str.1, %esi
	movl $1, %edx
	movq %rax, %rdi
	callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp64:
# %bb.32: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit42
	# in Loop: Header=BB1_30 Depth=1
	incq %rbx
	cmpq %rbx, %r13
	jne .LBB1_30
.LBB1_33: # %_ZNSolsEPFRSoS_E.exit36._crit_edge
	movq 24(%rsp), %rax
	movq -24(%rax), %rax
	movq 264(%rsp,%rax), %r14
	testq %r14, %r14
	je .LBB1_46
# %bb.34: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i64
	cmpb $0, 56(%r14)
	je .LBB1_36
# %bb.35:
	movzbl 67(%r14), %eax
	jmp .LBB1_38
.LBB1_36:
.Ltmp66:
	movq %r14, %rdi
	callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp67:
# %bb.37: # %.noexc69
	movq (%r14), %rax
.Ltmp68:
	movq %r14, %rdi
	movl $10, %esi
	callq *48(%rax)
.Ltmp69:
.LBB1_38: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i66
.Ltmp70:
	movsbl %al, %esi
	leaq 24(%rsp), %rdi
	callq _ZNSo3putEc
.Ltmp71:
# %bb.39: # %.noexc71
.Ltmp72:
	movq %rax, %rdi
	callq _ZNSo5flushEv
.Ltmp73:
# %bb.40: # %_ZNSolsEPFRSoS_E.exit40.preheader
# Fourth loop: write the duration array (eCount entries).
	cmpl $0, 12(%rsp) # 4-byte Folded Reload
	movq 16(%rsp), %r15 # 8-byte Reload
	jle .LBB1_45
# %bb.41: # %.lr.ph95.preheader
	xorl %ebx, %ebx
	leaq 24(%rsp), %r14
	.p2align 4, 0x90
.LBB1_42: # %.lr.ph95
	# =>This Inner Loop Header: Depth=1
	movl (%r15,%rbx,4), %esi
.Ltmp74:
	movq %r14, %rdi
	callq _ZNSolsEi
.Ltmp75:
# %bb.43: # in Loop: Header=BB1_42 Depth=1
.Ltmp76:
	movl $.L.str.1, %esi
	movl $1, %edx
	movq %rax, %rdi
	callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp77:
# %bb.44: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit46
	# in Loop: Header=BB1_42 Depth=1
	incq %rbx
	cmpq %rbx, %r13
	jne .LBB1_42
.LBB1_45: # %_ZNSolsEPFRSoS_E.exit40._crit_edge
	movq 24(%rsp), %rax
	movq -24(%rax), %rax
	movq 264(%rsp,%rax), %rbx
	testq %rbx, %rbx
	je .LBB1_46
# %bb.48: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i75
	cmpb $0, 56(%rbx)
	je .LBB1_50
# %bb.49:
	movzbl 67(%rbx), %eax
	jmp .LBB1_52
.LBB1_50:
.Ltmp79:
	movq %rbx, %rdi
	callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp80:
# %bb.51: # %.noexc80
	movq (%rbx), %rax
.Ltmp81:
	movq %rbx, %rdi
	movl $10, %esi
	callq *48(%rax)
.Ltmp82:
.LBB1_52: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i77
.Ltmp83:
	movsbl %al, %esi
	leaq 24(%rsp), %rdi
	callq _ZNSo3putEc
.Ltmp84:
# %bb.53: # %.noexc82
.Ltmp85:
	movq %rax, %rdi
	callq _ZNSo5flushEv
.Ltmp86:
# %bb.54: # %_ZNSolsEPFRSoS_E.exit44
# Normal path: destroy the ofstream and return.
	leaq 24(%rsp), %rdi
	callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev
	movb $1, %al
	addq $536, %rsp # imm = 0x218
	.cfi_def_cfa_offset 56
	popq %rbx
	.cfi_def_cfa_offset 48
	popq %r12
	.cfi_def_cfa_offset 40
	popq %r13
	.cfi_def_cfa_offset 32
	popq %r14
	.cfi_def_cfa_offset 24
	popq %r15
	.cfi_def_cfa_offset 16
	popq %rbp
	.cfi_def_cfa_offset 8
	retq
.LBB1_46: # %.invoke
	.cfi_def_cfa_offset 592
.Ltmp87:
# Null ctype facet: throw std::bad_cast.
	callq _ZSt16__throw_bad_castv
.Ltmp88:
# %bb.47: # %.cont
.LBB1_55:
.Ltmp89:
	jmp .LBB1_56
.LBB1_57:
.Ltmp78:
	jmp .LBB1_56
.LBB1_60:
.Ltmp65:
	jmp .LBB1_56
.LBB1_59:
.Ltmp52:
	jmp .LBB1_56
.LBB1_58:
.Ltmp39:
.LBB1_56:
# Exception cleanup: destroy the ofstream, then resume unwinding.
	movq %rax, %rbx
	leaq 24(%rsp), %rdi
	callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev
	movq %rbx, %rdi
	callq _Unwind_Resume@PLT
.Lfunc_end1:
	.size _Z10printInputiiPiS_S_S_, .Lfunc_end1-_Z10printInputiiPiS_S_S_
	.cfi_endproc
# Itanium-ABI LSDA call-site table for the function above.
	.section .gcc_except_table,"a",@progbits
	.p2align 2, 0x0
GCC_except_table1:
.Lexception1:
	.byte 255 # @LPStart Encoding = omit
	.byte 255 # @TType Encoding = omit
	.byte 1 # Call site Encoding = uleb128
	.uleb128 .Lcst_end1-.Lcst_begin1
.Lcst_begin1:
	.uleb128 .Lfunc_begin1-.Lfunc_begin1 # >> Call Site 1 <<
	.uleb128 .Ltmp31-.Lfunc_begin1 # Call between .Lfunc_begin1 and .Ltmp31
	.byte 0 # has no landing pad
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp31-.Lfunc_begin1 # >> Call Site 2 <<
	.uleb128 .Ltmp34-.Ltmp31 # Call between .Ltmp31 and .Ltmp34
	.uleb128 .Ltmp89-.Lfunc_begin1 # jumps to .Ltmp89
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp35-.Lfunc_begin1 # >> Call Site 3 <<
	.uleb128 .Ltmp38-.Ltmp35 # Call between .Ltmp35 and .Ltmp38
	.uleb128 .Ltmp39-.Lfunc_begin1 # jumps to .Ltmp39
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp40-.Lfunc_begin1 # >> Call Site 4 <<
	.uleb128 .Ltmp47-.Ltmp40 # Call between .Ltmp40 and .Ltmp47
	.uleb128 .Ltmp89-.Lfunc_begin1 # jumps to .Ltmp89
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp48-.Lfunc_begin1 # >> Call Site 5 <<
	.uleb128 .Ltmp51-.Ltmp48 # Call between .Ltmp48 and .Ltmp51
	.uleb128 .Ltmp52-.Lfunc_begin1 # jumps to .Ltmp52
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp53-.Lfunc_begin1 # >> Call Site 6 <<
	.uleb128 .Ltmp60-.Ltmp53 # Call between .Ltmp53 and .Ltmp60
	.uleb128 .Ltmp89-.Lfunc_begin1 # jumps to .Ltmp89
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp61-.Lfunc_begin1 # >> Call Site 7 <<
	.uleb128 .Ltmp64-.Ltmp61 # Call between .Ltmp61 and .Ltmp64
	.uleb128 .Ltmp65-.Lfunc_begin1 # jumps to .Ltmp65
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp66-.Lfunc_begin1 # >> Call Site 8 <<
	.uleb128 .Ltmp73-.Ltmp66 # Call between .Ltmp66 and .Ltmp73
	.uleb128 .Ltmp89-.Lfunc_begin1 # jumps to .Ltmp89
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp74-.Lfunc_begin1 # >> Call Site 9 <<
	.uleb128 .Ltmp77-.Ltmp74 # Call between .Ltmp74 and .Ltmp77
	.uleb128 .Ltmp78-.Lfunc_begin1 # jumps to .Ltmp78
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp79-.Lfunc_begin1 # >> Call Site 10 <<
	.uleb128 .Ltmp88-.Ltmp79 # Call between .Ltmp79 and .Ltmp88
	.uleb128 .Ltmp89-.Lfunc_begin1 # jumps to .Ltmp89
	.byte 0 # On action: cleanup
	.uleb128 .Ltmp88-.Lfunc_begin1 # >> Call Site 11 <<
	.uleb128 .Lfunc_end1-.Ltmp88 # Call between .Ltmp88 and .Lfunc_end1
	.byte 0 # has no landing pad
	.byte 0 # On action: cleanup
.Lcst_end1:
	.p2align 2, 0x0
                                        # -- End function
	.section .rodata.cst8,"aM",@progbits,8
	.p2align 3, 0x0 # -- Begin function _Z17initConfigurationR4dim3S0_i
# 1/1024 as a double, used to compute ceil(vCount / 1024.0).
.LCPI2_0:
	.quad 0x3f50000000000000 # double 9.765625E-4
	.text
	.globl _Z17initConfigurationR4dim3S0_i
	.p2align 4, 0x90
	.type _Z17initConfigurationR4dim3S0_i,@function
# initConfiguration(dim3& grid, dim3& block, int vCount)
# If vCount <= 1024: grid.x = 1, block.x = vCount.
# Otherwise:         grid.x = ceil(vCount / 1024.0), block.x = 1024.
# In both cases the y and z components of grid and block are set to 1
# (the packed qword 0x100000001 stores both at offset 4).
_Z17initConfigurationR4dim3S0_i:        # @_Z17initConfigurationR4dim3S0_i
	.cfi_startproc
# %bb.0:
	pushq %r14
	.cfi_def_cfa_offset 16
	pushq %rbx
	.cfi_def_cfa_offset 24
	pushq %rax
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -24
	.cfi_offset %r14, -16
	movq %rsi, %rbx
	movq %rdi, %r14
	movl $1, %eax
	cmpl $1025, %edx # imm = 0x401
	jl .LBB2_2
# %bb.1:
	cvtsi2sd %edx, %xmm0
	mulsd .LCPI2_0(%rip), %xmm0
	callq ceil@PLT
	cvttsd2si %xmm0, %eax
	movl $1024, %edx # imm = 0x400
.LBB2_2:
	movl %eax, (%r14)
	movabsq $4294967297, %rax # imm = 0x100000001
	movq %rax, 4(%r14)
	movl %edx, (%rbx)
	movq %rax, 4(%rbx)
	addq $8, %rsp
	.cfi_def_cfa_offset 24
	popq %rbx
	.cfi_def_cfa_offset 16
	popq %r14
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end2:
	.size _Z17initConfigurationR4dim3S0_i, .Lfunc_end2-_Z17initConfigurationR4dim3S0_i
	.cfi_endproc
                                        # -- End function
	.globl _Z9initArrayRPii # -- Begin function _Z9initArrayRPii
	.p2align 4, 0x90
	.type _Z9initArrayRPii,@function
# initArray(int*& arr, int n)
# Allocates arr = new int[n] and fills every element with 10000
# (the "infinity" sentinel for unreached earliest-arrival times).
# A negative n is turned into a (size_t)-1 allocation request so that
# operator new[] throws std::bad_alloc instead of wrapping around.
_Z9initArrayRPii:                       # @_Z9initArrayRPii
	.cfi_startproc
# %bb.0:
	pushq %r15
	.cfi_def_cfa_offset 16
	pushq %r14
	.cfi_def_cfa_offset 24
	pushq %rbx
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movl %esi, %ebx
	movq %rdi, %r14
	movslq %esi, %r15
	leaq (,%r15,4), %rax
	testl %r15d, %r15d
	movq $-1, %rdi
	cmovnsq %rax, %rdi
	callq _Znam
	movq %rax, (%r14)
	testl %r15d, %r15d
	jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
	movl %ebx, %ecx
	xorl %edx, %edx
	.p2align 4, 0x90
.LBB3_2: # %.lr.ph
	# =>This Inner Loop Header: Depth=1
	movl $10000, (%rax,%rdx,4) # imm = 0x2710
	incq %rdx
	cmpq %rdx, %rcx
	jne .LBB3_2
.LBB3_3: # %._crit_edge
	popq %rbx
	.cfi_def_cfa_offset 24
	popq %r14
	.cfi_def_cfa_offset 16
	popq %r15
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end3:
	.size _Z9initArrayRPii, .Lfunc_end3-_Z9initArrayRPii
	.cfi_endproc
                                        # -- End function
	.globl _Z16cudaCopyToDevicePiRS_i # -- Begin function _Z16cudaCopyToDevicePiRS_i
	.p2align 4, 0x90
	.type _Z16cudaCopyToDevicePiRS_i,@function
# cudaCopyToDevice(int* host, int*& device, int n)
# hipMalloc(&device, n * sizeof(int)) followed by
# hipMemcpy(device, host, n * sizeof(int), hipMemcpyHostToDevice)
# (the $1 in %ecx is the hipMemcpyHostToDevice enum value).
# The hipMemcpy return status is propagated via the tail call;
# the hipMalloc status is discarded.
_Z16cudaCopyToDevicePiRS_i:             # @_Z16cudaCopyToDevicePiRS_i
	.cfi_startproc
# %bb.0:
	pushq %r15
	.cfi_def_cfa_offset 16
	pushq %r14
	.cfi_def_cfa_offset 24
	pushq %rbx
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movq %rsi, %rbx
	movq %rdi, %r14
	movslq %edx, %r15
	shlq $2, %r15
	movq %rsi, %rdi
	movq %r15, %rsi
	callq hipMalloc
	movq (%rbx), %rdi
	movq %r14, %rsi
	movq %r15, %rdx
	movl $1, %ecx
	popq %rbx
	.cfi_def_cfa_offset 24
	popq %r14
	.cfi_def_cfa_offset 16
	popq %r15
	.cfi_def_cfa_offset 8
	jmp hipMemcpy # TAILCALL
.Lfunc_end4:
	.size _Z16cudaCopyToDevicePiRS_i, .Lfunc_end4-_Z16cudaCopyToDevicePiRS_i
	.cfi_endproc
                                        # -- End function
	.globl _Z28__device_stub__processVertexPiS_S_S_S_S_ # -- Begin function _Z28__device_stub__processVertexPiS_S_S_S_S_
	.p2align 4, 0x90
	.type _Z28__device_stub__processVertexPiS_S_S_S_S_,@function
# HIP-generated host-side launch stub for the processVertex kernel.
# Spills the six pointer arguments to the stack, builds the kernel-argument
# pointer array, pops the launch configuration pushed by the <<<...>>> site
# (__hipPopCallConfiguration) and forwards everything to hipLaunchKernel.
_Z28__device_stub__processVertexPiS_S_S_S_S_: # @_Z28__device_stub__processVertexPiS_S_S_S_S_
	.cfi_startproc
# %bb.0:
	subq $152, %rsp
	.cfi_def_cfa_offset 160
	movq %rdi, 88(%rsp)
	movq %rsi, 80(%rsp)
	movq %rdx, 72(%rsp)
	movq %rcx, 64(%rsp)
	movq %r8, 56(%rsp)
	movq %r9, 48(%rsp)
	leaq 88(%rsp), %rax
	movq %rax, 96(%rsp)
	leaq 80(%rsp), %rax
	movq %rax, 104(%rsp)
	leaq 72(%rsp), %rax
	movq %rax, 112(%rsp)
	leaq 64(%rsp), %rax
	movq %rax, 120(%rsp)
	leaq 56(%rsp), %rax
	movq %rax, 128(%rsp)
	leaq 48(%rsp), %rax
	movq %rax, 136(%rsp)
	leaq 32(%rsp), %rdi
	leaq 16(%rsp), %rsi
	leaq 8(%rsp), %rdx
	movq %rsp, %rcx
	callq __hipPopCallConfiguration
	movq 32(%rsp), %rsi
	movl 40(%rsp), %edx
	movq 16(%rsp), %rcx
	movl 24(%rsp), %r8d
	leaq 96(%rsp), %r9
	movl $_Z13processVertexPiS_S_S_S_S_, %edi
	pushq (%rsp)
	.cfi_adjust_cfa_offset 8
	pushq 16(%rsp)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $168, %rsp
	.cfi_adjust_cfa_offset -168
	retq
.Lfunc_end5:
	.size _Z28__device_stub__processVertexPiS_S_S_S_S_, .Lfunc_end5-_Z28__device_stub__processVertexPiS_S_S_S_S_
	.cfi_endproc
                                        # -- End function
	.globl _Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_ # -- Begin function _Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_
	.p2align 4, 0x90
	.type _Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_,@function
# HIP-generated host-side launch stub for the processVertices kernel
# (three int scalars, four int*, a bool* and an int*; the last four
# arguments arrive on the caller's stack).  Same pattern as the stub
# above: spill args, build the argument-pointer array, pop the launch
# configuration, forward to hipLaunchKernel.
_Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_: # @_Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_
	.cfi_startproc
# %bb.0:
	subq $184, %rsp
	.cfi_def_cfa_offset 192
	movl %edi, 20(%rsp)
	movl %esi, 16(%rsp)
	movl %edx, 12(%rsp)
	movq %rcx, 88(%rsp)
	movq %r8, 80(%rsp)
	movq %r9, 72(%rsp)
	leaq 20(%rsp), %rax
	movq %rax, 96(%rsp)
	leaq 16(%rsp), %rax
	movq %rax, 104(%rsp)
	leaq 12(%rsp), %rax
	movq %rax, 112(%rsp)
	leaq 88(%rsp), %rax
	movq %rax, 120(%rsp)
	leaq 80(%rsp), %rax
	movq %rax, 128(%rsp)
	leaq 72(%rsp), %rax
	movq %rax, 136(%rsp)
	leaq 192(%rsp), %rax
	movq %rax, 144(%rsp)
	leaq 200(%rsp), %rax
	movq %rax, 152(%rsp)
	leaq 208(%rsp), %rax
	movq %rax, 160(%rsp)
	leaq 216(%rsp), %rax
	movq %rax, 168(%rsp)
	leaq 56(%rsp), %rdi
	leaq 40(%rsp), %rsi
	leaq 32(%rsp), %rdx
	leaq 24(%rsp), %rcx
	callq __hipPopCallConfiguration
	movq 56(%rsp), %rsi
	movl 64(%rsp), %edx
	movq 40(%rsp), %rcx
	movl 48(%rsp), %r8d
	leaq 96(%rsp), %r9
	movl $_Z15processVerticesiiiPiS_S_S_S_PbS_, %edi
	pushq 24(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq 40(%rsp)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $200, %rsp
	.cfi_adjust_cfa_offset -200
	retq
.Lfunc_end6:
	.size _Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_, .Lfunc_end6-_Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_
	.cfi_endproc
                                        # -- End function
	.globl _Z20computeEarliestTimesiiPiS_S_S_S_S_ # -- Begin function _Z20computeEarliestTimesiiPiS_S_S_S_S_
	.p2align 4, 0x90
	.type _Z20computeEarliestTimesiiPiS_S_S_S_S_,@function
# computeEarliestTimes(int vCount, int eCount, int* vertex, int* edge,
#                      int* departure, int* duration, int* earliestTime,
#                      int* level)
# Iterative frontier relaxation: hipMalloc's a 1-byte device flag
# (dContinue at 16(%rsp); host copy hContinue at 15(%rsp)), then loops:
# clear the flag, copy it host->device (hipMemcpy kind 1), push the launch
# configuration from the globals gridDimension/blockDimension, launch the
# processVertices kernel with the iteration counter (%r14d, starting at 1),
# copy the flag device->host (kind 2), and repeat while a thread set it.
# The blocking hipMemcpy also serves as the kernel synchronization point.
_Z20computeEarliestTimesiiPiS_S_S_S_S_: # @_Z20computeEarliestTimesiiPiS_S_S_S_S_
	.cfi_startproc
# %bb.0:
	pushq %rbp
	.cfi_def_cfa_offset 16
	pushq %r15
	.cfi_def_cfa_offset 24
	pushq %r14
	.cfi_def_cfa_offset 32
	pushq %r13
	.cfi_def_cfa_offset 40
	pushq %r12
	.cfi_def_cfa_offset 48
	pushq %rbx
	.cfi_def_cfa_offset 56
	subq $248, %rsp
	.cfi_def_cfa_offset 304
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	movq %r9, 48(%rsp) # 8-byte Spill
	movq %r8, 40(%rsp) # 8-byte Spill
	movq %rcx, %r15
	movq %rdx, %r12
	movl %esi, %ebp
	movl %edi, %r13d
	leaq 16(%rsp), %rdi
	movl $1, %esi
	callq hipMalloc
	movl $1, %r14d
	leaq 15(%rsp), %rbx
	jmp .LBB7_1
	.p2align 4, 0x90
.LBB7_3: # in Loop: Header=BB7_1 Depth=1
# Copy dContinue back to the host and loop again if any thread set it.
	movq 16(%rsp), %rsi
	movl $1, %edx
	movq %rbx, %rdi
	movl $2, %ecx
	callq hipMemcpy
	incl %r14d
	cmpb $0, 15(%rsp)
	je .LBB7_4
.LBB7_1: # =>This Inner Loop Header: Depth=1
	movb $0, 15(%rsp)
	movq 16(%rsp), %rdi
	movl $1, %edx
	movq %rbx, %rsi
	movl $1, %ecx
	callq hipMemcpy
	movq gridDimension(%rip), %rdi
	movl gridDimension+8(%rip), %esi
	movq blockDimension(%rip), %rdx
	movl blockDimension+8(%rip), %ecx
	xorl %r8d, %r8d
	xorl %r9d, %r9d
	callq __hipPushCallConfiguration
	testl %eax, %eax
	jne .LBB7_3
# %bb.2: # in Loop: Header=BB7_1 Depth=1
# Inlined launch stub for processVertices (same layout as the stand-alone
# stub): spill the ten kernel arguments and their address array, pop the
# configuration, call hipLaunchKernel.
	movq 16(%rsp), %rax
	movq %rax, 112(%rsp)
	movl %r14d, 36(%rsp)
	movl %r13d, 32(%rsp)
	movl %ebp, 28(%rsp)
	movq %r12, 152(%rsp)
	movq %r15, 144(%rsp)
	movq 40(%rsp), %rax # 8-byte Reload
	movq %rax, 136(%rsp)
	movq 48(%rsp), %rax # 8-byte Reload
	movq %rax, 128(%rsp)
	movq 304(%rsp), %rax
	movq %rax, 120(%rsp)
	movq 312(%rsp), %rax
	movq %rax, 104(%rsp)
	leaq 36(%rsp), %rax
	movq %rax, 160(%rsp)
	leaq 32(%rsp), %rax
	movq %rax, 168(%rsp)
	leaq 28(%rsp), %rax
	movq %rax, 176(%rsp)
	leaq 152(%rsp), %rax
	movq %rax, 184(%rsp)
	leaq 144(%rsp), %rax
	movq %rax, 192(%rsp)
	leaq 136(%rsp), %rax
	movq %rax, 200(%rsp)
	leaq 128(%rsp), %rax
	movq %rax, 208(%rsp)
	leaq 120(%rsp), %rax
	movq %rax, 216(%rsp)
	leaq 112(%rsp), %rax
	movq %rax, 224(%rsp)
	leaq 104(%rsp), %rax
	movq %rax, 232(%rsp)
	leaq 88(%rsp), %rdi
	leaq 72(%rsp), %rsi
	leaq 64(%rsp), %rdx
	leaq 56(%rsp), %rcx
	callq __hipPopCallConfiguration
	movq 88(%rsp), %rsi
	movl 96(%rsp), %edx
	movq 72(%rsp), %rcx
	movl 80(%rsp), %r8d
	movl $_Z15processVerticesiiiPiS_S_S_S_PbS_, %edi
	leaq 160(%rsp), %r9
	pushq 56(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq 72(%rsp)
	.cfi_adjust_cfa_offset 8
	callq hipLaunchKernel
	addq $16, %rsp
	.cfi_adjust_cfa_offset -16
	jmp .LBB7_3
.LBB7_4:
# NOTE(review): dContinue is never hipFree'd — leaked once per call;
# fix belongs in the C++ source, not here.
	addq $248, %rsp
	.cfi_def_cfa_offset 56
	popq %rbx
	.cfi_def_cfa_offset 48
	popq %r12
	.cfi_def_cfa_offset 40
	popq %r13
	.cfi_def_cfa_offset 32
	popq %r14
	.cfi_def_cfa_offset 24
	popq %r15
	.cfi_def_cfa_offset 16
	popq %rbp
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end7:
	.size _Z20computeEarliestTimesiiPiS_S_S_S_S_, .Lfunc_end7-_Z20computeEarliestTimesiiPiS_S_S_S_S_
	.cfi_endproc
                                        # -- End function
	.section .rodata.cst8,"aM",@progbits,8
	.p2align 3, 0x0 # -- Begin function main
# Constants for main: 1/1024 (inlined initConfiguration), 1e6 and 1000
# (microsecond -> millisecond conversion of the gettimeofday delta).
.LCPI8_0:
	.quad 0x3f50000000000000 # double 9.765625E-4
.LCPI8_1:
	.quad 0x412e848000000000 # double 1.0E+6
.LCPI8_2:
	.quad 0x408f400000000000 # double 1000
	.text
	.globl main
	.p2align 4, 0x90
	.type main,@function
# main(int argc, char** argv)
# Host driver: strcpy(fileName, argv[1]); readInput; inlined
# initConfiguration (grid/block globals); four hipMalloc+hipMemcpy H2D
# pairs for vertex/edge/departure/duration; inlined initArray for
# earliestTime (fill 10000, zero the source slot); copy to device; time
# computeEarliestTimes with gettimeofday; copy results back (kind 2);
# hipFree the device buffers; print "i earliestTime[i]" per vertex and
# the elapsed milliseconds via std::cout.
# NOTE(review): argv[1] is dereferenced without checking argc, and the
# eighth computeEarliestTimes argument (cLevel) is pushed from an
# uninitialized stack slot — both defects live in the C++ source.
main:                                   # @main
	.cfi_startproc
# %bb.0:
	pushq %rbp
	.cfi_def_cfa_offset 16
	pushq %r15
	.cfi_def_cfa_offset 24
	pushq %r14
	.cfi_def_cfa_offset 32
	pushq %r13
	.cfi_def_cfa_offset 40
	pushq %r12
	.cfi_def_cfa_offset 48
	pushq %rbx
	.cfi_def_cfa_offset 56
	subq $248, %rsp
	.cfi_def_cfa_offset 304
	.cfi_offset %rbx, -56
	.cfi_offset %r12, -48
	.cfi_offset %r13, -40
	.cfi_offset %r14, -32
	.cfi_offset %r15, -24
	.cfi_offset %rbp, -16
	movq 8(%rsi), %rsi
	leaq 144(%rsp), %rbx
	movq %rbx, %rdi
	callq strcpy
	leaq 68(%rsp), %rax
	leaq 80(%rsp), %r10
	leaq 8(%rsp), %rsi
	leaq 12(%rsp), %rdx
	leaq 96(%rsp), %rcx
	leaq 104(%rsp), %r8
	leaq 88(%rsp), %r9
	movq %rbx, %rdi
	pushq %rax
	.cfi_adjust_cfa_offset 8
	pushq %r10
	.cfi_adjust_cfa_offset 8
	callq _Z9readInputPcRiS0_RPiS2_S2_S2_S0_
	addq $16, %rsp
	.cfi_adjust_cfa_offset -16
# Inlined initConfiguration(gridDimension, blockDimension, vCount).
	movslq 8(%rsp), %rbx
	movl $1, %eax
	cmpq $1025, %rbx # imm = 0x401
	jl .LBB8_1
# %bb.2:
	cvtsi2sd %ebx, %xmm0
	mulsd .LCPI8_0(%rip), %xmm0
	callq ceil@PLT
	cvttsd2si %xmm0, %eax
	movl $1024, %ecx # imm = 0x400
	jmp .LBB8_3
.LBB8_1:
	movl %ebx, %ecx
.LBB8_3: # %_Z17initConfigurationR4dim3S0_i.exit
	movl %eax, gridDimension(%rip)
	movabsq $4294967297, %rax # imm = 0x100000001
	movq %rax, gridDimension+4(%rip)
	movl %ecx, blockDimension(%rip)
	movq %rax, blockDimension+4(%rip)
# hipMalloc + host->device hipMemcpy for vertex, edge, departure, duration.
	movq 96(%rsp), %r14
	shlq $2, %rbx
	leaq 48(%rsp), %rdi
	movq %rbx, %rsi
	callq hipMalloc
	movq 48(%rsp), %rdi
	movq %r14, %rsi
	movq %rbx, %rdx
	movl $1, %ecx
	callq hipMemcpy
	movq 104(%rsp), %rbx
	movslq 12(%rsp), %r14
	shlq $2, %r14
	leaq 56(%rsp), %rdi
	movq %r14, %rsi
	callq hipMalloc
	movq 56(%rsp), %rdi
	movq %rbx, %rsi
	movq %r14, %rdx
	movl $1, %ecx
	callq hipMemcpy
	movq 88(%rsp), %rbx
	movslq 12(%rsp), %r14
	shlq $2, %r14
	leaq 40(%rsp), %rdi
	movq %r14, %rsi
	callq hipMalloc
	movq 40(%rsp), %rdi
	movq %rbx, %rsi
	movq %r14, %rdx
	movl $1, %ecx
	callq hipMemcpy
	movq 80(%rsp), %rbx
	movslq 12(%rsp), %r14
	shlq $2, %r14
	leaq 32(%rsp), %rdi
	movq %r14, %rsi
	callq hipMalloc
	movq 32(%rsp), %rdi
	movq %rbx, %rsi
	movq %r14, %rdx
	movl $1, %ecx
	callq hipMemcpy
# Inlined initArray(earliestTime, vCount): new int[vCount], fill 10000.
	movslq 8(%rsp), %r15
	leaq (,%r15,4), %r14
	testq %r15, %r15
	movq $-1, %rdi
	cmovnsq %r14, %rdi
	callq _Znam
	movq %rax, %rbx
	testq %r15, %r15
	jle .LBB8_6
# %bb.4: # %.lr.ph.preheader.i
	movl %r15d, %eax
	xorl %ecx, %ecx
	.p2align 4, 0x90
.LBB8_5: # %.lr.ph.i
	# =>This Inner Loop Header: Depth=1
	movl $10000, (%rbx,%rcx,4) # imm = 0x2710
	incq %rcx
	cmpq %rcx, %rax
	jne .LBB8_5
.LBB8_6: # %_Z9initArrayRPii.exit
# earliestTime[source] = 0, then copy the array to the device.
	movslq 68(%rsp), %rax
	movl $0, (%rbx,%rax,4)
	leaq 16(%rsp), %rdi
	movq %r14, %rsi
	callq hipMalloc
	movq 16(%rsp), %rdi
	movq %rbx, %rsi
	movq %r14, %rdx
	movl $1, %ecx
	callq hipMemcpy
	leaq 128(%rsp), %rdi
	xorl %esi, %esi
	callq gettimeofday
	movl 8(%rsp), %edi
	movl 12(%rsp), %esi
	movq 48(%rsp), %rdx
	movq 56(%rsp), %rcx
	movq 40(%rsp), %r8
	movq 32(%rsp), %r9
	subq $8, %rsp
	.cfi_adjust_cfa_offset 8
	pushq 24(%rsp)
	.cfi_adjust_cfa_offset 8
	callq _Z20computeEarliestTimesiiPiS_S_S_S_S_
	addq $16, %rsp
	.cfi_adjust_cfa_offset -16
	leaq 112(%rsp), %rdi
	xorl %esi, %esi
	callq gettimeofday
	movq 112(%rsp), %rbp
	movq 120(%rsp), %rax
	movq %rax, 72(%rsp) # 8-byte Spill
	movq 128(%rsp), %r12
	movq 136(%rsp), %rax
	movq %rax, 24(%rsp) # 8-byte Spill
# Copy earliestTime back (device->host, kind 2) and free device buffers.
	movq 16(%rsp), %rsi
	movslq 8(%rsp), %rdx
	shlq $2, %rdx
	movq %rbx, %rdi
	movl $2, %ecx
	callq hipMemcpy
	movq 16(%rsp), %rdi
	callq hipFree
	movq 56(%rsp), %rdi
	callq hipFree
	movq 48(%rsp), %rdi
	callq hipFree
	movq 40(%rsp), %rdi
	callq hipFree
	movq 32(%rsp), %rdi
	callq hipFree
	cmpl $0, 8(%rsp)
	jle .LBB8_11
# %bb.7: # %.lr.ph.preheader
	xorl %r14d, %r14d
	jmp .LBB8_8
	.p2align 4, 0x90
.LBB8_16: # in Loop: Header=BB8_8 Depth=1
	movq %r15, %rdi
	movq %rax, %r13
	callq _ZNKSt5ctypeIcE13_M_widen_initEv
	movq (%r15), %rax
	movq %r15, %rdi
	movl $10, %esi
	callq *48(%rax)
	movl %eax, %ecx
	movq %r13, %rax
.LBB8_17: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit10
	# in Loop: Header=BB8_8 Depth=1
	movsbl %cl, %esi
	movq %rax, %rdi
	callq _ZNSo3putEc
	movq %rax, %rdi
	callq _ZNSo5flushEv
	incq %r14
	movslq 8(%rsp), %rax
	cmpq %rax, %r14
	jge .LBB8_11
.LBB8_8: # %.lr.ph
	# =>This Inner Loop Header: Depth=1
# cout << i << " " << earliestTime[i] << endl;
	movl $_ZSt4cout, %edi
	movl %r14d, %esi
	callq _ZNSolsEi
	movq %rax, %r15
	movl $.L.str.1, %esi
	movl $1, %edx
	movq %rax, %rdi
	callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
	movl (%rbx,%r14,4), %esi
	movq %r15, %rdi
	callq _ZNSolsEi
	movq (%rax), %rcx
	movq -24(%rcx), %rcx
	movq 240(%rax,%rcx), %r15
	testq %r15, %r15
	je .LBB8_18
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i7
	# in Loop: Header=BB8_8 Depth=1
	cmpb $0, 56(%r15)
	je .LBB8_16
# %bb.10: # in Loop: Header=BB8_8 Depth=1
	movzbl 67(%r15), %ecx
	jmp .LBB8_17
.LBB8_11: # %._crit_edge
# time = (1e6*(sec delta) + usec delta) / 1000.0, then print it.
	subq %r12, %rbp
	cvtsi2sd %rbp, %xmm0
	mulsd .LCPI8_1(%rip), %xmm0
	cvtsi2sdq 72(%rsp), %xmm2 # 8-byte Folded Reload
	cvtsi2sdq 24(%rsp), %xmm1 # 8-byte Folded Reload
	addsd %xmm0, %xmm2
	subsd %xmm1, %xmm2
	divsd .LCPI8_2(%rip), %xmm2
	movsd %xmm2, 24(%rsp) # 8-byte Spill
	movl $_ZSt4cout, %edi
	movl $.L.str.2, %esi
	movl $8, %edx
	callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
	movl $_ZSt4cout, %edi
	movsd 24(%rsp), %xmm0 # 8-byte Reload
	# xmm0 = mem[0],zero
	callq _ZNSo9_M_insertIdEERSoT_
	movq (%rax), %rcx
	movq -24(%rcx), %rcx
	movq 240(%rax,%rcx), %rbx
	testq %rbx, %rbx
	je .LBB8_18
# %bb.12: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
	cmpb $0, 56(%rbx)
	je .LBB8_14
# %bb.13:
	movzbl 67(%rbx), %ecx
	jmp .LBB8_15
.LBB8_14:
	movq %rbx, %rdi
	movq %rax, %r14
	callq _ZNKSt5ctypeIcE13_M_widen_initEv
	movq (%rbx), %rax
	movq %rbx, %rdi
	movl $10, %esi
	callq *48(%rax)
	movl %eax, %ecx
	movq %r14, %rax
.LBB8_15: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
	movsbl %cl, %esi
	movq %rax, %rdi
	callq _ZNSo3putEc
	movq %rax, %rdi
	callq _ZNSo5flushEv
	xorl %eax, %eax
	addq $248, %rsp
	.cfi_def_cfa_offset 56
	popq %rbx
	.cfi_def_cfa_offset 48
	popq %r12
	.cfi_def_cfa_offset 40
	popq %r13
	.cfi_def_cfa_offset 32
	popq %r14
	.cfi_def_cfa_offset 24
	popq %r15
	.cfi_def_cfa_offset 16
	popq %rbp
	.cfi_def_cfa_offset 8
	retq
.LBB8_18:
	.cfi_def_cfa_offset 304
	callq _ZSt16__throw_bad_castv
.Lfunc_end8:
	.size main, .Lfunc_end8-main
	.cfi_endproc
                                        # -- End function
	.p2align 4, 0x90 # -- Begin function __hip_module_ctor
	.type __hip_module_ctor,@function
# HIP-generated module constructor: registers the embedded fat binary once
# (cached in __hip_gpubin_handle), registers the two kernels
# (processVertex, processVertices) with the runtime, and arranges for
# __hip_module_dtor to run at exit.
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq %rbx
	.cfi_def_cfa_offset 16
	subq $32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq $0, __hip_gpubin_handle(%rip)
	jne .LBB9_2
# %bb.1:
	movl $__hip_fatbin_wrapper, %edi
	callq __hipRegisterFatBinary
	movq %rax, __hip_gpubin_handle(%rip)
.LBB9_2:
	movq __hip_gpubin_handle(%rip), %rbx
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp)
	movups %xmm0, (%rsp)
	movl $_Z13processVertexPiS_S_S_S_S_, %esi
	movl $.L__unnamed_1, %edx
	movl $.L__unnamed_1, %ecx
	movq %rbx, %rdi
	movl $-1, %r8d
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	xorps %xmm0, %xmm0
	movups %xmm0, 16(%rsp)
	movups %xmm0, (%rsp)
	movl $_Z15processVerticesiiiPiS_S_S_S_PbS_, %esi
	movl $.L__unnamed_2, %edx
	movl $.L__unnamed_2, %ecx
	movq %rbx, %rdi
	movl $-1, %r8d
	xorl %r9d, %r9d
	callq __hipRegisterFunction
	movl $__hip_module_dtor, %edi
	addq $32, %rsp
	.cfi_def_cfa_offset 16
	popq %rbx
	.cfi_def_cfa_offset 8
	jmp atexit # TAILCALL
.Lfunc_end9:
	.size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor installed via atexit by __hip_module_ctor. If the fat
# binary was registered (handle non-null), unregisters it and clears the
# cached handle; otherwise it is a no-op.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB10_2 # never registered -> nothing to do
# %bb.1:
pushq %rax # dummy push keeps 16-byte stack alignment for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip) # clear handle so a second run is a no-op
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB10_2:
retq
.Lfunc_end10:
.size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor
.cfi_endproc
# -- End function
# -- End function
.type gridDimension,@object # @gridDimension
.data
.globl gridDimension
.p2align 3, 0x0
gridDimension:
.long 1 # 0x1
.long 1 # 0x1
.long 1 # 0x1
.size gridDimension, 12
.type blockDimension,@object # @blockDimension
.globl blockDimension
.p2align 3, 0x0
blockDimension:
.long 1 # 0x1
.long 1 # 0x1
.long 1 # 0x1
.size blockDimension, 12
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "csr2.txt"
.size .L.str, 9
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " "
.size .L.str.1, 2
.type _Z13processVertexPiS_S_S_S_S_,@object # @_Z13processVertexPiS_S_S_S_S_
.section .rodata,"a",@progbits
.globl _Z13processVertexPiS_S_S_S_S_
.p2align 3, 0x0
_Z13processVertexPiS_S_S_S_S_:
.quad _Z28__device_stub__processVertexPiS_S_S_S_S_
.size _Z13processVertexPiS_S_S_S_S_, 8
.type _Z15processVerticesiiiPiS_S_S_S_PbS_,@object # @_Z15processVerticesiiiPiS_S_S_S_PbS_
.globl _Z15processVerticesiiiPiS_S_S_S_PbS_
.p2align 3, 0x0
_Z15processVerticesiiiPiS_S_S_S_PbS_:
.quad _Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_
.size _Z15processVerticesiiiPiS_S_S_S_PbS_, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "Time is "
.size .L.str.2, 9
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13processVertexPiS_S_S_S_S_"
.size .L__unnamed_1, 30
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z15processVerticesiiiPiS_S_S_S_PbS_"
.size .L__unnamed_2, 37
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __gxx_personality_v0
.addrsig_sym _Z28__device_stub__processVertexPiS_S_S_S_S_
.addrsig_sym _Z30__device_stub__processVerticesiiiPiS_S_S_S_PbS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z13processVertexPiS_S_S_S_S_
.addrsig_sym _Z15processVerticesiiiPiS_S_S_S_PbS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
// [stray non-assembly prose found mid-file; preserved as a comment so the
//  surrounding listing stays coherent]:
// You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
Function : _Z15processVerticesiiiPiS_S_S_S_PbS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, 0x400, R3 ; /* 0x0000040004047824 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x164], PT ; /* 0x0000590004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R9, c[0x0][0x170] ; /* 0x00005c0004027625 */
/* 0x000fca00078e0209 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R5, [R2.64+0x4] ; /* 0x0000040402057981 */
/* 0x000ea4000c1e1900 */
/*00b0*/ ISETP.GT.AND P0, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x004fda0003f04270 */
/*00c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00d0*/ LOP3.LUT R6, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff067212 */
/* 0x000fe200078e33ff */
/*00e0*/ IMAD.IADD R7, R5.reuse, 0x1, -R0 ; /* 0x0000000105077824 */
/* 0x040fe200078e0a00 */
/*00f0*/ BSSY B0, 0x8b0 ; /* 0x000007b000007945 */
/* 0x000fe60003800000 */
/*0100*/ IMAD.IADD R6, R5, 0x1, R6 ; /* 0x0000000105067824 */
/* 0x000fe200078e0206 */
/*0110*/ LOP3.LUT R0, R7, 0x3, RZ, 0xc0, !PT ; /* 0x0000000307007812 */
/* 0x000fe200078ec0ff */
/*0120*/ IMAD.WIDE R4, R4, R9, c[0x0][0x190] ; /* 0x0000640004047625 */
/* 0x000fc600078e0209 */
/*0130*/ ISETP.GE.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe20003f06070 */
/*0140*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fd800078e00ff */
/*0150*/ @!P0 BRA 0x8a0 ; /* 0x0000074000008947 */
/* 0x000fea0003800000 */
/*0160*/ IMAD.IADD R7, R7, 0x1, -R0 ; /* 0x0000000107077824 */
/* 0x000fe400078e0a00 */
/*0170*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fe400078e00ff */
/*0180*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */
/* 0x001ea2000c1e1900 */
/*0190*/ IMAD.MOV.U32 R8, RZ, RZ, 0x4 ; /* 0x00000004ff087424 */
/* 0x000fc600078e00ff */
/*01a0*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ee2000c1e1900 */
/*01b0*/ IADD3 R15, R6, -0x1, R11 ; /* 0xffffffff060f7810 */
/* 0x004fca0007ffe00b */
/*01c0*/ IMAD.WIDE R12, R15, R8, c[0x0][0x180] ; /* 0x000060000f0c7625 */
/* 0x000fca00078e0208 */
/*01d0*/ LDG.E R14, [R12.64] ; /* 0x000000040c0e7981 */
/* 0x000ee2000c1e1900 */
/*01e0*/ BSSY B1, 0x360 ; /* 0x0000017000017945 */
/* 0x000fe20003800000 */
/*01f0*/ ISETP.GT.AND P0, PT, R9, R14, PT ; /* 0x0000000e0900720c */
/* 0x008fda0003f04270 */
/*0200*/ @P0 BRA 0x350 ; /* 0x0000014000000947 */
/* 0x000fea0003800000 */
/*0210*/ IMAD.SHL.U32 R18, R15, 0x4, RZ ; /* 0x000000040f127824 */
/* 0x000fe200078e00ff */
/*0220*/ SHF.R.S32.HI R10, RZ, 0x1f, R15 ; /* 0x0000001fff0a7819 */
/* 0x000fc8000001140f */
/*0230*/ SHF.L.U64.HI R10, R15, 0x2, R10 ; /* 0x000000020f0a7819 */
/* 0x000fe4000001020a */
/*0240*/ IADD3 R16, P0, R18, c[0x0][0x178], RZ ; /* 0x00005e0012107a10 */
/* 0x000fc80007f1e0ff */
/*0250*/ IADD3.X R17, R10, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000a117a10 */
/* 0x000fca00007fe4ff */
/*0260*/ LDG.E R13, [R16.64] ; /* 0x00000004100d7981 */
/* 0x000ea2000c1e1900 */
/*0270*/ IADD3 R18, P0, R18, c[0x0][0x188], RZ ; /* 0x0000620012127a10 */
/* 0x000fc80007f1e0ff */
/*0280*/ IADD3.X R19, R10, c[0x0][0x18c], RZ, P0, !PT ; /* 0x000063000a137a10 */
/* 0x000fca00007fe4ff */
/*0290*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ee2000c1e1900 */
/*02a0*/ IMAD.WIDE R12, R13, R8, c[0x0][0x190] ; /* 0x000064000d0c7625 */
/* 0x004fca00078e0208 */
/*02b0*/ LDG.E R10, [R12.64] ; /* 0x000000040c0a7981 */
/* 0x000ea2000c1e1900 */
/*02c0*/ IMAD.IADD R21, R14, 0x1, R18 ; /* 0x000000010e157824 */
/* 0x008fca00078e0212 */
/*02d0*/ ISETP.GE.AND P0, PT, R21, R10, PT ; /* 0x0000000a1500720c */
/* 0x004fe20003f06270 */
/*02e0*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fd800078e00ff */
/*02f0*/ @!P0 IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff0e8624 */
/* 0x000fe400078e00ff */
/*0300*/ @!P0 IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff0f8624 */
/* 0x000fe200078e00ff */
/*0310*/ @!P0 STG.E [R12.64], R21 ; /* 0x000000150c008986 */
/* 0x0001e8000c101904 */
/*0320*/ @!P0 STG.E.U8 [R14.64], R10 ; /* 0x0000000a0e008986 */
/* 0x0001e8000c101104 */
/*0330*/ @!P0 LDG.E R11, [R2.64] ; /* 0x00000004020b8981 */
/* 0x000168000c1e1900 */
/*0340*/ @!P0 LDG.E R9, [R4.64] ; /* 0x0000000404098981 */
/* 0x000164000c1e1900 */
/*0350*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0360*/ IMAD.IADD R15, R11, 0x1, R6 ; /* 0x000000010b0f7824 */
/* 0x021fc800078e0206 */
/*0370*/ IMAD.WIDE R12, R15, R8, c[0x0][0x180] ; /* 0x000060000f0c7625 */
/* 0x000fca00078e0208 */
/*0380*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000ea2000c1e1900 */
/*0390*/ BSSY B1, 0x510 ; /* 0x0000017000017945 */
/* 0x000fe20003800000 */
/*03a0*/ ISETP.GT.AND P0, PT, R9, R18, PT ; /* 0x000000120900720c */
/* 0x004fda0003f04270 */
/*03b0*/ @P0 BRA 0x500 ; /* 0x0000014000000947 */
/* 0x000fea0003800000 */
/*03c0*/ IMAD.SHL.U32 R16, R15, 0x4, RZ ; /* 0x000000040f107824 */
/* 0x000fe200078e00ff */
/*03d0*/ SHF.R.S32.HI R10, RZ, 0x1f, R15 ; /* 0x0000001fff0a7819 */
/* 0x000fc8000001140f */
/*03e0*/ SHF.L.U64.HI R10, R15, 0x2, R10 ; /* 0x000000020f0a7819 */
/* 0x000fe4000001020a */
/*03f0*/ IADD3 R14, P0, R16, c[0x0][0x178], RZ ; /* 0x00005e00100e7a10 */
/* 0x000fc80007f1e0ff */
/*0400*/ IADD3.X R15, R10, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000a0f7a10 */
/* 0x000fca00007fe4ff */
/*0410*/ LDG.E R13, [R14.64] ; /* 0x000000040e0d7981 */
/* 0x000ea2000c1e1900 */
/*0420*/ IADD3 R16, P0, R16, c[0x0][0x188], RZ ; /* 0x0000620010107a10 */
/* 0x000fc80007f1e0ff */
/*0430*/ IADD3.X R17, R10, c[0x0][0x18c], RZ, P0, !PT ; /* 0x000063000a117a10 */
/* 0x000fca00007fe4ff */
/*0440*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ee2000c1e1900 */
/*0450*/ IMAD.WIDE R12, R13, R8, c[0x0][0x190] ; /* 0x000064000d0c7625 */
/* 0x004fca00078e0208 */
/*0460*/ LDG.E R10, [R12.64] ; /* 0x000000040c0a7981 */
/* 0x000ea2000c1e1900 */
/*0470*/ IMAD.IADD R21, R18, 0x1, R16 ; /* 0x0000000112157824 */
/* 0x008fca00078e0210 */
/*0480*/ ISETP.GE.AND P0, PT, R21, R10, PT ; /* 0x0000000a1500720c */
/* 0x004fe20003f06270 */
/*0490*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fd800078e00ff */
/*04a0*/ @!P0 IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff128624 */
/* 0x000fe400078e00ff */
/*04b0*/ @!P0 IMAD.MOV.U32 R19, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff138624 */
/* 0x000fe200078e00ff */
/*04c0*/ @!P0 STG.E [R12.64], R21 ; /* 0x000000150c008986 */
/* 0x0001e8000c101904 */
/*04d0*/ @!P0 STG.E.U8 [R18.64], R10 ; /* 0x0000000a12008986 */
/* 0x0001e8000c101104 */
/*04e0*/ @!P0 LDG.E R11, [R2.64] ; /* 0x00000004020b8981 */
/* 0x000168000c1e1900 */
/*04f0*/ @!P0 LDG.E R9, [R4.64] ; /* 0x0000000404098981 */
/* 0x000164000c1e1900 */
/*0500*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0510*/ IADD3 R15, R6, 0x1, R11 ; /* 0x00000001060f7810 */
/* 0x020fca0007ffe00b */
/*0520*/ IMAD.WIDE R12, R15, R8, c[0x0][0x180] ; /* 0x000060000f0c7625 */
/* 0x001fca00078e0208 */
/*0530*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000ea2000c1e1900 */
/*0540*/ BSSY B1, 0x6c0 ; /* 0x0000017000017945 */
/* 0x000fe20003800000 */
/*0550*/ ISETP.GT.AND P0, PT, R9, R18, PT ; /* 0x000000120900720c */
/* 0x004fda0003f04270 */
/*0560*/ @P0 BRA 0x6b0 ; /* 0x0000014000000947 */
/* 0x000fea0003800000 */
/*0570*/ IMAD.SHL.U32 R16, R15, 0x4, RZ ; /* 0x000000040f107824 */
/* 0x000fe200078e00ff */
/*0580*/ SHF.R.S32.HI R10, RZ, 0x1f, R15 ; /* 0x0000001fff0a7819 */
/* 0x000fc8000001140f */
/*0590*/ SHF.L.U64.HI R10, R15, 0x2, R10 ; /* 0x000000020f0a7819 */
/* 0x000fe4000001020a */
/*05a0*/ IADD3 R14, P0, R16, c[0x0][0x178], RZ ; /* 0x00005e00100e7a10 */
/* 0x000fc80007f1e0ff */
/*05b0*/ IADD3.X R15, R10, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f000a0f7a10 */
/* 0x000fca00007fe4ff */
/*05c0*/ LDG.E R13, [R14.64] ; /* 0x000000040e0d7981 */
/* 0x000ea2000c1e1900 */
/*05d0*/ IADD3 R16, P0, R16, c[0x0][0x188], RZ ; /* 0x0000620010107a10 */
/* 0x000fc80007f1e0ff */
/*05e0*/ IADD3.X R17, R10, c[0x0][0x18c], RZ, P0, !PT ; /* 0x000063000a117a10 */
/* 0x000fca00007fe4ff */
/*05f0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ee2000c1e1900 */
/*0600*/ IMAD.WIDE R12, R13, R8, c[0x0][0x190] ; /* 0x000064000d0c7625 */
/* 0x004fca00078e0208 */
/*0610*/ LDG.E R10, [R12.64] ; /* 0x000000040c0a7981 */
/* 0x000ea2000c1e1900 */
/*0620*/ IMAD.IADD R21, R18, 0x1, R16 ; /* 0x0000000112157824 */
/* 0x008fca00078e0210 */
/*0630*/ ISETP.GE.AND P0, PT, R21, R10, PT ; /* 0x0000000a1500720c */
/* 0x004fe20003f06270 */
/*0640*/ IMAD.MOV.U32 R10, RZ, RZ, 0x1 ; /* 0x00000001ff0a7424 */
/* 0x000fd800078e00ff */
/*0650*/ @!P0 IMAD.MOV.U32 R18, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff128624 */
/* 0x000fe400078e00ff */
/*0660*/ @!P0 IMAD.MOV.U32 R19, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff138624 */
/* 0x000fe200078e00ff */
/*0670*/ @!P0 STG.E [R12.64], R21 ; /* 0x000000150c008986 */
/* 0x0001e8000c101904 */
/*0680*/ @!P0 STG.E.U8 [R18.64], R10 ; /* 0x0000000a12008986 */
/* 0x0001e8000c101104 */
/*0690*/ @!P0 LDG.E R11, [R2.64] ; /* 0x00000004020b8981 */
/* 0x000168000c1e1900 */
/*06a0*/ @!P0 LDG.E R9, [R4.64] ; /* 0x0000000404098981 */
/* 0x000164000c1e1900 */
/*06b0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*06c0*/ IADD3 R13, R6, 0x2, R11 ; /* 0x00000002060d7810 */
/* 0x021fca0007ffe00b */
/*06d0*/ IMAD.WIDE R10, R13, R8, c[0x0][0x180] ; /* 0x000060000d0a7625 */
/* 0x000fca00078e0208 */
/*06e0*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x000ea2000c1e1900 */
/*06f0*/ IADD3 R7, R7, -0x4, RZ ; /* 0xfffffffc07077810 */
/* 0x000fe20007ffe0ff */
/*0700*/ BSSY B1, 0x880 ; /* 0x0000017000017945 */
/* 0x000fe60003800000 */
/*0710*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003f05270 */
/*0720*/ ISETP.GT.AND P1, PT, R9, R15, PT ; /* 0x0000000f0900720c */
/* 0x004fda0003f24270 */
/*0730*/ @P1 BRA 0x870 ; /* 0x0000013000001947 */
/* 0x000fea0003800000 */
/*0740*/ SHF.R.S32.HI R10, RZ, 0x1f, R13 ; /* 0x0000001fff0a7819 */
/* 0x000fe2000001140d */
/*0750*/ IMAD.SHL.U32 R12, R13, 0x4, RZ ; /* 0x000000040d0c7824 */
/* 0x000fc600078e00ff */
/*0760*/ SHF.L.U64.HI R13, R13, 0x2, R10 ; /* 0x000000020d0d7819 */
/* 0x000fe4000001020a */
/*0770*/ IADD3 R10, P1, R12, c[0x0][0x178], RZ ; /* 0x00005e000c0a7a10 */
/* 0x000fc80007f3e0ff */
/*0780*/ IADD3.X R11, R13, c[0x0][0x17c], RZ, P1, !PT ; /* 0x00005f000d0b7a10 */
/* 0x000fca0000ffe4ff */
/*0790*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */
/* 0x000ea2000c1e1900 */
/*07a0*/ IADD3 R12, P1, R12, c[0x0][0x188], RZ ; /* 0x000062000c0c7a10 */
/* 0x000fc80007f3e0ff */
/*07b0*/ IADD3.X R13, R13, c[0x0][0x18c], RZ, P1, !PT ; /* 0x000063000d0d7a10 */
/* 0x000fca0000ffe4ff */
/*07c0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000ee2000c1e1900 */
/*07d0*/ IMAD.WIDE R8, R9, R8, c[0x0][0x190] ; /* 0x0000640009087625 */
/* 0x004fca00078e0208 */
/*07e0*/ LDG.E R14, [R8.64] ; /* 0x00000004080e7981 */
/* 0x000ea2000c1e1900 */
/*07f0*/ IMAD.IADD R17, R15, 0x1, R12 ; /* 0x000000010f117824 */
/* 0x008fca00078e020c */
/*0800*/ ISETP.GE.AND P1, PT, R17, R14, PT ; /* 0x0000000e1100720c */
/* 0x004fe20003f26270 */
/*0810*/ IMAD.MOV.U32 R14, RZ, RZ, 0x1 ; /* 0x00000001ff0e7424 */
/* 0x000fd800078e00ff */
/*0820*/ @!P1 PRMT R11, R14, 0x7610, R11 ; /* 0x000076100e0b9816 */
/* 0x000fe2000000000b */
/*0830*/ @!P1 IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff0e9624 */
/* 0x000fe400078e00ff */
/*0840*/ @!P1 IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff0f9624 */
/* 0x000fe200078e00ff */
/*0850*/ @!P1 STG.E [R8.64], R17 ; /* 0x0000001108009986 */
/* 0x0001e8000c101904 */
/*0860*/ @!P1 STG.E.U8 [R14.64], R11 ; /* 0x0000000b0e009986 */
/* 0x0001e4000c101104 */
/*0870*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0880*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fe20007ffe0ff */
/*0890*/ @P0 BRA 0x180 ; /* 0xfffff8e000000947 */
/* 0x000fea000383ffff */
/*08a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*08b0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*08c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*08d0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe40007ffe0ff */
/*08e0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x001ea2000c1e1900 */
/*08f0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x4 ; /* 0x00000004ff0c7424 */
/* 0x000fe400078e00ff */
/*0900*/ IMAD.IADD R11, R7, 0x1, R6 ; /* 0x00000001070b7824 */
/* 0x005fe400078e0206 */
/*0910*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*0920*/ IMAD.WIDE R8, R11, R12, c[0x0][0x180] ; /* 0x000060000b087625 */
/* 0x000fca00078e020c */
/*0930*/ LDG.E R14, [R8.64] ; /* 0x00000004080e7981 */
/* 0x000ea2000c1e1900 */
/*0940*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fe20007ffe0ff */
/*0950*/ BSSY B0, 0xac0 ; /* 0x0000016000007945 */
/* 0x000fe60003800000 */
/*0960*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f05270 */
/*0970*/ ISETP.GT.AND P1, PT, R7, R14, PT ; /* 0x0000000e0700720c */
/* 0x004fda0003f24270 */
/*0980*/ @P1 BRA 0xab0 ; /* 0x0000012000001947 */
/* 0x000fea0003800000 */
/*0990*/ SHF.R.S32.HI R8, RZ, 0x1f, R11 ; /* 0x0000001fff087819 */
/* 0x000fe2000001140b */
/*09a0*/ IMAD.SHL.U32 R10, R11, 0x4, RZ ; /* 0x000000040b0a7824 */
/* 0x000fc600078e00ff */
/*09b0*/ SHF.L.U64.HI R7, R11, 0x2, R8 ; /* 0x000000020b077819 */
/* 0x000fe40000010208 */
/*09c0*/ IADD3 R8, P1, R10, c[0x0][0x178], RZ ; /* 0x00005e000a087a10 */
/* 0x000fc80007f3e0ff */
/*09d0*/ IADD3.X R9, R7, c[0x0][0x17c], RZ, P1, !PT ; /* 0x00005f0007097a10 */
/* 0x000fca0000ffe4ff */
/*09e0*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000ea2000c1e1900 */
/*09f0*/ IADD3 R10, P1, R10, c[0x0][0x188], RZ ; /* 0x000062000a0a7a10 */
/* 0x000fc80007f3e0ff */
/*0a00*/ IADD3.X R11, R7, c[0x0][0x18c], RZ, P1, !PT ; /* 0x00006300070b7a10 */
/* 0x000fca0000ffe4ff */
/*0a10*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000ee2000c1e1900 */
/*0a20*/ IMAD.WIDE R12, R8, R12, c[0x0][0x190] ; /* 0x00006400080c7625 */
/* 0x004fca00078e020c */
/*0a30*/ LDG.E R7, [R12.64] ; /* 0x000000040c077981 */
/* 0x000ea2000c1e1900 */
/*0a40*/ IMAD.IADD R16, R14, 0x1, R10 ; /* 0x000000010e107824 */
/* 0x008fca00078e020a */
/*0a50*/ ISETP.GE.AND P1, PT, R16, R7, PT ; /* 0x000000071000720c */
/* 0x004fe20003f26270 */
/*0a60*/ IMAD.MOV.U32 R7, RZ, RZ, 0x1 ; /* 0x00000001ff077424 */
/* 0x000fd800078e00ff */
/*0a70*/ @!P1 IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x198] ; /* 0x00006600ff0e9624 */
/* 0x000fe400078e00ff */
/*0a80*/ @!P1 IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x19c] ; /* 0x00006700ff0f9624 */
/* 0x000fe200078e00ff */
/*0a90*/ @!P1 STG.E [R12.64], R16 ; /* 0x000000100c009986 */
/* 0x0001e8000c101904 */
/*0aa0*/ @!P1 STG.E.U8 [R14.64], R7 ; /* 0x000000070e009986 */
/* 0x0001e4000c101104 */
/*0ab0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0ac0*/ IADD3 R6, R6, 0x1, RZ ; /* 0x0000000106067810 */
/* 0x000fe20007ffe0ff */
/*0ad0*/ @P0 BRA 0x8e0 ; /* 0xfffffe0000000947 */
/* 0x000fea000383ffff */
/*0ae0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0af0*/ BRA 0xaf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0b00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13processVertexPiS_S_S_S_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, 0x400, R3 ; /* 0x0000040000007824 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0x18fff, PT ; /* 0x00018fff0000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R8, RZ, RZ, 0x4 ; /* 0x00000004ff087424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff047624 */
/* 0x000fe400078e00ff */
/*0090*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x184] ; /* 0x00006100ff057624 */
/* 0x000fe400078e00ff */
/*00a0*/ IMAD.WIDE R2, R0, R8, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fc600078e0208 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ea4000c1e1900 */
/*00d0*/ ISETP.GT.AND P0, PT, R4, R9, PT ; /* 0x000000090400720c */
/* 0x004fda0003f04270 */
/*00e0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00f0*/ IMAD.SHL.U32 R6, R0, 0x4, RZ ; /* 0x0000000400067824 */
/* 0x000fe200078e00ff */
/*0100*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fc80000011400 */
/*0110*/ SHF.L.U64.HI R3, R0, 0x2, R3 ; /* 0x0000000200037819 */
/* 0x000fe40000010203 */
/*0120*/ IADD3 R4, P0, R6, c[0x0][0x168], RZ ; /* 0x00005a0006047a10 */
/* 0x000fc80007f1e0ff */
/*0130*/ IADD3.X R5, R3, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0003057a10 */
/* 0x000fcc00007fe4ff */
/*0140*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0150*/ IADD3 R6, P0, R6, c[0x0][0x178], RZ ; /* 0x00005e0006067a10 */
/* 0x000fc80007f1e0ff */
/*0160*/ IADD3.X R7, R3, c[0x0][0x17c], RZ, P0, !PT ; /* 0x00005f0003077a10 */
/* 0x000fca00007fe4ff */
/*0170*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*0180*/ IMAD.WIDE R2, R5, R8, c[0x0][0x180] ; /* 0x0000600005027625 */
/* 0x004fca00078e0208 */
/*0190*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea2000c1e1900 */
/*01a0*/ IMAD.IADD R9, R9, 0x1, R6 ; /* 0x0000000109097824 */
/* 0x008fca00078e0206 */
/*01b0*/ ISETP.GE.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x004fda0003f06270 */
/*01c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01d0*/ SHF.R.S32.HI R0, RZ, 0x1f, R5 ; /* 0x0000001fff007819 */
/* 0x000fe20000011405 */
/*01e0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x1 ; /* 0x00000001ff077424 */
/* 0x000fe200078e00ff */
/*01f0*/ LEA R4, P0, R5.reuse, c[0x0][0x188], 0x2 ; /* 0x0000620005047a11 */
/* 0x040fe200078010ff */
/*0200*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe6000c101904 */
/*0210*/ LEA.HI.X R5, R5, c[0x0][0x18c], R0, 0x2, P0 ; /* 0x0000630005057a11 */
/* 0x000fca00000f1400 */
/*0220*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0230*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0240*/ BRA 0x240; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13processVertexPiS_S_S_S_S_
.globl _Z13processVertexPiS_S_S_S_S_
.p2align 8
.type _Z13processVertexPiS_S_S_S_S_,@function
; Device kernel processVertex(int*, int*, int*, int*, int*, int*), gfx1100.
; kernarg layout (see .amdgpu_metadata): six global int buffers p1..p6 at
; offsets 0x0,0x8,0x10,0x18,0x20,0x28. Machine-generated clang output; the
; high-level meaning in the comments below is inferred from the instruction
; stream — NOTE(review): confirm against the original HIP source.
_Z13processVertexPiS_S_S_S_S_:
; tid = workgroup_id.x * 1024 + workitem_id.x (shift by 10 bakes in a
; 1024-thread workgroup; s15 holds the workgroup id).
v_lshl_or_b32 v0, s15, 10, v0
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
; Bounds guard: mask off lanes with tid >= 0x19000 (102400); exit if none left.
v_cmpx_gt_i32_e32 0x19000, v0
s_cbranch_execz .LBB0_4
; Load p3 (kernarg+0x10) and p5 (kernarg+0x20) base pointers.
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x10
s_load_b64 s[2:3], s[0:1], 0x20
; v[0:1] = sign-extended tid * 4 (byte offset into the int arrays).
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
s_load_b32 s4, s[2:3], 0x0
global_load_b32 v4, v[2:3], off
s_waitcnt vmcnt(0) lgkmcnt(0)
; Keep only lanes where p5[0] <= p3[tid]; exit if all lanes fail.
v_cmp_le_i32_e32 vcc_lo, s4, v4
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_4
; Load p2 (kernarg+0x8) and p4 (kernarg+0x18) base pointers.
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b64 s[6:7], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
; v2 = p2[tid] (used as an index below), v7 = p4[tid].
global_load_b32 v2, v[2:3], off
global_load_b32 v7, v[0:1], off
s_waitcnt vmcnt(1)
v_ashrrev_i32_e32 v3, 31, v2
s_waitcnt vmcnt(0)
; candidate = p4[tid] + p3[tid]; v[0:1] = &p5[p2[tid]].
v_add_nc_u32_e32 v4, v7, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[2:3]
v_add_co_u32 v0, vcc_lo, s2, v5
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v6, vcc_lo
global_load_b32 v5, v[0:1], off
s_waitcnt vmcnt(0)
; Relax only where the candidate improves: candidate < p5[p2[tid]].
v_cmp_lt_i32_e32 vcc_lo, v4, v5
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_4
; p5[p2[tid]] = candidate and p6[p2[tid]] = 1 (p6 at kernarg+0x28) —
; presumably a per-vertex "updated" flag; confirm against the HIP source.
; Stores are plain (non-atomic); racing lanes may interleave.
s_load_b64 s[0:1], s[0:1], 0x28
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_mov_b32_e32 v5, 1
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_store_b32 v[0:1], v4, off
global_store_b32 v[2:3], v5, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13processVertexPiS_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 48
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13processVertexPiS_S_S_S_S_, .Lfunc_end0-_Z13processVertexPiS_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z15processVerticesiiiPiS_S_S_S_PbS_
.globl _Z15processVerticesiiiPiS_S_S_S_PbS_
.p2align 8
.type _Z15processVerticesiiiPiS_S_S_S_PbS_,@function
; Device kernel processVertices(int, int, int, int*, int*, int*, int*, int*,
; bool*, int*), gfx1100. kernarg layout (see .amdgpu_metadata): three
; by-value ints at 0x0/0x4/0x8, then seven global buffers at 0x10..0x40
; (p1..p5 ints, flag bool* at 0x38, last int* at 0x40 — the last one is not
; referenced below). Machine-generated clang output; the high-level meaning
; in the comments is inferred from the instruction stream — NOTE(review):
; confirm against the original HIP source.
_Z15processVerticesiiiPiS_S_S_S_PbS_:
; s2 = second int arg (kernarg+0x4); tid = wg_id.x*1024 + lane
; (shift by 10 bakes in a 1024-thread workgroup).
s_load_b32 s2, s[0:1], 0x4
v_lshl_or_b32 v0, s15, 10, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
; Guard: only lanes with tid < arg2 proceed.
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB1_7
; v[4:5] = {p1[tid], p1[tid+1]} via one 64-bit load (p1 at kernarg+0x10):
; a CSR-style [start, end) range; v6 = end - start = per-lane trip count.
s_load_b64 s[2:3], s[0:1], 0x10
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v3, vcc_lo
s_mov_b32 s2, 0
global_load_b64 v[4:5], v[0:1], off
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v6, v5, v4
; Skip lanes with an empty range (count <= 0).
v_cmp_lt_i32_e32 vcc_lo, 0, v6
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_7
; s[4:11] = pointers p2..p5 (kernarg 0x18..0x30); s[0:1] = flag bool* (0x38).
; v[2:3] = &p5[tid]; s3 = loop counter j; s2 accumulates finished lanes.
s_clause 0x1
s_load_b256 s[4:11], s[0:1], 0x18
s_load_b64 s[0:1], s[0:1], 0x38
v_mov_b32_e32 v7, 0
s_mov_b32 s3, 0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s10, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v3, vcc_lo
s_branch .LBB1_4
.LBB1_3:
; Loop latch: restore exec, j++, fold per-lane "done" (j >= count) into s2,
; and leave once every live lane has exhausted its range.
s_or_b32 exec_lo, exec_lo, s12
s_add_i32 s3, s3, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_ge_i32_e32 vcc_lo, s3, v6
s_or_b32 s2, vcc_lo, s2
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB1_7
.LBB1_4:
; idx = p1[tid] + j; note p1[tid] is re-loaded every iteration rather than
; kept in a register (compiler could not prove it loop-invariant).
global_load_b32 v4, v[0:1], off
s_mov_b32 s12, exec_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, s3, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v5, vcc_lo
; v8 = p3[idx], v9 = p5[tid]; skip this element unless p5[tid] <= p3[idx].
global_load_b32 v8, v[8:9], off
global_load_b32 v9, v[2:3], off
s_waitcnt vmcnt(0)
v_cmpx_le_i32_e64 v9, v8
s_cbranch_execz .LBB1_3
; v9 = p2[idx] (target index), v11 = p4[idx]; candidate = p4[idx] + p3[idx].
v_add_co_u32 v9, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
global_load_b32 v9, v[9:10], off
global_load_b32 v11, v[4:5], off
s_waitcnt vmcnt(1)
v_ashrrev_i32_e32 v10, 31, v9
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v8, v11, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[9:10], 2, v[9:10]
v_add_co_u32 v4, vcc_lo, s10, v9
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v10, vcc_lo
global_load_b32 v9, v[4:5], off
s_waitcnt vmcnt(0)
; Relax: if candidate < p5[target], store it and set the byte flag at
; flag[0] to 1. Plain (non-atomic) load-compare-store — looks like a
; Bellman-Ford-style relaxation where races are tolerated; confirm intent
; against the HIP source.
v_cmp_lt_i32_e32 vcc_lo, v8, v9
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB1_3
v_mov_b32_e32 v9, 1
global_store_b32 v[4:5], v8, off
global_store_b8 v7, v9, s[0:1]
s_branch .LBB1_3
.LBB1_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15processVerticesiiiPiS_S_S_S_PbS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 72
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z15processVerticesiiiPiS_S_S_S_PbS_, .Lfunc_end1-_Z15processVerticesiiiPiS_S_S_S_PbS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 48
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13processVertexPiS_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13processVertexPiS_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 72
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15processVerticesiiiPiS_S_S_S_PbS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15processVerticesiiiPiS_S_S_S_PbS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
// [stray non-assembly prose found between the AMD listing and the CUDA
//  source; preserved as a comment so the file stays coherent]:
// You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
__global__ void dispatchDots(
//Tree specs
// per Block In
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
int* blLevel,
// per GPU Block In
int* idBl,
int* offsBl,
// input values, per dot
int* rkBlPos,
int* rkBlNeg,
int* rkBlMid0,
int* rkBlMid1,
// input value, per Blocks Out
int* nPtBlPos,
int* nPtBlNeg,
int* nPtBlMid0,
int* nPtBlMid1,
int nBlocks,
int nDots,
int* whatToDo,
int* addrPt,
int* addrBloc,
int* newBlockLvl,
// bloc split
int* blKeep_dotIndexes,
int* blKeep_stBl0, int* blKeep_nPtBl0,
int* blKeep_stBl1, int* blKeep_nPtBl1,
int* blKeep_blLevel,
//bloc keep
int* blFinal_dotIndexes,
int* blFinal_stBl0, int* blFinal_nPtBl0,
int* blFinal_stBl1, int* blFinal_nPtBl1,
int* blFinal_blLevel,
// bloc discard
int* blDiscard_dotIndexes,
int* blDiscard_stBl0, int* blDiscard_nPtBl0,
int* blDiscard_stBl1, int* blDiscard_nPtBl1,
int* blDiscard_blLevel
)
{
extern __shared__ int array[];
int* whatTD = (int*)&array[7];
int* addrPtSh = (int*)&whatTD[4];
int* addrBlSh = (int*)&addrPtSh[4];
// Fetch block data
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
int idBloc;
if (iThread==0) {
idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
array[5]=blLevel[idBloc];
array[6]=idBloc;
for (int i=0;i<4;i++) {
whatTD[i]=whatToDo[4*idBloc+i];
addrPtSh[i]=addrPt[4*idBloc+i];
addrBlSh[i]=addrBloc[4*idBloc+i];
/*if (array[0]==0) {
printf("BlocIni= %i; NBloc= %i; AddrPt= %i; AddrBl= %i; WTd= %i \n",idBloc, i, addrPtSh[i], addrBlSh[i], whatTD[i] );
}*/
}
}
__syncthreads();
int offsPt = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3]; // useless in fact
int nPtBlock1 = array[4];
int blockLevel = array[5];
int nPts = nPtBlock0 + nPtBlock1;
int ptToBeComputed = iThread+offsPt;
idBloc = array[6];
if (ptToBeComputed<nPts) {
// Oki, copy dots at the proper location
int addr_pt = startIndexBl0+ptToBeComputed;
int id_pt=dotIndexes[addr_pt];
//if (id_pt<0) {printf("Ca joue pas \n");}
int inBloc1 = (ptToBeComputed>=nPtBlock0);
int rK, wTD;
//int* tabDest;
//int isSet=0;
//int shouldBeSomeWhere = 0;
// Let's handle bloc Neg = 0
wTD=whatTD[0];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlNeg[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
rK=rkBlNeg[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
}
wTD=whatTD[1];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlPos[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK]=id_pt;
}
rK=rkBlPos[addr_pt+nDots];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
}
}
wTD=whatTD[2];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid0[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n",(addrPtSh[2]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK]=id_pt;
}
rK=rkBlMid0[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[2]+rK+nPtBlMid0[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
}
}
wTD=whatTD[3];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid1[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK]=id_pt;
}
rK=rkBlMid1[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK+nPtBlMid1[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
}
}
//if ((isSet==0)&&(shouldBeSomeWhere==1)) {printf("De bleu! Le point %i n'a été mis nulle part! [%i, %i, %i, %i]\n", id_pt, whatTD[0], whatTD[1], whatTD[2], whatTD[3]);}
}
if ((iThread==0)&&(offsPt==0)) {
// needs to fill bloc properties
for (int i=0;i<4;i++) {
int wTD = wTD=whatTD[i];
int idNewBloc = addrBlSh[i];
int nPtInBloc0, nPtInBloc1;
if (i==0) {
nPtInBloc0 = nPtBlNeg[idBloc];
nPtInBloc1 = nPtBlNeg[idBloc+nBlocks];
}
if (i==1) {
nPtInBloc0 = nPtBlPos[idBloc];
nPtInBloc1 = nPtBlPos[idBloc+nBlocks];
}
if (i==2) {
nPtInBloc0 = nPtBlMid0[idBloc];
nPtInBloc1 = nPtBlMid0[idBloc+nBlocks];
}
if (i==3) {
nPtInBloc0 = nPtBlMid1[idBloc];
nPtInBloc1 = nPtBlMid1[idBloc+nBlocks];
}
//printf("\n idNewBloc = %i, on en fait %i \n nPtInBloc0 = %i, nPtInBloc1 = %i , addrPtSh = %i \n",idNewBloc, wTD,nPtInBloc0,nPtInBloc1, addrPtSh[i]);
if (wTD==0) {
//SPLIT
//printf("SPLIT!!\n");
blKeep_stBl0[idNewBloc]=addrPtSh[i];
blKeep_nPtBl0[idNewBloc]=nPtInBloc0;
blKeep_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blKeep_nPtBl1[idNewBloc]=nPtInBloc1;
blKeep_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==1) {
//KEEP
blFinal_stBl0[idNewBloc]=addrPtSh[i];
blFinal_nPtBl0[idNewBloc]=nPtInBloc0;
blFinal_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blFinal_nPtBl1[idNewBloc]=nPtInBloc1;
blFinal_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==2) {
//DISCARD
blDiscard_stBl0[idNewBloc]=addrPtSh[i];
blDiscard_nPtBl0[idNewBloc]=nPtInBloc0;
blDiscard_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blDiscard_nPtBl1[idNewBloc]=nPtInBloc1;
blDiscard_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
}
}
} | .file "tmpxft_00026168_00000000-6_gpuDispatchDots.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_
.type _Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_, @function
_Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $712, %rsp
.cfi_def_cfa_offset 720
movq %rdi, 296(%rsp)
movq %rsi, 288(%rsp)
movq %rdx, 280(%rsp)
movq %rcx, 272(%rsp)
movq %r8, 264(%rsp)
movq %r9, 256(%rsp)
movq 720(%rsp), %rax
movq %rax, 248(%rsp)
movq 728(%rsp), %rax
movq %rax, 240(%rsp)
movq 736(%rsp), %rax
movq %rax, 232(%rsp)
movq 744(%rsp), %rax
movq %rax, 224(%rsp)
movq 752(%rsp), %rax
movq %rax, 216(%rsp)
movq 760(%rsp), %rax
movq %rax, 208(%rsp)
movq 768(%rsp), %rax
movq %rax, 200(%rsp)
movq 776(%rsp), %rax
movq %rax, 192(%rsp)
movq 784(%rsp), %rax
movq %rax, 184(%rsp)
movq 792(%rsp), %rax
movq %rax, 176(%rsp)
movq 816(%rsp), %rax
movq %rax, 168(%rsp)
movq 824(%rsp), %rax
movq %rax, 160(%rsp)
movq 832(%rsp), %rax
movq %rax, 152(%rsp)
movq 840(%rsp), %rax
movq %rax, 144(%rsp)
movq 848(%rsp), %rax
movq %rax, 136(%rsp)
movq 856(%rsp), %rax
movq %rax, 128(%rsp)
movq 864(%rsp), %rax
movq %rax, 120(%rsp)
movq 872(%rsp), %rax
movq %rax, 112(%rsp)
movq 880(%rsp), %rax
movq %rax, 104(%rsp)
movq 888(%rsp), %rax
movq %rax, 96(%rsp)
movq 896(%rsp), %rax
movq %rax, 88(%rsp)
movq 904(%rsp), %rax
movq %rax, 80(%rsp)
movq 912(%rsp), %rax
movq %rax, 72(%rsp)
movq 920(%rsp), %rax
movq %rax, 64(%rsp)
movq 928(%rsp), %rax
movq %rax, 56(%rsp)
movq 936(%rsp), %rax
movq %rax, 48(%rsp)
movq 944(%rsp), %rax
movq %rax, 40(%rsp)
movq 952(%rsp), %rax
movq %rax, 32(%rsp)
movq 960(%rsp), %rax
movq %rax, 24(%rsp)
movq 968(%rsp), %rax
movq %rax, 16(%rsp)
movq 976(%rsp), %rax
movq %rax, 8(%rsp)
movq 984(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 696(%rsp)
xorl %eax, %eax
leaq 296(%rsp), %rax
movq %rax, 368(%rsp)
leaq 288(%rsp), %rax
movq %rax, 376(%rsp)
leaq 280(%rsp), %rax
movq %rax, 384(%rsp)
leaq 272(%rsp), %rax
movq %rax, 392(%rsp)
leaq 264(%rsp), %rax
movq %rax, 400(%rsp)
leaq 256(%rsp), %rax
movq %rax, 408(%rsp)
leaq 248(%rsp), %rax
movq %rax, 416(%rsp)
leaq 240(%rsp), %rax
movq %rax, 424(%rsp)
leaq 232(%rsp), %rax
movq %rax, 432(%rsp)
leaq 224(%rsp), %rax
movq %rax, 440(%rsp)
leaq 216(%rsp), %rax
movq %rax, 448(%rsp)
leaq 208(%rsp), %rax
movq %rax, 456(%rsp)
leaq 200(%rsp), %rax
movq %rax, 464(%rsp)
leaq 192(%rsp), %rax
movq %rax, 472(%rsp)
leaq 184(%rsp), %rax
movq %rax, 480(%rsp)
leaq 176(%rsp), %rax
movq %rax, 488(%rsp)
leaq 800(%rsp), %rax
movq %rax, 496(%rsp)
leaq 808(%rsp), %rax
movq %rax, 504(%rsp)
leaq 168(%rsp), %rax
movq %rax, 512(%rsp)
leaq 160(%rsp), %rax
movq %rax, 520(%rsp)
leaq 152(%rsp), %rax
movq %rax, 528(%rsp)
leaq 144(%rsp), %rax
movq %rax, 536(%rsp)
leaq 136(%rsp), %rax
movq %rax, 544(%rsp)
leaq 128(%rsp), %rax
movq %rax, 552(%rsp)
leaq 120(%rsp), %rax
movq %rax, 560(%rsp)
leaq 112(%rsp), %rax
movq %rax, 568(%rsp)
leaq 104(%rsp), %rax
movq %rax, 576(%rsp)
leaq 96(%rsp), %rax
movq %rax, 584(%rsp)
leaq 88(%rsp), %rax
movq %rax, 592(%rsp)
leaq 80(%rsp), %rax
movq %rax, 600(%rsp)
leaq 72(%rsp), %rax
movq %rax, 608(%rsp)
leaq 64(%rsp), %rax
movq %rax, 616(%rsp)
leaq 56(%rsp), %rax
movq %rax, 624(%rsp)
leaq 48(%rsp), %rax
movq %rax, 632(%rsp)
leaq 40(%rsp), %rax
movq %rax, 640(%rsp)
leaq 32(%rsp), %rax
movq %rax, 648(%rsp)
leaq 24(%rsp), %rax
movq %rax, 656(%rsp)
leaq 16(%rsp), %rax
movq %rax, 664(%rsp)
leaq 8(%rsp), %rax
movq %rax, 672(%rsp)
movq %rsp, %rax
movq %rax, 680(%rsp)
movl $1, 320(%rsp)
movl $1, 324(%rsp)
movl $1, 328(%rsp)
movl $1, 332(%rsp)
movl $1, 336(%rsp)
movl $1, 340(%rsp)
leaq 312(%rsp), %rcx
leaq 304(%rsp), %rdx
leaq 332(%rsp), %rsi
leaq 320(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 696(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $712, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 312(%rsp)
.cfi_def_cfa_offset 728
pushq 312(%rsp)
.cfi_def_cfa_offset 736
leaq 384(%rsp), %r9
movq 348(%rsp), %rcx
movl 356(%rsp), %r8d
movq 336(%rsp), %rsi
movl 344(%rsp), %edx
leaq dispatchDots(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 720
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_, .-_Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_
.globl dispatchDots
.type dispatchDots, @function
dispatchDots:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 280(%rsp)
.cfi_def_cfa_offset 24
pushq 280(%rsp)
.cfi_def_cfa_offset 32
pushq 280(%rsp)
.cfi_def_cfa_offset 40
pushq 280(%rsp)
.cfi_def_cfa_offset 48
pushq 280(%rsp)
.cfi_def_cfa_offset 56
pushq 280(%rsp)
.cfi_def_cfa_offset 64
pushq 280(%rsp)
.cfi_def_cfa_offset 72
pushq 280(%rsp)
.cfi_def_cfa_offset 80
pushq 280(%rsp)
.cfi_def_cfa_offset 88
pushq 280(%rsp)
.cfi_def_cfa_offset 96
pushq 280(%rsp)
.cfi_def_cfa_offset 104
pushq 280(%rsp)
.cfi_def_cfa_offset 112
pushq 280(%rsp)
.cfi_def_cfa_offset 120
pushq 280(%rsp)
.cfi_def_cfa_offset 128
pushq 280(%rsp)
.cfi_def_cfa_offset 136
pushq 280(%rsp)
.cfi_def_cfa_offset 144
pushq 280(%rsp)
.cfi_def_cfa_offset 152
pushq 280(%rsp)
.cfi_def_cfa_offset 160
pushq 280(%rsp)
.cfi_def_cfa_offset 168
pushq 280(%rsp)
.cfi_def_cfa_offset 176
pushq 280(%rsp)
.cfi_def_cfa_offset 184
pushq 280(%rsp)
.cfi_def_cfa_offset 192
movl 280(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 200
movl 280(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 208
pushq 280(%rsp)
.cfi_def_cfa_offset 216
pushq 280(%rsp)
.cfi_def_cfa_offset 224
pushq 280(%rsp)
.cfi_def_cfa_offset 232
pushq 280(%rsp)
.cfi_def_cfa_offset 240
pushq 280(%rsp)
.cfi_def_cfa_offset 248
pushq 280(%rsp)
.cfi_def_cfa_offset 256
pushq 280(%rsp)
.cfi_def_cfa_offset 264
pushq 280(%rsp)
.cfi_def_cfa_offset 272
pushq 280(%rsp)
.cfi_def_cfa_offset 280
pushq 280(%rsp)
.cfi_def_cfa_offset 288
call _Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_
addq $280, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size dispatchDots, .-dispatchDots
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "dispatchDots"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq dispatchDots(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | extern "C"
__global__ void dispatchDots(
//Tree specs
// per Block In
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
int* blLevel,
// per GPU Block In
int* idBl,
int* offsBl,
// input values, per dot
int* rkBlPos,
int* rkBlNeg,
int* rkBlMid0,
int* rkBlMid1,
// input value, per Blocks Out
int* nPtBlPos,
int* nPtBlNeg,
int* nPtBlMid0,
int* nPtBlMid1,
int nBlocks,
int nDots,
int* whatToDo,
int* addrPt,
int* addrBloc,
int* newBlockLvl,
// bloc split
int* blKeep_dotIndexes,
int* blKeep_stBl0, int* blKeep_nPtBl0,
int* blKeep_stBl1, int* blKeep_nPtBl1,
int* blKeep_blLevel,
//bloc keep
int* blFinal_dotIndexes,
int* blFinal_stBl0, int* blFinal_nPtBl0,
int* blFinal_stBl1, int* blFinal_nPtBl1,
int* blFinal_blLevel,
// bloc discard
int* blDiscard_dotIndexes,
int* blDiscard_stBl0, int* blDiscard_nPtBl0,
int* blDiscard_stBl1, int* blDiscard_nPtBl1,
int* blDiscard_blLevel
)
{
extern __shared__ int array[];
int* whatTD = (int*)&array[7];
int* addrPtSh = (int*)&whatTD[4];
int* addrBlSh = (int*)&addrPtSh[4];
// Fetch block data
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
int idBloc;
if (iThread==0) {
idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
array[5]=blLevel[idBloc];
array[6]=idBloc;
for (int i=0;i<4;i++) {
whatTD[i]=whatToDo[4*idBloc+i];
addrPtSh[i]=addrPt[4*idBloc+i];
addrBlSh[i]=addrBloc[4*idBloc+i];
/*if (array[0]==0) {
printf("BlocIni= %i; NBloc= %i; AddrPt= %i; AddrBl= %i; WTd= %i \n",idBloc, i, addrPtSh[i], addrBlSh[i], whatTD[i] );
}*/
}
}
__syncthreads();
int offsPt = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3]; // useless in fact
int nPtBlock1 = array[4];
int blockLevel = array[5];
int nPts = nPtBlock0 + nPtBlock1;
int ptToBeComputed = iThread+offsPt;
idBloc = array[6];
if (ptToBeComputed<nPts) {
// Oki, copy dots at the proper location
int addr_pt = startIndexBl0+ptToBeComputed;
int id_pt=dotIndexes[addr_pt];
//if (id_pt<0) {printf("Ca joue pas \n");}
int inBloc1 = (ptToBeComputed>=nPtBlock0);
int rK, wTD;
//int* tabDest;
//int isSet=0;
//int shouldBeSomeWhere = 0;
// Let's handle bloc Neg = 0
wTD=whatTD[0];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlNeg[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
rK=rkBlNeg[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
}
wTD=whatTD[1];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlPos[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK]=id_pt;
}
rK=rkBlPos[addr_pt+nDots];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
}
}
wTD=whatTD[2];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid0[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n",(addrPtSh[2]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK]=id_pt;
}
rK=rkBlMid0[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[2]+rK+nPtBlMid0[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
}
}
wTD=whatTD[3];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid1[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK]=id_pt;
}
rK=rkBlMid1[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK+nPtBlMid1[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
}
}
//if ((isSet==0)&&(shouldBeSomeWhere==1)) {printf("De bleu! Le point %i n'a été mis nulle part! [%i, %i, %i, %i]\n", id_pt, whatTD[0], whatTD[1], whatTD[2], whatTD[3]);}
}
if ((iThread==0)&&(offsPt==0)) {
// needs to fill bloc properties
for (int i=0;i<4;i++) {
int wTD = wTD=whatTD[i];
int idNewBloc = addrBlSh[i];
int nPtInBloc0, nPtInBloc1;
if (i==0) {
nPtInBloc0 = nPtBlNeg[idBloc];
nPtInBloc1 = nPtBlNeg[idBloc+nBlocks];
}
if (i==1) {
nPtInBloc0 = nPtBlPos[idBloc];
nPtInBloc1 = nPtBlPos[idBloc+nBlocks];
}
if (i==2) {
nPtInBloc0 = nPtBlMid0[idBloc];
nPtInBloc1 = nPtBlMid0[idBloc+nBlocks];
}
if (i==3) {
nPtInBloc0 = nPtBlMid1[idBloc];
nPtInBloc1 = nPtBlMid1[idBloc+nBlocks];
}
//printf("\n idNewBloc = %i, on en fait %i \n nPtInBloc0 = %i, nPtInBloc1 = %i , addrPtSh = %i \n",idNewBloc, wTD,nPtInBloc0,nPtInBloc1, addrPtSh[i]);
if (wTD==0) {
//SPLIT
//printf("SPLIT!!\n");
blKeep_stBl0[idNewBloc]=addrPtSh[i];
blKeep_nPtBl0[idNewBloc]=nPtInBloc0;
blKeep_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blKeep_nPtBl1[idNewBloc]=nPtInBloc1;
blKeep_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==1) {
//KEEP
blFinal_stBl0[idNewBloc]=addrPtSh[i];
blFinal_nPtBl0[idNewBloc]=nPtInBloc0;
blFinal_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blFinal_nPtBl1[idNewBloc]=nPtInBloc1;
blFinal_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==2) {
//DISCARD
blDiscard_stBl0[idNewBloc]=addrPtSh[i];
blDiscard_nPtBl0[idNewBloc]=nPtInBloc0;
blDiscard_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blDiscard_nPtBl1[idNewBloc]=nPtInBloc1;
blDiscard_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
}
}
} | #include <hip/hip_runtime.h>
extern "C"
__global__ void dispatchDots(
//Tree specs
// per Block In
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
int* blLevel,
// per GPU Block In
int* idBl,
int* offsBl,
// input values, per dot
int* rkBlPos,
int* rkBlNeg,
int* rkBlMid0,
int* rkBlMid1,
// input value, per Blocks Out
int* nPtBlPos,
int* nPtBlNeg,
int* nPtBlMid0,
int* nPtBlMid1,
int nBlocks,
int nDots,
int* whatToDo,
int* addrPt,
int* addrBloc,
int* newBlockLvl,
// bloc split
int* blKeep_dotIndexes,
int* blKeep_stBl0, int* blKeep_nPtBl0,
int* blKeep_stBl1, int* blKeep_nPtBl1,
int* blKeep_blLevel,
//bloc keep
int* blFinal_dotIndexes,
int* blFinal_stBl0, int* blFinal_nPtBl0,
int* blFinal_stBl1, int* blFinal_nPtBl1,
int* blFinal_blLevel,
// bloc discard
int* blDiscard_dotIndexes,
int* blDiscard_stBl0, int* blDiscard_nPtBl0,
int* blDiscard_stBl1, int* blDiscard_nPtBl1,
int* blDiscard_blLevel
)
{
extern __shared__ int array[];
int* whatTD = (int*)&array[7];
int* addrPtSh = (int*)&whatTD[4];
int* addrBlSh = (int*)&addrPtSh[4];
// Fetch block data
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
int idBloc;
if (iThread==0) {
idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
array[5]=blLevel[idBloc];
array[6]=idBloc;
for (int i=0;i<4;i++) {
whatTD[i]=whatToDo[4*idBloc+i];
addrPtSh[i]=addrPt[4*idBloc+i];
addrBlSh[i]=addrBloc[4*idBloc+i];
/*if (array[0]==0) {
printf("BlocIni= %i; NBloc= %i; AddrPt= %i; AddrBl= %i; WTd= %i \n",idBloc, i, addrPtSh[i], addrBlSh[i], whatTD[i] );
}*/
}
}
__syncthreads();
int offsPt = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3]; // useless in fact
int nPtBlock1 = array[4];
int blockLevel = array[5];
int nPts = nPtBlock0 + nPtBlock1;
int ptToBeComputed = iThread+offsPt;
idBloc = array[6];
if (ptToBeComputed<nPts) {
// Oki, copy dots at the proper location
int addr_pt = startIndexBl0+ptToBeComputed;
int id_pt=dotIndexes[addr_pt];
//if (id_pt<0) {printf("Ca joue pas \n");}
int inBloc1 = (ptToBeComputed>=nPtBlock0);
int rK, wTD;
//int* tabDest;
//int isSet=0;
//int shouldBeSomeWhere = 0;
// Let's handle bloc Neg = 0
wTD=whatTD[0];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlNeg[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
rK=rkBlNeg[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
}
wTD=whatTD[1];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlPos[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK]=id_pt;
}
rK=rkBlPos[addr_pt+nDots];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
}
}
wTD=whatTD[2];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid0[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n",(addrPtSh[2]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK]=id_pt;
}
rK=rkBlMid0[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[2]+rK+nPtBlMid0[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
}
}
wTD=whatTD[3];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid1[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK]=id_pt;
}
rK=rkBlMid1[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK+nPtBlMid1[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
}
}
//if ((isSet==0)&&(shouldBeSomeWhere==1)) {printf("De bleu! Le point %i n'a été mis nulle part! [%i, %i, %i, %i]\n", id_pt, whatTD[0], whatTD[1], whatTD[2], whatTD[3]);}
}
if ((iThread==0)&&(offsPt==0)) {
// needs to fill bloc properties
for (int i=0;i<4;i++) {
int wTD = wTD=whatTD[i];
int idNewBloc = addrBlSh[i];
int nPtInBloc0, nPtInBloc1;
if (i==0) {
nPtInBloc0 = nPtBlNeg[idBloc];
nPtInBloc1 = nPtBlNeg[idBloc+nBlocks];
}
if (i==1) {
nPtInBloc0 = nPtBlPos[idBloc];
nPtInBloc1 = nPtBlPos[idBloc+nBlocks];
}
if (i==2) {
nPtInBloc0 = nPtBlMid0[idBloc];
nPtInBloc1 = nPtBlMid0[idBloc+nBlocks];
}
if (i==3) {
nPtInBloc0 = nPtBlMid1[idBloc];
nPtInBloc1 = nPtBlMid1[idBloc+nBlocks];
}
//printf("\n idNewBloc = %i, on en fait %i \n nPtInBloc0 = %i, nPtInBloc1 = %i , addrPtSh = %i \n",idNewBloc, wTD,nPtInBloc0,nPtInBloc1, addrPtSh[i]);
if (wTD==0) {
//SPLIT
//printf("SPLIT!!\n");
blKeep_stBl0[idNewBloc]=addrPtSh[i];
blKeep_nPtBl0[idNewBloc]=nPtInBloc0;
blKeep_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blKeep_nPtBl1[idNewBloc]=nPtInBloc1;
blKeep_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==1) {
//KEEP
blFinal_stBl0[idNewBloc]=addrPtSh[i];
blFinal_nPtBl0[idNewBloc]=nPtInBloc0;
blFinal_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blFinal_nPtBl1[idNewBloc]=nPtInBloc1;
blFinal_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==2) {
//DISCARD
blDiscard_stBl0[idNewBloc]=addrPtSh[i];
blDiscard_nPtBl0[idNewBloc]=nPtInBloc0;
blDiscard_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blDiscard_nPtBl1[idNewBloc]=nPtInBloc1;
blDiscard_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
extern "C"
__global__ void dispatchDots(
//Tree specs
// per Block In
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
int* blLevel,
// per GPU Block In
int* idBl,
int* offsBl,
// input values, per dot
int* rkBlPos,
int* rkBlNeg,
int* rkBlMid0,
int* rkBlMid1,
// input value, per Blocks Out
int* nPtBlPos,
int* nPtBlNeg,
int* nPtBlMid0,
int* nPtBlMid1,
int nBlocks,
int nDots,
int* whatToDo,
int* addrPt,
int* addrBloc,
int* newBlockLvl,
// bloc split
int* blKeep_dotIndexes,
int* blKeep_stBl0, int* blKeep_nPtBl0,
int* blKeep_stBl1, int* blKeep_nPtBl1,
int* blKeep_blLevel,
//bloc keep
int* blFinal_dotIndexes,
int* blFinal_stBl0, int* blFinal_nPtBl0,
int* blFinal_stBl1, int* blFinal_nPtBl1,
int* blFinal_blLevel,
// bloc discard
int* blDiscard_dotIndexes,
int* blDiscard_stBl0, int* blDiscard_nPtBl0,
int* blDiscard_stBl1, int* blDiscard_nPtBl1,
int* blDiscard_blLevel
)
{
extern __shared__ int array[];
int* whatTD = (int*)&array[7];
int* addrPtSh = (int*)&whatTD[4];
int* addrBlSh = (int*)&addrPtSh[4];
// Fetch block data
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
int idBloc;
if (iThread==0) {
idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
array[5]=blLevel[idBloc];
array[6]=idBloc;
for (int i=0;i<4;i++) {
whatTD[i]=whatToDo[4*idBloc+i];
addrPtSh[i]=addrPt[4*idBloc+i];
addrBlSh[i]=addrBloc[4*idBloc+i];
/*if (array[0]==0) {
printf("BlocIni= %i; NBloc= %i; AddrPt= %i; AddrBl= %i; WTd= %i \n",idBloc, i, addrPtSh[i], addrBlSh[i], whatTD[i] );
}*/
}
}
__syncthreads();
int offsPt = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3]; // useless in fact
int nPtBlock1 = array[4];
int blockLevel = array[5];
int nPts = nPtBlock0 + nPtBlock1;
int ptToBeComputed = iThread+offsPt;
idBloc = array[6];
if (ptToBeComputed<nPts) {
// Oki, copy dots at the proper location
int addr_pt = startIndexBl0+ptToBeComputed;
int id_pt=dotIndexes[addr_pt];
//if (id_pt<0) {printf("Ca joue pas \n");}
int inBloc1 = (ptToBeComputed>=nPtBlock0);
int rK, wTD;
//int* tabDest;
//int isSet=0;
//int shouldBeSomeWhere = 0;
// Let's handle bloc Neg = 0
wTD=whatTD[0];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlNeg[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
rK=rkBlNeg[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
}
wTD=whatTD[1];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlPos[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK]=id_pt;
}
rK=rkBlPos[addr_pt+nDots];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
}
}
wTD=whatTD[2];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid0[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n",(addrPtSh[2]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK]=id_pt;
}
rK=rkBlMid0[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[2]+rK+nPtBlMid0[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
}
}
wTD=whatTD[3];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid1[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK]=id_pt;
}
rK=rkBlMid1[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK+nPtBlMid1[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
}
}
//if ((isSet==0)&&(shouldBeSomeWhere==1)) {printf("De bleu! Le point %i n'a été mis nulle part! [%i, %i, %i, %i]\n", id_pt, whatTD[0], whatTD[1], whatTD[2], whatTD[3]);}
}
if ((iThread==0)&&(offsPt==0)) {
// needs to fill bloc properties
for (int i=0;i<4;i++) {
int wTD = wTD=whatTD[i];
int idNewBloc = addrBlSh[i];
int nPtInBloc0, nPtInBloc1;
if (i==0) {
nPtInBloc0 = nPtBlNeg[idBloc];
nPtInBloc1 = nPtBlNeg[idBloc+nBlocks];
}
if (i==1) {
nPtInBloc0 = nPtBlPos[idBloc];
nPtInBloc1 = nPtBlPos[idBloc+nBlocks];
}
if (i==2) {
nPtInBloc0 = nPtBlMid0[idBloc];
nPtInBloc1 = nPtBlMid0[idBloc+nBlocks];
}
if (i==3) {
nPtInBloc0 = nPtBlMid1[idBloc];
nPtInBloc1 = nPtBlMid1[idBloc+nBlocks];
}
//printf("\n idNewBloc = %i, on en fait %i \n nPtInBloc0 = %i, nPtInBloc1 = %i , addrPtSh = %i \n",idNewBloc, wTD,nPtInBloc0,nPtInBloc1, addrPtSh[i]);
if (wTD==0) {
//SPLIT
//printf("SPLIT!!\n");
blKeep_stBl0[idNewBloc]=addrPtSh[i];
blKeep_nPtBl0[idNewBloc]=nPtInBloc0;
blKeep_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blKeep_nPtBl1[idNewBloc]=nPtInBloc1;
blKeep_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==1) {
//KEEP
blFinal_stBl0[idNewBloc]=addrPtSh[i];
blFinal_nPtBl0[idNewBloc]=nPtInBloc0;
blFinal_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blFinal_nPtBl1[idNewBloc]=nPtInBloc1;
blFinal_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==2) {
//DISCARD
blDiscard_stBl0[idNewBloc]=addrPtSh[i];
blDiscard_nPtBl0[idNewBloc]=nPtInBloc0;
blDiscard_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blDiscard_nPtBl1[idNewBloc]=nPtInBloc1;
blDiscard_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected dispatchDots
.globl dispatchDots
.p2align 8
.type dispatchDots,@function
dispatchDots:
s_mov_b32 s12, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_3
s_load_b128 s[8:11], s[0:1], 0x28
s_mov_b32 s2, s15
s_ashr_i32 s3, s15, 31
s_load_b64 s[14:15], s[0:1], 0x38
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s10, s2
s_addc_u32 s5, s11, s3
s_load_b32 s10, s[4:5], 0x0
s_clause 0x2
s_load_b256 s[16:23], s[0:1], 0x8
s_load_b128 s[4:7], s[0:1], 0x88
s_load_b64 s[24:25], s[0:1], 0x98
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s10
s_ashr_i32 s11, s10, 31
s_add_u32 s2, s14, s2
s_addc_u32 s3, s15, s3
s_lshl_b64 s[14:15], s[10:11], 2
s_load_b32 s11, s[2:3], 0x0
s_add_u32 s2, s16, s14
s_addc_u32 s3, s17, s15
s_add_u32 s16, s18, s14
s_addc_u32 s17, s19, s15
s_load_b32 s13, s[2:3], 0x0
s_add_u32 s2, s20, s14
s_addc_u32 s3, s21, s15
s_load_b32 s18, s[16:17], 0x0
s_add_u32 s16, s22, s14
s_addc_u32 s17, s23, s15
s_load_b32 s19, s[2:3], 0x0
s_add_u32 s2, s8, s14
s_addc_u32 s3, s9, s15
s_load_b32 s14, s[16:17], 0x0
s_load_b32 s15, s[2:3], 0x0
s_lshl_b32 s8, s10, 2
s_mov_b64 s[2:3], 0
s_ashr_i32 s9, s8, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[8:9], 2
s_waitcnt lgkmcnt(0)
v_mov_b32_e32 v3, s11
s_add_u32 s4, s4, s8
s_addc_u32 s5, s5, s9
s_add_u32 s6, s6, s8
s_addc_u32 s7, s7, s9
v_mov_b32_e32 v4, s13
s_add_u32 s8, s24, s8
s_addc_u32 s9, s25, s9
v_mov_b32_e32 v5, s18
s_add_i32 s10, 0, 28
v_dual_mov_b32 v6, s19 :: v_dual_mov_b32 v7, s14
v_mov_b32_e32 v8, s15
ds_store_2addr_b32 v1, v3, v4 offset1:1
ds_store_2addr_b32 v1, v5, v6 offset0:2 offset1:3
ds_store_2addr_b32 v1, v7, v8 offset0:4 offset1:5
ds_store_b32 v1, v2 offset:24
.p2align 6
.LBB0_2:
s_add_u32 s14, s4, s2
s_addc_u32 s15, s5, s3
s_add_u32 s16, s6, s2
s_addc_u32 s17, s7, s3
s_load_b32 s11, s[14:15], 0x0
s_add_u32 s14, s8, s2
s_addc_u32 s15, s9, s3
s_load_b32 s13, s[16:17], 0x0
s_load_b32 s14, s[14:15], 0x0
v_mov_b32_e32 v1, s10
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_add_i32 s10, s10, 4
s_cmp_lg_u32 s2, 16
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v3, s13
v_mov_b32_e32 v4, s14
ds_store_2addr_b32 v1, v2, v3 offset1:4
ds_store_b32 v1, v4 offset:32
s_cbranch_scc1 .LBB0_2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s12
v_mov_b32_e32 v3, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_load_b256 s[4:11], s[0:1], 0x60
ds_load_2addr_b32 v[1:2], v3 offset1:2
ds_load_2addr_b32 v[3:4], v3 offset0:4 offset1:6
s_mov_b32 s22, exec_lo
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v3, v2
v_add_nc_u32_e32 v2, v1, v0
v_readfirstlane_b32 s2, v4
s_delay_alu instid0(VALU_DEP_2)
v_cmpx_lt_i32_e64 v2, v3
s_cbranch_execz .LBB0_122
v_mov_b32_e32 v3, 0
s_load_b64 s[12:13], s[0:1], 0x0
ds_load_2addr_b32 v[6:7], v3 offset0:1 offset1:7
s_clause 0x2
s_load_b32 s23, s[0:1], 0x84
s_load_b64 s[14:15], s[0:1], 0xa8
s_load_b64 s[16:17], s[0:1], 0xd8
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v6, v2
v_readfirstlane_b32 s3, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[4:5], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, s12, v4
v_add_co_ci_u32_e32 v9, vcc_lo, s13, v5, vcc_lo
s_load_b64 s[12:13], s[0:1], 0x108
v_cmp_eq_u32_e32 vcc_lo, 3, v7
global_load_b32 v6, v[8:9], off
s_cbranch_vccnz .LBB0_34
s_load_b64 s[18:19], s[0:1], 0x48
s_mov_b32 s24, exec_lo
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 -1, v4
s_cbranch_execz .LBB0_19
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_11
s_cmp_gt_i32 s3, 1
s_cbranch_scc0 .LBB0_12
s_mov_b32 s26, 0
s_cmp_eq_u32 s3, 2
s_mov_b32 s25, 0
s_cbranch_scc0 .LBB0_10
s_mov_b32 s25, -1
.LBB0_10:
s_mov_b64 s[20:21], s[12:13]
s_branch .LBB0_13
.LBB0_11:
s_mov_b32 s25, 0
s_cbranch_execnz .LBB0_16
s_branch .LBB0_17
.LBB0_12:
s_mov_b32 s26, -1
s_mov_b32 s25, 0
.LBB0_13:
s_and_b32 vcc_lo, exec_lo, s26
s_cbranch_vccz .LBB0_15
s_mov_b32 s25, -1
s_mov_b64 s[20:21], s[16:17]
.LBB0_15:
s_branch .LBB0_17
.LBB0_16:
s_cmp_eq_u32 s3, 0
s_mov_b64 s[20:21], s[14:15]
s_cselect_b32 s25, -1, 0
.LBB0_17:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s25
s_cbranch_vccnz .LBB0_19
v_mov_b32_e32 v5, 0
ds_load_b32 v5, v5 offset:44
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s20, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s21, v5, vcc_lo
global_store_b32 v[4:5], v6, off
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s24
v_add_nc_u32_e32 v4, s23, v2
s_mov_b32 s20, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 -1, v4
s_cbranch_execz .LBB0_33
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_25
s_cmp_gt_i32 s3, 1
s_cbranch_scc0 .LBB0_26
s_mov_b32 s24, 0
s_cmp_eq_u32 s3, 2
s_mov_b32 s21, 0
s_cbranch_scc0 .LBB0_24
s_mov_b32 s21, -1
.LBB0_24:
s_mov_b64 s[18:19], s[12:13]
s_branch .LBB0_27
.LBB0_25:
s_mov_b32 s21, 0
s_cbranch_execnz .LBB0_30
s_branch .LBB0_31
.LBB0_26:
s_mov_b32 s24, -1
s_mov_b32 s21, 0
.LBB0_27:
s_and_b32 vcc_lo, exec_lo, s24
s_cbranch_vccz .LBB0_29
s_mov_b32 s21, -1
s_mov_b64 s[18:19], s[16:17]
.LBB0_29:
s_branch .LBB0_31
.LBB0_30:
s_cmp_eq_u32 s3, 0
s_mov_b64 s[18:19], s[14:15]
s_cselect_b32 s21, -1, 0
.LBB0_31:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s21
s_cbranch_vccnz .LBB0_33
s_ashr_i32 s3, s2, 31
v_mov_b32_e32 v5, 0
s_lshl_b64 s[24:25], s[2:3], 2
v_mov_b32_e32 v7, 0
s_add_u32 s24, s6, s24
s_addc_u32 s25, s7, s25
global_load_b32 v5, v5, s[24:25]
ds_load_b32 v7, v7 offset:44
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add3_u32 v4, v5, v7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_store_b32 v[4:5], v6, off
.LBB0_33:
s_or_b32 exec_lo, exec_lo, s20
.LBB0_34:
v_mov_b32_e32 v4, 0
ds_load_b32 v4, v4 offset:32
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 3, v4
v_readfirstlane_b32 s3, v4
s_cbranch_vccnz .LBB0_64
s_load_b64 s[18:19], s[0:1], 0x40
v_lshlrev_b64 v[4:5], 2, v[2:3]
s_mov_b32 s24, exec_lo
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 -1, v4
s_cbranch_execz .LBB0_49
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_41
s_cmp_gt_i32 s3, 1
s_cbranch_scc0 .LBB0_42
s_mov_b32 s26, 0
s_cmp_eq_u32 s3, 2
s_mov_b32 s25, 0
s_cbranch_scc0 .LBB0_40
s_mov_b32 s25, -1
.LBB0_40:
s_mov_b64 s[20:21], s[12:13]
s_branch .LBB0_43
.LBB0_41:
s_mov_b32 s25, 0
s_cbranch_execnz .LBB0_46
s_branch .LBB0_47
.LBB0_42:
s_mov_b32 s26, -1
s_mov_b32 s25, 0
.LBB0_43:
s_and_b32 vcc_lo, exec_lo, s26
s_cbranch_vccz .LBB0_45
s_mov_b32 s25, -1
s_mov_b64 s[20:21], s[16:17]
.LBB0_45:
s_branch .LBB0_47
.LBB0_46:
s_cmp_eq_u32 s3, 0
s_mov_b64 s[20:21], s[14:15]
s_cselect_b32 s25, -1, 0
.LBB0_47:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s25
s_cbranch_vccnz .LBB0_49
v_mov_b32_e32 v5, 0
ds_load_b32 v5, v5 offset:48
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s20, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s21, v5, vcc_lo
global_store_b32 v[4:5], v6, off
.LBB0_49:
s_or_b32 exec_lo, exec_lo, s24
v_add_nc_u32_e32 v4, s23, v2
s_mov_b32 s20, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 -1, v4
s_cbranch_execz .LBB0_63
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_55
s_cmp_gt_i32 s3, 1
s_cbranch_scc0 .LBB0_56
s_mov_b32 s24, 0
s_cmp_eq_u32 s3, 2
s_mov_b32 s21, 0
s_cbranch_scc0 .LBB0_54
s_mov_b32 s21, -1
.LBB0_54:
s_mov_b64 s[18:19], s[12:13]
s_branch .LBB0_57
.LBB0_55:
s_mov_b32 s21, 0
s_cbranch_execnz .LBB0_60
s_branch .LBB0_61
.LBB0_56:
s_mov_b32 s24, -1
s_mov_b32 s21, 0
.LBB0_57:
s_and_b32 vcc_lo, exec_lo, s24
s_cbranch_vccz .LBB0_59
s_mov_b32 s21, -1
s_mov_b64 s[18:19], s[16:17]
.LBB0_59:
s_branch .LBB0_61
.LBB0_60:
s_cmp_eq_u32 s3, 0
s_mov_b64 s[18:19], s[14:15]
s_cselect_b32 s21, -1, 0
.LBB0_61:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s21
s_cbranch_vccnz .LBB0_63
s_ashr_i32 s3, s2, 31
v_mov_b32_e32 v5, 0
s_lshl_b64 s[24:25], s[2:3], 2
v_mov_b32_e32 v7, 0
s_add_u32 s24, s4, s24
s_addc_u32 s25, s5, s25
global_load_b32 v5, v5, s[24:25]
ds_load_b32 v7, v7 offset:48
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add3_u32 v4, v5, v7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_store_b32 v[4:5], v6, off
.LBB0_63:
s_or_b32 exec_lo, exec_lo, s20
.LBB0_64:
v_mov_b32_e32 v4, 0
ds_load_b32 v4, v4 offset:36
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 3, v4
v_readfirstlane_b32 s3, v4
s_cbranch_vccnz .LBB0_94
s_load_b64 s[18:19], s[0:1], 0x50
v_lshlrev_b64 v[4:5], 2, v[2:3]
s_mov_b32 s24, exec_lo
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 -1, v4
s_cbranch_execz .LBB0_79
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_71
s_cmp_gt_i32 s3, 1
s_cbranch_scc0 .LBB0_72
s_mov_b32 s26, 0
s_cmp_eq_u32 s3, 2
s_mov_b32 s25, 0
s_cbranch_scc0 .LBB0_70
s_mov_b32 s25, -1
.LBB0_70:
s_mov_b64 s[20:21], s[12:13]
s_branch .LBB0_73
.LBB0_71:
s_mov_b32 s25, 0
s_cbranch_execnz .LBB0_76
s_branch .LBB0_77
.LBB0_72:
s_mov_b32 s26, -1
s_mov_b32 s25, 0
.LBB0_73:
s_and_b32 vcc_lo, exec_lo, s26
s_cbranch_vccz .LBB0_75
s_mov_b32 s25, -1
s_mov_b64 s[20:21], s[16:17]
.LBB0_75:
s_branch .LBB0_77
.LBB0_76:
s_cmp_eq_u32 s3, 0
s_mov_b64 s[20:21], s[14:15]
s_cselect_b32 s25, -1, 0
.LBB0_77:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s25
s_cbranch_vccnz .LBB0_79
v_mov_b32_e32 v5, 0
ds_load_b32 v5, v5 offset:52
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s20, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s21, v5, vcc_lo
global_store_b32 v[4:5], v6, off
.LBB0_79:
s_or_b32 exec_lo, exec_lo, s24
v_add_nc_u32_e32 v4, s23, v2
s_mov_b32 s20, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_load_b32 v4, v[4:5], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 -1, v4
s_cbranch_execz .LBB0_93
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_85
s_cmp_gt_i32 s3, 1
s_cbranch_scc0 .LBB0_86
s_mov_b32 s24, 0
s_cmp_eq_u32 s3, 2
s_mov_b32 s21, 0
s_cbranch_scc0 .LBB0_84
s_mov_b32 s21, -1
.LBB0_84:
s_mov_b64 s[18:19], s[12:13]
s_branch .LBB0_87
.LBB0_85:
s_mov_b32 s21, 0
s_cbranch_execnz .LBB0_90
s_branch .LBB0_91
.LBB0_86:
s_mov_b32 s24, -1
s_mov_b32 s21, 0
.LBB0_87:
s_and_b32 vcc_lo, exec_lo, s24
s_cbranch_vccz .LBB0_89
s_mov_b32 s21, -1
s_mov_b64 s[18:19], s[16:17]
.LBB0_89:
s_branch .LBB0_91
.LBB0_90:
s_cmp_eq_u32 s3, 0
s_mov_b64 s[18:19], s[14:15]
s_cselect_b32 s21, -1, 0
.LBB0_91:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s21
s_cbranch_vccnz .LBB0_93
s_ashr_i32 s3, s2, 31
v_mov_b32_e32 v5, 0
s_lshl_b64 s[24:25], s[2:3], 2
v_mov_b32_e32 v7, 0
s_add_u32 s24, s8, s24
s_addc_u32 s25, s9, s25
global_load_b32 v5, v5, s[24:25]
ds_load_b32 v7, v7 offset:52
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add3_u32 v4, v5, v7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[4:5], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s18, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s19, v5, vcc_lo
global_store_b32 v[4:5], v6, off
.LBB0_93:
s_or_b32 exec_lo, exec_lo, s20
.LBB0_94:
v_mov_b32_e32 v4, 0
ds_load_b32 v4, v4 offset:40
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 3, v4
v_readfirstlane_b32 s3, v4
s_cbranch_vccnz .LBB0_122
s_load_b64 s[18:19], s[0:1], 0x58
v_lshlrev_b64 v[3:4], 2, v[2:3]
s_mov_b32 s24, exec_lo
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s18, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s19, v4, vcc_lo
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(0)
v_cmpx_ne_u32_e32 -1, v3
s_cbranch_execz .LBB0_109
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_101
s_cmp_gt_i32 s3, 1
s_cbranch_scc0 .LBB0_102
s_mov_b32 s26, 0
s_cmp_eq_u32 s3, 2
s_mov_b32 s25, 0
s_cbranch_scc0 .LBB0_100
s_mov_b32 s25, -1
.LBB0_100:
s_mov_b64 s[20:21], s[12:13]
s_branch .LBB0_103
.LBB0_101:
s_mov_b32 s25, 0
s_cbranch_execnz .LBB0_106
s_branch .LBB0_107
.LBB0_102:
s_mov_b32 s26, -1
s_mov_b32 s25, 0
.LBB0_103:
s_and_b32 vcc_lo, exec_lo, s26
s_cbranch_vccz .LBB0_105
s_mov_b32 s25, -1
s_mov_b64 s[20:21], s[16:17]
.LBB0_105:
s_branch .LBB0_107
.LBB0_106:
s_cmp_eq_u32 s3, 0
s_mov_b64 s[20:21], s[14:15]
s_cselect_b32 s25, -1, 0
.LBB0_107:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s25
s_cbranch_vccnz .LBB0_109
v_mov_b32_e32 v4, 0
ds_load_b32 v4, v4 offset:56
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[3:4], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s20, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s21, v4, vcc_lo
global_store_b32 v[3:4], v6, off
.LBB0_109:
s_or_b32 exec_lo, exec_lo, s24
v_add_nc_u32_e32 v2, s23, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s18, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s19, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, -1, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_122
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_114
s_cmp_gt_i32 s3, 1
s_cbranch_scc0 .LBB0_115
s_mov_b32 s19, 0
s_cmp_eq_u32 s3, 2
s_mov_b32 s18, 0
s_cbranch_scc0 .LBB0_116
s_mov_b32 s18, -1
s_branch .LBB0_116
.LBB0_114:
s_mov_b32 s18, 0
s_cbranch_execnz .LBB0_119
s_branch .LBB0_120
.LBB0_115:
s_mov_b32 s19, -1
s_mov_b32 s18, 0
.LBB0_116:
s_and_b32 vcc_lo, exec_lo, s19
s_cbranch_vccz .LBB0_118
s_mov_b32 s18, -1
s_mov_b64 s[12:13], s[16:17]
.LBB0_118:
s_branch .LBB0_120
.LBB0_119:
s_cmp_eq_u32 s3, 0
s_mov_b64 s[12:13], s[14:15]
s_cselect_b32 s18, -1, 0
.LBB0_120:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s18
s_cbranch_vccnz .LBB0_122
s_ashr_i32 s3, s2, 31
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, 0
s_lshl_b64 s[14:15], s[2:3], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s14, s10, s14
s_addc_u32 s15, s11, s15
ds_load_b32 v4, v4 offset:56
global_load_b32 v3, v3, s[14:15]
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add3_u32 v2, v3, v4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s12, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s13, v3, vcc_lo
global_store_b32 v[2:3], v6, off
.LBB0_122:
s_or_b32 exec_lo, exec_lo, s22
v_or_b32_e32 v0, v1, v0
s_mov_b32 s33, 0
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_144
s_clause 0x2
s_load_b32 s34, s[0:1], 0x80
s_load_b64 s[50:51], s[0:1], 0xa0
s_load_b256 s[12:19], s[0:1], 0xb0
s_ashr_i32 s3, s2, 31
s_load_b64 s[28:29], s[0:1], 0xd0
s_lshl_b64 s[48:49], s[2:3], 2
s_load_b256 s[20:27], s[0:1], 0xe0
s_add_u32 s30, s6, s48
s_addc_u32 s31, s7, s49
s_load_b256 s[36:43], s[0:1], 0x110
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_add_i32 s44, s2, s34
s_load_b64 s[34:35], s[0:1], 0x100
s_ashr_i32 s45, s44, 31
s_load_b64 s[0:1], s[0:1], 0x130
s_lshl_b64 s[52:53], s[44:45], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s6, s6, s52
s_addc_u32 s7, s7, s53
s_add_u32 s44, s4, s48
s_addc_u32 s45, s5, s49
s_add_u32 s4, s4, s52
s_addc_u32 s5, s5, s53
s_add_u32 s46, s8, s48
s_addc_u32 s47, s9, s49
s_add_u32 s8, s8, s52
s_addc_u32 s9, s9, s53
s_add_u32 s48, s10, s48
s_addc_u32 s49, s11, s49
s_add_u32 s10, s10, s52
s_addc_u32 s11, s11, s53
s_lshl_b32 s2, s2, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s3, s2, 31
s_lshl_b64 s[2:3], s[2:3], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s2, s50, s2
s_addc_u32 s3, s51, s3
s_branch .LBB0_125
.LBB0_124:
s_add_i32 s33, s33, 4
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmp_lg_u32 s33, 16
s_cbranch_scc0 .LBB0_144
.LBB0_125:
s_add_i32 s52, s33, 0
s_cmp_lg_u32 s33, 0
v_mov_b32_e32 v3, s52
ds_load_2addr_b32 v[3:4], v3 offset0:7 offset1:15
s_waitcnt lgkmcnt(0)
v_readfirstlane_b32 s53, v3
v_readfirstlane_b32 s50, v4
s_cbranch_scc0 .LBB0_141
s_cmp_lg_u32 s33, 4
s_cbranch_scc0 .LBB0_142
.LBB0_127:
s_cmp_lg_u32 s33, 8
s_cbranch_scc0 .LBB0_143
.LBB0_128:
s_cmp_lg_u32 s33, 12
s_cbranch_scc1 .LBB0_130
.LBB0_129:
s_clause 0x1
global_load_b32 v1, v0, s[48:49]
global_load_b32 v2, v0, s[10:11]
.LBB0_130:
s_cmp_lt_i32 s53, 1
s_mov_b32 s51, -1
s_cbranch_scc1 .LBB0_138
s_cmp_lt_i32 s53, 2
s_cbranch_scc1 .LBB0_135
s_cmp_eq_u32 s53, 2
s_cbranch_scc0 .LBB0_134
v_mov_b32_e32 v3, s52
s_ashr_i32 s51, s50, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[54:55], s[50:51], 2
s_add_u32 s56, s36, s54
ds_load_b32 v3, v3 offset:44
s_addc_u32 s57, s37, s55
s_add_u32 s58, s38, s54
s_addc_u32 s59, s39, s55
s_add_u32 s60, s40, s54
s_addc_u32 s61, s41, s55
s_add_u32 s62, s42, s54
s_addc_u32 s63, s43, s55
s_add_u32 s54, s0, s54
s_addc_u32 s55, s1, s55
s_waitcnt vmcnt(1) lgkmcnt(0)
v_add_nc_u32_e32 v4, v3, v1
s_clause 0x2
global_store_b32 v0, v3, s[56:57]
global_store_b32 v0, v1, s[58:59]
global_store_b32 v0, v4, s[60:61]
s_waitcnt vmcnt(0)
global_store_b32 v0, v2, s[62:63]
global_load_b32 v3, v0, s[2:3]
s_waitcnt vmcnt(0)
global_store_b32 v0, v3, s[54:55]
.LBB0_134:
s_mov_b32 s51, 0
.LBB0_135:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s51
s_cbranch_vccnz .LBB0_137
v_mov_b32_e32 v3, s52
s_ashr_i32 s51, s50, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[54:55], s[50:51], 2
s_add_u32 s56, s20, s54
ds_load_b32 v3, v3 offset:44
s_addc_u32 s57, s21, s55
s_add_u32 s58, s22, s54
s_addc_u32 s59, s23, s55
s_add_u32 s60, s24, s54
s_addc_u32 s61, s25, s55
s_add_u32 s62, s26, s54
s_addc_u32 s63, s27, s55
s_add_u32 s54, s34, s54
s_addc_u32 s55, s35, s55
s_waitcnt vmcnt(1) lgkmcnt(0)
v_add_nc_u32_e32 v4, v3, v1
s_clause 0x2
global_store_b32 v0, v3, s[56:57]
global_store_b32 v0, v1, s[58:59]
global_store_b32 v0, v4, s[60:61]
s_waitcnt vmcnt(0)
global_store_b32 v0, v2, s[62:63]
global_load_b32 v3, v0, s[2:3]
s_waitcnt vmcnt(0)
global_store_b32 v0, v3, s[54:55]
.LBB0_137:
s_mov_b32 s51, 0
.LBB0_138:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s51
s_cbranch_vccnz .LBB0_124
s_cmp_lg_u32 s53, 0
s_cbranch_scc1 .LBB0_124
v_mov_b32_e32 v3, s52
s_ashr_i32 s51, s50, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[50:51], s[50:51], 2
s_add_u32 s52, s12, s50
ds_load_b32 v3, v3 offset:44
s_addc_u32 s53, s13, s51
s_add_u32 s54, s14, s50
s_addc_u32 s55, s15, s51
s_add_u32 s56, s16, s50
s_addc_u32 s57, s17, s51
s_add_u32 s58, s18, s50
s_addc_u32 s59, s19, s51
s_add_u32 s50, s28, s50
s_addc_u32 s51, s29, s51
s_waitcnt vmcnt(1) lgkmcnt(0)
v_add_nc_u32_e32 v4, v3, v1
s_clause 0x2
global_store_b32 v0, v3, s[52:53]
global_store_b32 v0, v1, s[54:55]
global_store_b32 v0, v4, s[56:57]
s_waitcnt vmcnt(0)
global_store_b32 v0, v2, s[58:59]
global_load_b32 v3, v0, s[2:3]
s_waitcnt vmcnt(0)
global_store_b32 v0, v3, s[50:51]
s_branch .LBB0_124
.LBB0_141:
s_clause 0x1
global_load_b32 v1, v0, s[30:31]
global_load_b32 v2, v0, s[6:7]
s_cmp_lg_u32 s33, 4
s_cbranch_scc1 .LBB0_127
.LBB0_142:
s_clause 0x1
global_load_b32 v1, v0, s[44:45]
global_load_b32 v2, v0, s[4:5]
s_cmp_lg_u32 s33, 8
s_cbranch_scc1 .LBB0_128
.LBB0_143:
s_clause 0x1
global_load_b32 v1, v0, s[46:47]
global_load_b32 v2, v0, s[8:9]
s_cmp_lg_u32 s33, 12
s_cbranch_scc0 .LBB0_129
s_branch .LBB0_130
.LBB0_144:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel dispatchDots
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 64
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size dispatchDots, .Lfunc_end0-dispatchDots
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 72
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 80
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 88
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 96
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 104
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 112
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 120
.size: 8
.value_kind: global_buffer
- .offset: 128
.size: 4
.value_kind: by_value
- .offset: 132
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 136
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 144
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 152
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 160
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 168
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 176
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 184
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 192
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 200
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 208
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 216
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 224
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 232
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 240
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 248
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 256
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 264
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 272
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 280
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 288
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 296
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 304
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: dispatchDots
.private_segment_fixed_size: 0
.sgpr_count: 66
.sgpr_spill_count: 0
.symbol: dispatchDots.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
extern "C"
__global__ void dispatchDots(
//Tree specs
// per Block In
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
int* blLevel,
// per GPU Block In
int* idBl,
int* offsBl,
// input values, per dot
int* rkBlPos,
int* rkBlNeg,
int* rkBlMid0,
int* rkBlMid1,
// input value, per Blocks Out
int* nPtBlPos,
int* nPtBlNeg,
int* nPtBlMid0,
int* nPtBlMid1,
int nBlocks,
int nDots,
int* whatToDo,
int* addrPt,
int* addrBloc,
int* newBlockLvl,
// bloc split
int* blKeep_dotIndexes,
int* blKeep_stBl0, int* blKeep_nPtBl0,
int* blKeep_stBl1, int* blKeep_nPtBl1,
int* blKeep_blLevel,
//bloc keep
int* blFinal_dotIndexes,
int* blFinal_stBl0, int* blFinal_nPtBl0,
int* blFinal_stBl1, int* blFinal_nPtBl1,
int* blFinal_blLevel,
// bloc discard
int* blDiscard_dotIndexes,
int* blDiscard_stBl0, int* blDiscard_nPtBl0,
int* blDiscard_stBl1, int* blDiscard_nPtBl1,
int* blDiscard_blLevel
)
{
extern __shared__ int array[];
int* whatTD = (int*)&array[7];
int* addrPtSh = (int*)&whatTD[4];
int* addrBlSh = (int*)&addrPtSh[4];
// Fetch block data
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
int idBloc;
if (iThread==0) {
idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
array[5]=blLevel[idBloc];
array[6]=idBloc;
for (int i=0;i<4;i++) {
whatTD[i]=whatToDo[4*idBloc+i];
addrPtSh[i]=addrPt[4*idBloc+i];
addrBlSh[i]=addrBloc[4*idBloc+i];
/*if (array[0]==0) {
printf("BlocIni= %i; NBloc= %i; AddrPt= %i; AddrBl= %i; WTd= %i \n",idBloc, i, addrPtSh[i], addrBlSh[i], whatTD[i] );
}*/
}
}
__syncthreads();
int offsPt = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3]; // useless in fact
int nPtBlock1 = array[4];
int blockLevel = array[5];
int nPts = nPtBlock0 + nPtBlock1;
int ptToBeComputed = iThread+offsPt;
idBloc = array[6];
if (ptToBeComputed<nPts) {
// Oki, copy dots at the proper location
int addr_pt = startIndexBl0+ptToBeComputed;
int id_pt=dotIndexes[addr_pt];
//if (id_pt<0) {printf("Ca joue pas \n");}
int inBloc1 = (ptToBeComputed>=nPtBlock0);
int rK, wTD;
//int* tabDest;
//int isSet=0;
//int shouldBeSomeWhere = 0;
// Let's handle bloc Neg = 0
wTD=whatTD[0];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlNeg[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
rK=rkBlNeg[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
}
wTD=whatTD[1];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlPos[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK]=id_pt;
}
rK=rkBlPos[addr_pt+nDots];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
}
}
wTD=whatTD[2];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid0[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n",(addrPtSh[2]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK]=id_pt;
}
rK=rkBlMid0[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[2]+rK+nPtBlMid0[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
}
}
wTD=whatTD[3];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid1[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK]=id_pt;
}
rK=rkBlMid1[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK+nPtBlMid1[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
}
}
//if ((isSet==0)&&(shouldBeSomeWhere==1)) {printf("De bleu! Le point %i n'a été mis nulle part! [%i, %i, %i, %i]\n", id_pt, whatTD[0], whatTD[1], whatTD[2], whatTD[3]);}
}
if ((iThread==0)&&(offsPt==0)) {
// needs to fill bloc properties
for (int i=0;i<4;i++) {
int wTD = wTD=whatTD[i];
int idNewBloc = addrBlSh[i];
int nPtInBloc0, nPtInBloc1;
if (i==0) {
nPtInBloc0 = nPtBlNeg[idBloc];
nPtInBloc1 = nPtBlNeg[idBloc+nBlocks];
}
if (i==1) {
nPtInBloc0 = nPtBlPos[idBloc];
nPtInBloc1 = nPtBlPos[idBloc+nBlocks];
}
if (i==2) {
nPtInBloc0 = nPtBlMid0[idBloc];
nPtInBloc1 = nPtBlMid0[idBloc+nBlocks];
}
if (i==3) {
nPtInBloc0 = nPtBlMid1[idBloc];
nPtInBloc1 = nPtBlMid1[idBloc+nBlocks];
}
//printf("\n idNewBloc = %i, on en fait %i \n nPtInBloc0 = %i, nPtInBloc1 = %i , addrPtSh = %i \n",idNewBloc, wTD,nPtInBloc0,nPtInBloc1, addrPtSh[i]);
if (wTD==0) {
//SPLIT
//printf("SPLIT!!\n");
blKeep_stBl0[idNewBloc]=addrPtSh[i];
blKeep_nPtBl0[idNewBloc]=nPtInBloc0;
blKeep_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blKeep_nPtBl1[idNewBloc]=nPtInBloc1;
blKeep_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==1) {
//KEEP
blFinal_stBl0[idNewBloc]=addrPtSh[i];
blFinal_nPtBl0[idNewBloc]=nPtInBloc0;
blFinal_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blFinal_nPtBl1[idNewBloc]=nPtInBloc1;
blFinal_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==2) {
//DISCARD
blDiscard_stBl0[idNewBloc]=addrPtSh[i];
blDiscard_nPtBl0[idNewBloc]=nPtInBloc0;
blDiscard_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blDiscard_nPtBl1[idNewBloc]=nPtInBloc1;
blDiscard_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
}
}
} | .text
.file "gpuDispatchDots.hip"
.globl __device_stub__dispatchDots # -- Begin function __device_stub__dispatchDots
.p2align 4, 0x90
.type __device_stub__dispatchDots,@function
__device_stub__dispatchDots: # @__device_stub__dispatchDots
.cfi_startproc
# %bb.0:
subq $424, %rsp # imm = 0x1A8
.cfi_def_cfa_offset 432
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 432(%rsp), %rax
movq %rax, 144(%rsp)
leaq 440(%rsp), %rax
movq %rax, 152(%rsp)
leaq 448(%rsp), %rax
movq %rax, 160(%rsp)
leaq 456(%rsp), %rax
movq %rax, 168(%rsp)
leaq 464(%rsp), %rax
movq %rax, 176(%rsp)
leaq 472(%rsp), %rax
movq %rax, 184(%rsp)
leaq 480(%rsp), %rax
movq %rax, 192(%rsp)
leaq 488(%rsp), %rax
movq %rax, 200(%rsp)
leaq 496(%rsp), %rax
movq %rax, 208(%rsp)
leaq 504(%rsp), %rax
movq %rax, 216(%rsp)
leaq 512(%rsp), %rax
movq %rax, 224(%rsp)
leaq 520(%rsp), %rax
movq %rax, 232(%rsp)
leaq 528(%rsp), %rax
movq %rax, 240(%rsp)
leaq 536(%rsp), %rax
movq %rax, 248(%rsp)
leaq 544(%rsp), %rax
movq %rax, 256(%rsp)
leaq 552(%rsp), %rax
movq %rax, 264(%rsp)
leaq 560(%rsp), %rax
movq %rax, 272(%rsp)
leaq 568(%rsp), %rax
movq %rax, 280(%rsp)
leaq 576(%rsp), %rax
movq %rax, 288(%rsp)
leaq 584(%rsp), %rax
movq %rax, 296(%rsp)
leaq 592(%rsp), %rax
movq %rax, 304(%rsp)
leaq 600(%rsp), %rax
movq %rax, 312(%rsp)
leaq 608(%rsp), %rax
movq %rax, 320(%rsp)
leaq 616(%rsp), %rax
movq %rax, 328(%rsp)
leaq 624(%rsp), %rax
movq %rax, 336(%rsp)
leaq 632(%rsp), %rax
movq %rax, 344(%rsp)
leaq 640(%rsp), %rax
movq %rax, 352(%rsp)
leaq 648(%rsp), %rax
movq %rax, 360(%rsp)
leaq 656(%rsp), %rax
movq %rax, 368(%rsp)
leaq 664(%rsp), %rax
movq %rax, 376(%rsp)
leaq 672(%rsp), %rax
movq %rax, 384(%rsp)
leaq 680(%rsp), %rax
movq %rax, 392(%rsp)
leaq 688(%rsp), %rax
movq %rax, 400(%rsp)
leaq 696(%rsp), %rax
movq %rax, 408(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $dispatchDots, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $440, %rsp # imm = 0x1B8
.cfi_adjust_cfa_offset -440
retq
.Lfunc_end0:
.size __device_stub__dispatchDots, .Lfunc_end0-__device_stub__dispatchDots
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $dispatchDots, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type dispatchDots,@object # @dispatchDots
.section .rodata,"a",@progbits
.globl dispatchDots
.p2align 3, 0x0
dispatchDots:
.quad __device_stub__dispatchDots
.size dispatchDots, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "dispatchDots"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__dispatchDots
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym dispatchDots
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00026168_00000000-6_gpuDispatchDots.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_
.type _Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_, @function
_Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $712, %rsp
.cfi_def_cfa_offset 720
movq %rdi, 296(%rsp)
movq %rsi, 288(%rsp)
movq %rdx, 280(%rsp)
movq %rcx, 272(%rsp)
movq %r8, 264(%rsp)
movq %r9, 256(%rsp)
movq 720(%rsp), %rax
movq %rax, 248(%rsp)
movq 728(%rsp), %rax
movq %rax, 240(%rsp)
movq 736(%rsp), %rax
movq %rax, 232(%rsp)
movq 744(%rsp), %rax
movq %rax, 224(%rsp)
movq 752(%rsp), %rax
movq %rax, 216(%rsp)
movq 760(%rsp), %rax
movq %rax, 208(%rsp)
movq 768(%rsp), %rax
movq %rax, 200(%rsp)
movq 776(%rsp), %rax
movq %rax, 192(%rsp)
movq 784(%rsp), %rax
movq %rax, 184(%rsp)
movq 792(%rsp), %rax
movq %rax, 176(%rsp)
movq 816(%rsp), %rax
movq %rax, 168(%rsp)
movq 824(%rsp), %rax
movq %rax, 160(%rsp)
movq 832(%rsp), %rax
movq %rax, 152(%rsp)
movq 840(%rsp), %rax
movq %rax, 144(%rsp)
movq 848(%rsp), %rax
movq %rax, 136(%rsp)
movq 856(%rsp), %rax
movq %rax, 128(%rsp)
movq 864(%rsp), %rax
movq %rax, 120(%rsp)
movq 872(%rsp), %rax
movq %rax, 112(%rsp)
movq 880(%rsp), %rax
movq %rax, 104(%rsp)
movq 888(%rsp), %rax
movq %rax, 96(%rsp)
movq 896(%rsp), %rax
movq %rax, 88(%rsp)
movq 904(%rsp), %rax
movq %rax, 80(%rsp)
movq 912(%rsp), %rax
movq %rax, 72(%rsp)
movq 920(%rsp), %rax
movq %rax, 64(%rsp)
movq 928(%rsp), %rax
movq %rax, 56(%rsp)
movq 936(%rsp), %rax
movq %rax, 48(%rsp)
movq 944(%rsp), %rax
movq %rax, 40(%rsp)
movq 952(%rsp), %rax
movq %rax, 32(%rsp)
movq 960(%rsp), %rax
movq %rax, 24(%rsp)
movq 968(%rsp), %rax
movq %rax, 16(%rsp)
movq 976(%rsp), %rax
movq %rax, 8(%rsp)
movq 984(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 696(%rsp)
xorl %eax, %eax
leaq 296(%rsp), %rax
movq %rax, 368(%rsp)
leaq 288(%rsp), %rax
movq %rax, 376(%rsp)
leaq 280(%rsp), %rax
movq %rax, 384(%rsp)
leaq 272(%rsp), %rax
movq %rax, 392(%rsp)
leaq 264(%rsp), %rax
movq %rax, 400(%rsp)
leaq 256(%rsp), %rax
movq %rax, 408(%rsp)
leaq 248(%rsp), %rax
movq %rax, 416(%rsp)
leaq 240(%rsp), %rax
movq %rax, 424(%rsp)
leaq 232(%rsp), %rax
movq %rax, 432(%rsp)
leaq 224(%rsp), %rax
movq %rax, 440(%rsp)
leaq 216(%rsp), %rax
movq %rax, 448(%rsp)
leaq 208(%rsp), %rax
movq %rax, 456(%rsp)
leaq 200(%rsp), %rax
movq %rax, 464(%rsp)
leaq 192(%rsp), %rax
movq %rax, 472(%rsp)
leaq 184(%rsp), %rax
movq %rax, 480(%rsp)
leaq 176(%rsp), %rax
movq %rax, 488(%rsp)
leaq 800(%rsp), %rax
movq %rax, 496(%rsp)
leaq 808(%rsp), %rax
movq %rax, 504(%rsp)
leaq 168(%rsp), %rax
movq %rax, 512(%rsp)
leaq 160(%rsp), %rax
movq %rax, 520(%rsp)
leaq 152(%rsp), %rax
movq %rax, 528(%rsp)
leaq 144(%rsp), %rax
movq %rax, 536(%rsp)
leaq 136(%rsp), %rax
movq %rax, 544(%rsp)
leaq 128(%rsp), %rax
movq %rax, 552(%rsp)
leaq 120(%rsp), %rax
movq %rax, 560(%rsp)
leaq 112(%rsp), %rax
movq %rax, 568(%rsp)
leaq 104(%rsp), %rax
movq %rax, 576(%rsp)
leaq 96(%rsp), %rax
movq %rax, 584(%rsp)
leaq 88(%rsp), %rax
movq %rax, 592(%rsp)
leaq 80(%rsp), %rax
movq %rax, 600(%rsp)
leaq 72(%rsp), %rax
movq %rax, 608(%rsp)
leaq 64(%rsp), %rax
movq %rax, 616(%rsp)
leaq 56(%rsp), %rax
movq %rax, 624(%rsp)
leaq 48(%rsp), %rax
movq %rax, 632(%rsp)
leaq 40(%rsp), %rax
movq %rax, 640(%rsp)
leaq 32(%rsp), %rax
movq %rax, 648(%rsp)
leaq 24(%rsp), %rax
movq %rax, 656(%rsp)
leaq 16(%rsp), %rax
movq %rax, 664(%rsp)
leaq 8(%rsp), %rax
movq %rax, 672(%rsp)
movq %rsp, %rax
movq %rax, 680(%rsp)
movl $1, 320(%rsp)
movl $1, 324(%rsp)
movl $1, 328(%rsp)
movl $1, 332(%rsp)
movl $1, 336(%rsp)
movl $1, 340(%rsp)
leaq 312(%rsp), %rcx
leaq 304(%rsp), %rdx
leaq 332(%rsp), %rsi
leaq 320(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 696(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $712, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 312(%rsp)
.cfi_def_cfa_offset 728
pushq 312(%rsp)
.cfi_def_cfa_offset 736
leaq 384(%rsp), %r9
movq 348(%rsp), %rcx
movl 356(%rsp), %r8d
movq 336(%rsp), %rsi
movl 344(%rsp), %edx
leaq dispatchDots(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 720
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_, .-_Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_
.globl dispatchDots
.type dispatchDots, @function
dispatchDots:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 280(%rsp)
.cfi_def_cfa_offset 24
pushq 280(%rsp)
.cfi_def_cfa_offset 32
pushq 280(%rsp)
.cfi_def_cfa_offset 40
pushq 280(%rsp)
.cfi_def_cfa_offset 48
pushq 280(%rsp)
.cfi_def_cfa_offset 56
pushq 280(%rsp)
.cfi_def_cfa_offset 64
pushq 280(%rsp)
.cfi_def_cfa_offset 72
pushq 280(%rsp)
.cfi_def_cfa_offset 80
pushq 280(%rsp)
.cfi_def_cfa_offset 88
pushq 280(%rsp)
.cfi_def_cfa_offset 96
pushq 280(%rsp)
.cfi_def_cfa_offset 104
pushq 280(%rsp)
.cfi_def_cfa_offset 112
pushq 280(%rsp)
.cfi_def_cfa_offset 120
pushq 280(%rsp)
.cfi_def_cfa_offset 128
pushq 280(%rsp)
.cfi_def_cfa_offset 136
pushq 280(%rsp)
.cfi_def_cfa_offset 144
pushq 280(%rsp)
.cfi_def_cfa_offset 152
pushq 280(%rsp)
.cfi_def_cfa_offset 160
pushq 280(%rsp)
.cfi_def_cfa_offset 168
pushq 280(%rsp)
.cfi_def_cfa_offset 176
pushq 280(%rsp)
.cfi_def_cfa_offset 184
pushq 280(%rsp)
.cfi_def_cfa_offset 192
movl 280(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 200
movl 280(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 208
pushq 280(%rsp)
.cfi_def_cfa_offset 216
pushq 280(%rsp)
.cfi_def_cfa_offset 224
pushq 280(%rsp)
.cfi_def_cfa_offset 232
pushq 280(%rsp)
.cfi_def_cfa_offset 240
pushq 280(%rsp)
.cfi_def_cfa_offset 248
pushq 280(%rsp)
.cfi_def_cfa_offset 256
pushq 280(%rsp)
.cfi_def_cfa_offset 264
pushq 280(%rsp)
.cfi_def_cfa_offset 272
pushq 280(%rsp)
.cfi_def_cfa_offset 280
pushq 280(%rsp)
.cfi_def_cfa_offset 288
call _Z108__device_stub__Z12dispatchDotsPiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_iiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_
addq $280, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size dispatchDots, .-dispatchDots
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "dispatchDots"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq dispatchDots(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpuDispatchDots.hip"
.globl __device_stub__dispatchDots # -- Begin function __device_stub__dispatchDots
.p2align 4, 0x90
.type __device_stub__dispatchDots,@function
__device_stub__dispatchDots: # @__device_stub__dispatchDots
.cfi_startproc
# %bb.0:
subq $424, %rsp # imm = 0x1A8
.cfi_def_cfa_offset 432
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 432(%rsp), %rax
movq %rax, 144(%rsp)
leaq 440(%rsp), %rax
movq %rax, 152(%rsp)
leaq 448(%rsp), %rax
movq %rax, 160(%rsp)
leaq 456(%rsp), %rax
movq %rax, 168(%rsp)
leaq 464(%rsp), %rax
movq %rax, 176(%rsp)
leaq 472(%rsp), %rax
movq %rax, 184(%rsp)
leaq 480(%rsp), %rax
movq %rax, 192(%rsp)
leaq 488(%rsp), %rax
movq %rax, 200(%rsp)
leaq 496(%rsp), %rax
movq %rax, 208(%rsp)
leaq 504(%rsp), %rax
movq %rax, 216(%rsp)
leaq 512(%rsp), %rax
movq %rax, 224(%rsp)
leaq 520(%rsp), %rax
movq %rax, 232(%rsp)
leaq 528(%rsp), %rax
movq %rax, 240(%rsp)
leaq 536(%rsp), %rax
movq %rax, 248(%rsp)
leaq 544(%rsp), %rax
movq %rax, 256(%rsp)
leaq 552(%rsp), %rax
movq %rax, 264(%rsp)
leaq 560(%rsp), %rax
movq %rax, 272(%rsp)
leaq 568(%rsp), %rax
movq %rax, 280(%rsp)
leaq 576(%rsp), %rax
movq %rax, 288(%rsp)
leaq 584(%rsp), %rax
movq %rax, 296(%rsp)
leaq 592(%rsp), %rax
movq %rax, 304(%rsp)
leaq 600(%rsp), %rax
movq %rax, 312(%rsp)
leaq 608(%rsp), %rax
movq %rax, 320(%rsp)
leaq 616(%rsp), %rax
movq %rax, 328(%rsp)
leaq 624(%rsp), %rax
movq %rax, 336(%rsp)
leaq 632(%rsp), %rax
movq %rax, 344(%rsp)
leaq 640(%rsp), %rax
movq %rax, 352(%rsp)
leaq 648(%rsp), %rax
movq %rax, 360(%rsp)
leaq 656(%rsp), %rax
movq %rax, 368(%rsp)
leaq 664(%rsp), %rax
movq %rax, 376(%rsp)
leaq 672(%rsp), %rax
movq %rax, 384(%rsp)
leaq 680(%rsp), %rax
movq %rax, 392(%rsp)
leaq 688(%rsp), %rax
movq %rax, 400(%rsp)
leaq 696(%rsp), %rax
movq %rax, 408(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $dispatchDots, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $440, %rsp # imm = 0x1B8
.cfi_adjust_cfa_offset -440
retq
.Lfunc_end0:
.size __device_stub__dispatchDots, .Lfunc_end0-__device_stub__dispatchDots
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $dispatchDots, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type dispatchDots,@object # @dispatchDots
.section .rodata,"a",@progbits
.globl dispatchDots
.p2align 3, 0x0
dispatchDots:
.quad __device_stub__dispatchDots
.size dispatchDots, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "dispatchDots"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__dispatchDots
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym dispatchDots
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //Based on the work of Andrew Krepps
#include <stdio.h>
#include <chrono>
__global__ void saxpy_pinned(int n, float a, float *x, float *y) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
y[i] = a*x[i] + y[i];
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = (1 << 20);
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
printf("Total number of operations: %d\n", totalThreads);
// =======================================================================================
// Start my code
float *h_a_page, *h_b_page, *h_res_page;
float *h_a_pin, *h_b_pin, *h_res_pin;
float *d_a, *d_b;
// host pageable memory
h_a_page = (float*)malloc(totalThreads*sizeof(float));
h_b_page = (float*)malloc(totalThreads*sizeof(float));
h_res_page = (float*)malloc(totalThreads*sizeof(float));
// host pinned memory
cudaMallocHost((void**)&h_a_pin, totalThreads*sizeof(float));
cudaMallocHost((void**)&h_b_pin, totalThreads*sizeof(float));
cudaMallocHost((void**)&h_res_pin, totalThreads*sizeof(float));
// device memory
cudaMalloc((void**)&d_a, totalThreads*sizeof(float));
cudaMalloc((void**)&d_b, totalThreads*sizeof(float));
for(int i=0; i< totalThreads; i++) {
h_a_page[i] = 1.0f;
h_a_pin[i] = 1.0f;
h_b_page[i] = 2.0f;
h_b_pin[i] = 2.0f;
}
// Implement with paged memory
auto start_paged = std::chrono::high_resolution_clock::now();
cudaMemcpy(d_a, h_a_page, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b_page, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
cudaMemcpy(h_res_page, d_b, totalThreads*sizeof(float), cudaMemcpyDeviceToHost);
auto stop_paged = std::chrono::high_resolution_clock::now();
// Implement with pinned memory
auto start_pinned = std::chrono::high_resolution_clock::now();
cudaMemcpy(d_a, h_a_pin, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b_pin, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
cudaMemcpy(h_res_pin, d_b, totalThreads*sizeof(float), cudaMemcpyDeviceToHost);
auto stop_pinned = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff_paged = stop_paged-start_paged;
std::chrono::duration<double> diff_pinned = stop_pinned-start_pinned;
bool equal=true;
for(int i=0; i<totalThreads; i++) {
if(h_res_page[i] != h_res_pin[i]) {
printf("pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!", i, h_res_page[i], i, h_res_pin[i]);
equal=false;
}
}
if(equal)
printf("All results for paged and pinned memory are the same!\n\n");
if(diff_paged.count() > diff_pinned.count())
printf("Pinned memory ran %fs faster than paged, or %fx as fast\n", diff_paged.count()-diff_pinned.count(), diff_paged.count()/diff_pinned.count());
else
printf("Paged memory ran %fs faster than pinned, or %fx as fast\n", diff_pinned.count()-diff_paged.count(), diff_pinned.count()/diff_paged.count());
printf("runtime for paged: %f\nruntime for pinned: %f\n\n", diff_paged.count(), diff_pinned.count());
cudaFree(d_a);
cudaFree(d_b);
cudaFreeHost(h_a_pin);
cudaFreeHost(h_b_pin);
cudaFreeHost(h_res_pin);
} | code for sm_80
Function : _Z12saxpy_pinnedifPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FFMA R7, R2, c[0x0][0x164], R7 ; /* 0x0000590002077a23 */
/* 0x004fca0000000007 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //Based on the work of Andrew Krepps
#include <stdio.h>
#include <chrono>
__global__ void saxpy_pinned(int n, float a, float *x, float *y) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
y[i] = a*x[i] + y[i];
}
// Driver: times one SAXPY round trip (H2D copies, kernel, D2H copy) using
// pageable host memory and again using pinned host memory, then compares.
// argv[1] = total thread count, argv[2] = block size (both optional).
int main(int argc, char** argv)
{
	// read command line arguments
	int totalThreads = (1 << 20);
	int blockSize = 256;
	if (argc >= 2) {
		totalThreads = atoi(argv[1]);
	}
	if (argc >= 3) {
		blockSize = atoi(argv[2]);
	}
	// atoi() returns 0 for non-numeric input; reject non-positive values
	// up front so the division below cannot divide by zero.
	if (totalThreads <= 0 || blockSize <= 0) {
		fprintf(stderr, "Thread count and block size must be positive integers\n");
		return 1;
	}
	int numBlocks = totalThreads/blockSize;
	// round the grid up when the block size does not divide the thread count
	if (totalThreads % blockSize != 0) {
		++numBlocks;
		totalThreads = numBlocks*blockSize;
		printf("Warning: Total thread count is not evenly divisible by the block size\n");
		printf("The total number of threads will be rounded up to %d\n", totalThreads);
	}
	printf("Total number of operations: %d\n", totalThreads);
	// =======================================================================================
	// Start my code
	float *h_a_page, *h_b_page, *h_res_page;
	float *h_a_pin, *h_b_pin, *h_res_pin;
	float *d_a, *d_b;
	// host pageable memory
	h_a_page = (float*)malloc(totalThreads*sizeof(float));
	h_b_page = (float*)malloc(totalThreads*sizeof(float));
	h_res_page = (float*)malloc(totalThreads*sizeof(float));
	if (!h_a_page || !h_b_page || !h_res_page) {
		fprintf(stderr, "Host allocation failed\n");
		return 1;
	}
	// host pinned memory
	cudaMallocHost((void**)&h_a_pin, totalThreads*sizeof(float));
	cudaMallocHost((void**)&h_b_pin, totalThreads*sizeof(float));
	cudaMallocHost((void**)&h_res_pin, totalThreads*sizeof(float));
	// device memory
	cudaMalloc((void**)&d_a, totalThreads*sizeof(float));
	cudaMalloc((void**)&d_b, totalThreads*sizeof(float));
	cudaError_t allocErr = cudaGetLastError();
	if (allocErr != cudaSuccess) {
		fprintf(stderr, "CUDA allocation failed: %s\n", cudaGetErrorString(allocErr));
		return 1;
	}
	for(int i=0; i< totalThreads; i++) {
		h_a_page[i] = 1.0f;
		h_a_pin[i] = 1.0f;
		h_b_page[i] = 2.0f;
		h_b_pin[i] = 2.0f;
	}
	// Pass 1: pageable host memory (the blocking device-to-host copy after
	// the launch also synchronizes, so the stop timestamp covers the kernel)
	auto start_paged = std::chrono::high_resolution_clock::now();
	cudaMemcpy(d_a, h_a_page, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b_page, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess)
		fprintf(stderr, "Kernel launch (paged pass) failed: %s\n", cudaGetErrorString(launchErr));
	cudaMemcpy(h_res_page, d_b, totalThreads*sizeof(float), cudaMemcpyDeviceToHost);
	auto stop_paged = std::chrono::high_resolution_clock::now();
	// Pass 2: pinned host memory
	auto start_pinned = std::chrono::high_resolution_clock::now();
	cudaMemcpy(d_a, h_a_pin, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b_pin, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess)
		fprintf(stderr, "Kernel launch (pinned pass) failed: %s\n", cudaGetErrorString(launchErr));
	cudaMemcpy(h_res_pin, d_b, totalThreads*sizeof(float), cudaMemcpyDeviceToHost);
	auto stop_pinned = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> diff_paged = stop_paged-start_paged;
	std::chrono::duration<double> diff_pinned = stop_pinned-start_pinned;
	// both passes ran the same kernel on the same inputs, so the results
	// are expected to agree bit-for-bit
	bool equal=true;
	for(int i=0; i<totalThreads; i++) {
		if(h_res_page[i] != h_res_pin[i]) {
			printf("pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!", i, h_res_page[i], i, h_res_pin[i]);
			equal=false;
		}
	}
	if(equal)
		printf("All results for paged and pinned memory are the same!\n\n");
	if(diff_paged.count() > diff_pinned.count())
		printf("Pinned memory ran %fs faster than paged, or %fx as fast\n", diff_paged.count()-diff_pinned.count(), diff_paged.count()/diff_pinned.count());
	else
		printf("Paged memory ran %fs faster than pinned, or %fx as fast\n", diff_pinned.count()-diff_paged.count(), diff_pinned.count()/diff_paged.count());
	printf("runtime for paged: %f\nruntime for pinned: %f\n\n", diff_paged.count(), diff_pinned.count());
	// release everything, including the pageable buffers that were
	// previously leaked
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFreeHost(h_a_pin);
	cudaFreeHost(h_b_pin);
	cudaFreeHost(h_res_pin);
	free(h_a_page);
	free(h_b_page);
	free(h_res_page);
	return 0;
} | .file "tmpxft_0019cfb8_00000000-6_assignment.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2163:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2163:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
.type _Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_, @function
_Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_:
.LFB2185:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movss %xmm0, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12saxpy_pinnedifPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2185:
.size _Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_, .-_Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
.globl _Z12saxpy_pinnedifPfS_
.type _Z12saxpy_pinnedifPfS_, @function
_Z12saxpy_pinnedifPfS_:
.LFB2186:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2186:
.size _Z12saxpy_pinnedifPfS_, .-_Z12saxpy_pinnedifPfS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Warning: Total thread count is not evenly divisible by the block size\n"
.align 8
.LC1:
.string "The total number of threads will be rounded up to %d\n"
.align 8
.LC2:
.string "Total number of operations: %d\n"
.align 8
.LC6:
.string "pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!"
.align 8
.LC7:
.string "All results for paged and pinned memory are the same!\n\n"
.align 8
.LC8:
.string "Pinned memory ran %fs faster than paged, or %fx as fast\n"
.align 8
.LC9:
.string "Paged memory ran %fs faster than pinned, or %fx as fast\n"
.align 8
.LC10:
.string "runtime for paged: %f\nruntime for pinned: %f\n\n"
.text
.globl main
.type main, @function
main:
.LFB2156:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
cmpl $1, %edi
jg .L37
movl $1048576, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $4194304, %edi
call malloc@PLT
movq %rax, %r13
movl $4194304, %edi
call malloc@PLT
movq %rax, %r12
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbp
leaq 40(%rsp), %rdi
movl $4194304, %esi
call cudaMallocHost@PLT
leaq 48(%rsp), %rdi
movl $4194304, %esi
call cudaMallocHost@PLT
leaq 56(%rsp), %rdi
movl $4194304, %esi
call cudaMallocHost@PLT
leaq 64(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
movl $256, (%rsp)
movl $4096, 8(%rsp)
movl $1048576, %r15d
movl $4194304, %ebx
movl $1048576, %r14d
.L27:
movl $0, %eax
movss .LC3(%rip), %xmm1
movss .LC4(%rip), %xmm0
.L16:
movss %xmm1, 0(%r13,%rax)
movq 40(%rsp), %rdx
movss %xmm1, (%rdx,%rax)
movss %xmm0, (%r12,%rax)
movq 48(%rsp), %rdx
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq %rbx, %rax
jne .L16
.L15:
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, 16(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl (%rsp), %eax
movl %eax, %r12d
movl %eax, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl 8(%rsp), %eax
movl %eax, %r13d
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L17:
movl $2, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, (%rsp)
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, 24(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq 40(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 48(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %r12d, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl %r13d, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L18:
movl $2, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq (%rsp), %rdx
movq 16(%rsp), %rcx
subq %rcx, %rdx
pxor %xmm1, %xmm1
cvtsi2sdq %rdx, %xmm1
movsd .LC5(%rip), %xmm0
divsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp)
movq 24(%rsp), %rcx
subq %rcx, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
divsd %xmm0, %xmm1
movsd %xmm1, (%rsp)
testl %r15d, %r15d
jle .L19
movl $0, %ebx
movl $1, %edx
leaq .LC6(%rip), %r12
jmp .L22
.L37:
movl %edi, %ebx
movq %rsi, %rbp
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r15d
movl $256, (%rsp)
cmpl $2, %ebx
jg .L40
.L13:
movl %r15d, %eax
movl (%rsp), %ecx
cltd
idivl %ecx
movl %eax, 8(%rsp)
testl %edx, %edx
je .L14
addl $1, %eax
movl %eax, 8(%rsp)
imull %eax, %ecx
movl %ecx, %r15d
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r15d, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L14:
movl %r15d, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq %r15d, %r14
leaq 0(,%r14,4), %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r13
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
leaq 40(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocHost@PLT
leaq 48(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocHost@PLT
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocHost@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
testl %r15d, %r15d
jg .L27
jmp .L15
.L40:
movq 16(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, (%rsp)
jmp .L13
.L38:
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movss .LC3(%rip), %xmm0
movl %r15d, %edi
call _Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
jmp .L17
.L39:
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movss .LC3(%rip), %xmm0
movl %r15d, %edi
call _Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
jmp .L18
.L30:
movl %ebx, %edx
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl %ebx, %ecx
movq %r12, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movl $0, %edx
.L20:
addq $1, %rbx
cmpq %r14, %rbx
je .L41
.L22:
movss 0(%rbp,%rbx,4), %xmm0
movq 56(%rsp), %rax
movss (%rax,%rbx,4), %xmm1
ucomiss %xmm1, %xmm0
jp .L30
je .L20
jmp .L30
.L41:
testb %dl, %dl
jne .L19
.L23:
movsd 8(%rsp), %xmm0
movsd (%rsp), %xmm2
comisd %xmm2, %xmm0
jbe .L34
movapd %xmm0, %xmm1
divsd %xmm2, %xmm1
subsd %xmm2, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
.L26:
movsd (%rsp), %xmm1
movsd 8(%rsp), %xmm0
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFreeHost@PLT
movq 48(%rsp), %rdi
call cudaFreeHost@PLT
movq 56(%rsp), %rdi
call cudaFreeHost@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L42
movl $0, %eax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L23
.L34:
movsd (%rsp), %xmm0
movapd %xmm0, %xmm1
movsd 8(%rsp), %xmm3
divsd %xmm3, %xmm1
subsd %xmm3, %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
jmp .L26
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2156:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC11:
.string "_Z12saxpy_pinnedifPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2188:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z12saxpy_pinnedifPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2188:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1065353216
.align 4
.LC4:
.long 1073741824
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC5:
.long 0
.long 1104006501
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //Based on the work of Andrew Krepps
#include <stdio.h>
#include <chrono>
// SAXPY: y[i] = a * x[i] + y[i] for every element with index below n.
// One element per thread; expects a 1-D launch. Threads whose global
// index falls past the end of the arrays simply exit (grid-tail guard).
__global__ void saxpy_pinned(int n, float a, float *x, float *y) {
	const int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if (idx >= n)
		return;
	y[idx] += a * x[idx];
}
// Driver: times one SAXPY round trip (H2D copies, kernel, D2H copy) using
// pageable host memory and again using pinned host memory, then compares.
// argv[1] = total thread count, argv[2] = block size (both optional).
int main(int argc, char** argv)
{
	// read command line arguments
	int totalThreads = (1 << 20);
	int blockSize = 256;
	if (argc >= 2) {
		totalThreads = atoi(argv[1]);
	}
	if (argc >= 3) {
		blockSize = atoi(argv[2]);
	}
	// atoi() returns 0 for non-numeric input; reject non-positive values
	// up front so the division below cannot divide by zero.
	if (totalThreads <= 0 || blockSize <= 0) {
		fprintf(stderr, "Thread count and block size must be positive integers\n");
		return 1;
	}
	int numBlocks = totalThreads/blockSize;
	// round the grid up when the block size does not divide the thread count
	if (totalThreads % blockSize != 0) {
		++numBlocks;
		totalThreads = numBlocks*blockSize;
		printf("Warning: Total thread count is not evenly divisible by the block size\n");
		printf("The total number of threads will be rounded up to %d\n", totalThreads);
	}
	printf("Total number of operations: %d\n", totalThreads);
	// =======================================================================================
	// Start my code
	float *h_a_page, *h_b_page, *h_res_page;
	float *h_a_pin, *h_b_pin, *h_res_pin;
	float *d_a, *d_b;
	// host pageable memory
	h_a_page = (float*)malloc(totalThreads*sizeof(float));
	h_b_page = (float*)malloc(totalThreads*sizeof(float));
	h_res_page = (float*)malloc(totalThreads*sizeof(float));
	if (!h_a_page || !h_b_page || !h_res_page) {
		fprintf(stderr, "Host allocation failed\n");
		return 1;
	}
	// host pinned memory
	cudaMallocHost((void**)&h_a_pin, totalThreads*sizeof(float));
	cudaMallocHost((void**)&h_b_pin, totalThreads*sizeof(float));
	cudaMallocHost((void**)&h_res_pin, totalThreads*sizeof(float));
	// device memory
	cudaMalloc((void**)&d_a, totalThreads*sizeof(float));
	cudaMalloc((void**)&d_b, totalThreads*sizeof(float));
	cudaError_t allocErr = cudaGetLastError();
	if (allocErr != cudaSuccess) {
		fprintf(stderr, "CUDA allocation failed: %s\n", cudaGetErrorString(allocErr));
		return 1;
	}
	for(int i=0; i< totalThreads; i++) {
		h_a_page[i] = 1.0f;
		h_a_pin[i] = 1.0f;
		h_b_page[i] = 2.0f;
		h_b_pin[i] = 2.0f;
	}
	// Pass 1: pageable host memory (the blocking device-to-host copy after
	// the launch also synchronizes, so the stop timestamp covers the kernel)
	auto start_paged = std::chrono::high_resolution_clock::now();
	cudaMemcpy(d_a, h_a_page, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b_page, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess)
		fprintf(stderr, "Kernel launch (paged pass) failed: %s\n", cudaGetErrorString(launchErr));
	cudaMemcpy(h_res_page, d_b, totalThreads*sizeof(float), cudaMemcpyDeviceToHost);
	auto stop_paged = std::chrono::high_resolution_clock::now();
	// Pass 2: pinned host memory
	auto start_pinned = std::chrono::high_resolution_clock::now();
	cudaMemcpy(d_a, h_a_pin, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b_pin, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess)
		fprintf(stderr, "Kernel launch (pinned pass) failed: %s\n", cudaGetErrorString(launchErr));
	cudaMemcpy(h_res_pin, d_b, totalThreads*sizeof(float), cudaMemcpyDeviceToHost);
	auto stop_pinned = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> diff_paged = stop_paged-start_paged;
	std::chrono::duration<double> diff_pinned = stop_pinned-start_pinned;
	// both passes ran the same kernel on the same inputs, so the results
	// are expected to agree bit-for-bit
	bool equal=true;
	for(int i=0; i<totalThreads; i++) {
		if(h_res_page[i] != h_res_pin[i]) {
			printf("pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!", i, h_res_page[i], i, h_res_pin[i]);
			equal=false;
		}
	}
	if(equal)
		printf("All results for paged and pinned memory are the same!\n\n");
	if(diff_paged.count() > diff_pinned.count())
		printf("Pinned memory ran %fs faster than paged, or %fx as fast\n", diff_paged.count()-diff_pinned.count(), diff_paged.count()/diff_pinned.count());
	else
		printf("Paged memory ran %fs faster than pinned, or %fx as fast\n", diff_pinned.count()-diff_paged.count(), diff_pinned.count()/diff_paged.count());
	printf("runtime for paged: %f\nruntime for pinned: %f\n\n", diff_paged.count(), diff_pinned.count());
	// release everything, including the pageable buffers that were
	// previously leaked
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFreeHost(h_a_pin);
	cudaFreeHost(h_b_pin);
	cudaFreeHost(h_res_pin);
	free(h_a_page);
	free(h_b_page);
	free(h_res_page);
	return 0;
} | //Based on the work of Andrew Krepps
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <chrono>
// SAXPY: y[i] = a * x[i] + y[i] for every element with index below n.
// One element per thread; expects a 1-D launch. Threads whose global
// index falls past the end of the arrays simply exit (grid-tail guard).
__global__ void saxpy_pinned(int n, float a, float *x, float *y) {
	const int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if (idx >= n)
		return;
	y[idx] += a * x[idx];
}
// Driver: times one SAXPY round trip (H2D copies, kernel, D2H copy) using
// pageable host memory and again using pinned host memory, then compares.
// argv[1] = total thread count, argv[2] = block size (both optional).
int main(int argc, char** argv)
{
	// read command line arguments
	int totalThreads = (1 << 20);
	int blockSize = 256;
	if (argc >= 2) {
		totalThreads = atoi(argv[1]);
	}
	if (argc >= 3) {
		blockSize = atoi(argv[2]);
	}
	// atoi() returns 0 for non-numeric input; reject non-positive values
	// up front so the division below cannot divide by zero.
	if (totalThreads <= 0 || blockSize <= 0) {
		fprintf(stderr, "Thread count and block size must be positive integers\n");
		return 1;
	}
	int numBlocks = totalThreads/blockSize;
	// round the grid up when the block size does not divide the thread count
	if (totalThreads % blockSize != 0) {
		++numBlocks;
		totalThreads = numBlocks*blockSize;
		printf("Warning: Total thread count is not evenly divisible by the block size\n");
		printf("The total number of threads will be rounded up to %d\n", totalThreads);
	}
	printf("Total number of operations: %d\n", totalThreads);
	// =======================================================================================
	// Start my code
	float *h_a_page, *h_b_page, *h_res_page;
	float *h_a_pin, *h_b_pin, *h_res_pin;
	float *d_a, *d_b;
	// host pageable memory
	h_a_page = (float*)malloc(totalThreads*sizeof(float));
	h_b_page = (float*)malloc(totalThreads*sizeof(float));
	h_res_page = (float*)malloc(totalThreads*sizeof(float));
	if (!h_a_page || !h_b_page || !h_res_page) {
		fprintf(stderr, "Host allocation failed\n");
		return 1;
	}
	// host pinned memory
	hipHostMalloc((void**)&h_a_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	hipHostMalloc((void**)&h_b_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	hipHostMalloc((void**)&h_res_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	// device memory
	hipMalloc((void**)&d_a, totalThreads*sizeof(float));
	hipMalloc((void**)&d_b, totalThreads*sizeof(float));
	hipError_t allocErr = hipGetLastError();
	if (allocErr != hipSuccess) {
		fprintf(stderr, "HIP allocation failed: %s\n", hipGetErrorString(allocErr));
		return 1;
	}
	for(int i=0; i< totalThreads; i++) {
		h_a_page[i] = 1.0f;
		h_a_pin[i] = 1.0f;
		h_b_page[i] = 2.0f;
		h_b_pin[i] = 2.0f;
	}
	// Pass 1: pageable host memory (the blocking device-to-host copy after
	// the launch also synchronizes, so the stop timestamp covers the kernel)
	auto start_paged = std::chrono::high_resolution_clock::now();
	hipMemcpy(d_a, h_a_page, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b_page, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	hipError_t launchErr = hipGetLastError();
	if (launchErr != hipSuccess)
		fprintf(stderr, "Kernel launch (paged pass) failed: %s\n", hipGetErrorString(launchErr));
	hipMemcpy(h_res_page, d_b, totalThreads*sizeof(float), hipMemcpyDeviceToHost);
	auto stop_paged = std::chrono::high_resolution_clock::now();
	// Pass 2: pinned host memory
	auto start_pinned = std::chrono::high_resolution_clock::now();
	hipMemcpy(d_a, h_a_pin, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b_pin, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	launchErr = hipGetLastError();
	if (launchErr != hipSuccess)
		fprintf(stderr, "Kernel launch (pinned pass) failed: %s\n", hipGetErrorString(launchErr));
	hipMemcpy(h_res_pin, d_b, totalThreads*sizeof(float), hipMemcpyDeviceToHost);
	auto stop_pinned = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> diff_paged = stop_paged-start_paged;
	std::chrono::duration<double> diff_pinned = stop_pinned-start_pinned;
	// both passes ran the same kernel on the same inputs, so the results
	// are expected to agree bit-for-bit
	bool equal=true;
	for(int i=0; i<totalThreads; i++) {
		if(h_res_page[i] != h_res_pin[i]) {
			printf("pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!", i, h_res_page[i], i, h_res_pin[i]);
			equal=false;
		}
	}
	if(equal)
		printf("All results for paged and pinned memory are the same!\n\n");
	if(diff_paged.count() > diff_pinned.count())
		printf("Pinned memory ran %fs faster than paged, or %fx as fast\n", diff_paged.count()-diff_pinned.count(), diff_paged.count()/diff_pinned.count());
	else
		printf("Paged memory ran %fs faster than pinned, or %fx as fast\n", diff_pinned.count()-diff_paged.count(), diff_pinned.count()/diff_paged.count());
	printf("runtime for paged: %f\nruntime for pinned: %f\n\n", diff_paged.count(), diff_pinned.count());
	// release everything, including the pageable buffers that were
	// previously leaked
	hipFree(d_a);
	hipFree(d_b);
	hipHostFree(h_a_pin);
	hipHostFree(h_b_pin);
	hipHostFree(h_res_pin);
	free(h_a_page);
	free(h_b_page);
	free(h_res_page);
	return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //Based on the work of Andrew Krepps
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <chrono>
// SAXPY: y[i] = a * x[i] + y[i] for every element with index below n.
// One element per thread; expects a 1-D launch. Threads whose global
// index falls past the end of the arrays simply exit (grid-tail guard).
__global__ void saxpy_pinned(int n, float a, float *x, float *y) {
	const int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if (idx >= n)
		return;
	y[idx] += a * x[idx];
}
// Driver: times one SAXPY round trip (H2D copies, kernel, D2H copy) using
// pageable host memory and again using pinned host memory, then compares.
// argv[1] = total thread count, argv[2] = block size (both optional).
int main(int argc, char** argv)
{
	// read command line arguments
	int totalThreads = (1 << 20);
	int blockSize = 256;
	if (argc >= 2) {
		totalThreads = atoi(argv[1]);
	}
	if (argc >= 3) {
		blockSize = atoi(argv[2]);
	}
	// atoi() returns 0 for non-numeric input; reject non-positive values
	// up front so the division below cannot divide by zero.
	if (totalThreads <= 0 || blockSize <= 0) {
		fprintf(stderr, "Thread count and block size must be positive integers\n");
		return 1;
	}
	int numBlocks = totalThreads/blockSize;
	// round the grid up when the block size does not divide the thread count
	if (totalThreads % blockSize != 0) {
		++numBlocks;
		totalThreads = numBlocks*blockSize;
		printf("Warning: Total thread count is not evenly divisible by the block size\n");
		printf("The total number of threads will be rounded up to %d\n", totalThreads);
	}
	printf("Total number of operations: %d\n", totalThreads);
	// =======================================================================================
	// Start my code
	float *h_a_page, *h_b_page, *h_res_page;
	float *h_a_pin, *h_b_pin, *h_res_pin;
	float *d_a, *d_b;
	// host pageable memory
	h_a_page = (float*)malloc(totalThreads*sizeof(float));
	h_b_page = (float*)malloc(totalThreads*sizeof(float));
	h_res_page = (float*)malloc(totalThreads*sizeof(float));
	if (!h_a_page || !h_b_page || !h_res_page) {
		fprintf(stderr, "Host allocation failed\n");
		return 1;
	}
	// host pinned memory
	hipHostMalloc((void**)&h_a_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	hipHostMalloc((void**)&h_b_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	hipHostMalloc((void**)&h_res_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	// device memory
	hipMalloc((void**)&d_a, totalThreads*sizeof(float));
	hipMalloc((void**)&d_b, totalThreads*sizeof(float));
	hipError_t allocErr = hipGetLastError();
	if (allocErr != hipSuccess) {
		fprintf(stderr, "HIP allocation failed: %s\n", hipGetErrorString(allocErr));
		return 1;
	}
	for(int i=0; i< totalThreads; i++) {
		h_a_page[i] = 1.0f;
		h_a_pin[i] = 1.0f;
		h_b_page[i] = 2.0f;
		h_b_pin[i] = 2.0f;
	}
	// Pass 1: pageable host memory (the blocking device-to-host copy after
	// the launch also synchronizes, so the stop timestamp covers the kernel)
	auto start_paged = std::chrono::high_resolution_clock::now();
	hipMemcpy(d_a, h_a_page, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b_page, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	hipError_t launchErr = hipGetLastError();
	if (launchErr != hipSuccess)
		fprintf(stderr, "Kernel launch (paged pass) failed: %s\n", hipGetErrorString(launchErr));
	hipMemcpy(h_res_page, d_b, totalThreads*sizeof(float), hipMemcpyDeviceToHost);
	auto stop_paged = std::chrono::high_resolution_clock::now();
	// Pass 2: pinned host memory
	auto start_pinned = std::chrono::high_resolution_clock::now();
	hipMemcpy(d_a, h_a_pin, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b_pin, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	launchErr = hipGetLastError();
	if (launchErr != hipSuccess)
		fprintf(stderr, "Kernel launch (pinned pass) failed: %s\n", hipGetErrorString(launchErr));
	hipMemcpy(h_res_pin, d_b, totalThreads*sizeof(float), hipMemcpyDeviceToHost);
	auto stop_pinned = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> diff_paged = stop_paged-start_paged;
	std::chrono::duration<double> diff_pinned = stop_pinned-start_pinned;
	// both passes ran the same kernel on the same inputs, so the results
	// are expected to agree bit-for-bit
	bool equal=true;
	for(int i=0; i<totalThreads; i++) {
		if(h_res_page[i] != h_res_pin[i]) {
			printf("pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!", i, h_res_page[i], i, h_res_pin[i]);
			equal=false;
		}
	}
	if(equal)
		printf("All results for paged and pinned memory are the same!\n\n");
	if(diff_paged.count() > diff_pinned.count())
		printf("Pinned memory ran %fs faster than paged, or %fx as fast\n", diff_paged.count()-diff_pinned.count(), diff_paged.count()/diff_pinned.count());
	else
		printf("Paged memory ran %fs faster than pinned, or %fx as fast\n", diff_pinned.count()-diff_paged.count(), diff_pinned.count()/diff_paged.count());
	printf("runtime for paged: %f\nruntime for pinned: %f\n\n", diff_paged.count(), diff_pinned.count());
	// release everything, including the pageable buffers that were
	// previously leaked
	hipFree(d_a);
	hipFree(d_b);
	hipHostFree(h_a_pin);
	hipHostFree(h_b_pin);
	hipHostFree(h_res_pin);
	free(h_a_page);
	free(h_b_page);
	free(h_res_page);
	return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12saxpy_pinnedifPfS_
.globl _Z12saxpy_pinnedifPfS_
.p2align 8
.type _Z12saxpy_pinnedifPfS_,@function
_Z12saxpy_pinnedifPfS_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_load_b32 s0, s[0:1], 0x4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, s0, v2
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12saxpy_pinnedifPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12saxpy_pinnedifPfS_, .Lfunc_end0-_Z12saxpy_pinnedifPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12saxpy_pinnedifPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12saxpy_pinnedifPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //Based on the work of Andrew Krepps
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <chrono>
// SAXPY: y[i] = a * x[i] + y[i] for every element with index below n.
// One element per thread; expects a 1-D launch. Threads whose global
// index falls past the end of the arrays simply exit (grid-tail guard).
__global__ void saxpy_pinned(int n, float a, float *x, float *y) {
	const int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if (idx >= n)
		return;
	y[idx] += a * x[idx];
}
// Driver: times one SAXPY round trip (H2D copies, kernel, D2H copy) using
// pageable host memory and again using pinned host memory, then compares.
// argv[1] = total thread count, argv[2] = block size (both optional).
int main(int argc, char** argv)
{
	// read command line arguments
	int totalThreads = (1 << 20);
	int blockSize = 256;
	if (argc >= 2) {
		totalThreads = atoi(argv[1]);
	}
	if (argc >= 3) {
		blockSize = atoi(argv[2]);
	}
	// atoi() returns 0 for non-numeric input; reject non-positive values
	// up front so the division below cannot divide by zero.
	if (totalThreads <= 0 || blockSize <= 0) {
		fprintf(stderr, "Thread count and block size must be positive integers\n");
		return 1;
	}
	int numBlocks = totalThreads/blockSize;
	// round the grid up when the block size does not divide the thread count
	if (totalThreads % blockSize != 0) {
		++numBlocks;
		totalThreads = numBlocks*blockSize;
		printf("Warning: Total thread count is not evenly divisible by the block size\n");
		printf("The total number of threads will be rounded up to %d\n", totalThreads);
	}
	printf("Total number of operations: %d\n", totalThreads);
	// =======================================================================================
	// Start my code
	float *h_a_page, *h_b_page, *h_res_page;
	float *h_a_pin, *h_b_pin, *h_res_pin;
	float *d_a, *d_b;
	// host pageable memory
	h_a_page = (float*)malloc(totalThreads*sizeof(float));
	h_b_page = (float*)malloc(totalThreads*sizeof(float));
	h_res_page = (float*)malloc(totalThreads*sizeof(float));
	if (!h_a_page || !h_b_page || !h_res_page) {
		fprintf(stderr, "Host allocation failed\n");
		return 1;
	}
	// host pinned memory
	hipHostMalloc((void**)&h_a_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	hipHostMalloc((void**)&h_b_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	hipHostMalloc((void**)&h_res_pin, totalThreads*sizeof(float), hipHostMallocDefault);
	// device memory
	hipMalloc((void**)&d_a, totalThreads*sizeof(float));
	hipMalloc((void**)&d_b, totalThreads*sizeof(float));
	hipError_t allocErr = hipGetLastError();
	if (allocErr != hipSuccess) {
		fprintf(stderr, "HIP allocation failed: %s\n", hipGetErrorString(allocErr));
		return 1;
	}
	for(int i=0; i< totalThreads; i++) {
		h_a_page[i] = 1.0f;
		h_a_pin[i] = 1.0f;
		h_b_page[i] = 2.0f;
		h_b_pin[i] = 2.0f;
	}
	// Pass 1: pageable host memory (the blocking device-to-host copy after
	// the launch also synchronizes, so the stop timestamp covers the kernel)
	auto start_paged = std::chrono::high_resolution_clock::now();
	hipMemcpy(d_a, h_a_page, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b_page, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	hipError_t launchErr = hipGetLastError();
	if (launchErr != hipSuccess)
		fprintf(stderr, "Kernel launch (paged pass) failed: %s\n", hipGetErrorString(launchErr));
	hipMemcpy(h_res_page, d_b, totalThreads*sizeof(float), hipMemcpyDeviceToHost);
	auto stop_paged = std::chrono::high_resolution_clock::now();
	// Pass 2: pinned host memory
	auto start_pinned = std::chrono::high_resolution_clock::now();
	hipMemcpy(d_a, h_a_pin, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b_pin, totalThreads*sizeof(float), hipMemcpyHostToDevice);
	saxpy_pinned<<<numBlocks, blockSize>>>(totalThreads, 1.0f, d_a, d_b);
	launchErr = hipGetLastError();
	if (launchErr != hipSuccess)
		fprintf(stderr, "Kernel launch (pinned pass) failed: %s\n", hipGetErrorString(launchErr));
	hipMemcpy(h_res_pin, d_b, totalThreads*sizeof(float), hipMemcpyDeviceToHost);
	auto stop_pinned = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> diff_paged = stop_paged-start_paged;
	std::chrono::duration<double> diff_pinned = stop_pinned-start_pinned;
	// both passes ran the same kernel on the same inputs, so the results
	// are expected to agree bit-for-bit
	bool equal=true;
	for(int i=0; i<totalThreads; i++) {
		if(h_res_page[i] != h_res_pin[i]) {
			printf("pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!", i, h_res_page[i], i, h_res_pin[i]);
			equal=false;
		}
	}
	if(equal)
		printf("All results for paged and pinned memory are the same!\n\n");
	if(diff_paged.count() > diff_pinned.count())
		printf("Pinned memory ran %fs faster than paged, or %fx as fast\n", diff_paged.count()-diff_pinned.count(), diff_paged.count()/diff_pinned.count());
	else
		printf("Paged memory ran %fs faster than pinned, or %fx as fast\n", diff_pinned.count()-diff_paged.count(), diff_pinned.count()/diff_paged.count());
	printf("runtime for paged: %f\nruntime for pinned: %f\n\n", diff_paged.count(), diff_pinned.count());
	// release everything, including the pageable buffers that were
	// previously leaked
	hipFree(d_a);
	hipFree(d_b);
	hipHostFree(h_a_pin);
	hipHostFree(h_b_pin);
	hipHostFree(h_res_pin);
	free(h_a_page);
	free(h_b_page);
	free(h_res_page);
	return 0;
} | .text
.file "assignment.hip"
	# HIP host-side launch stub for saxpy_pinned(int, float, float*, float*).
	# Generated by clang: spills the four C++ arguments to the stack, builds the
	# kernel-argument pointer array, pops the <<<...>>> call configuration, and
	# forwards everything to hipLaunchKernel.
	.globl	_Z27__device_stub__saxpy_pinnedifPfS_ # -- Begin function _Z27__device_stub__saxpy_pinnedifPfS_
	.p2align	4, 0x90
	.type	_Z27__device_stub__saxpy_pinnedifPfS_,@function
_Z27__device_stub__saxpy_pinnedifPfS_:  # @_Z27__device_stub__saxpy_pinnedifPfS_
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	# Spill incoming args: n (edi), a (xmm0), x (rsi), y (rdx).
	movl	%edi, 12(%rsp)
	movss	%xmm0, 8(%rsp)
	movq	%rsi, 72(%rsp)
	movq	%rdx, 64(%rsp)
	# Argument array at 80(%rsp): four pointers to the spilled values above.
	leaq	12(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 104(%rsp)
	# Retrieve grid dim, block dim, shared-mem size and stream pushed by the
	# triple-chevron launch site.
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9
	# First argument: host-side handle symbol identifying the device kernel.
	movl	$_Z12saxpy_pinnedifPfS_, %edi
	# Shared-mem bytes and stream go on the stack (args 7 and 8).
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end0:
	.size	_Z27__device_stub__saxpy_pinnedifPfS_, .Lfunc_end0-_Z27__device_stub__saxpy_pinnedifPfS_
	.cfi_endproc
                                        # -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x41cdcd6500000000 # double 1.0E+9
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
movl %edi, %ebp
movl $1048576, %ebx # imm = 0x100000
cmpl $2, %edi
jl .LBB1_2
# %bb.1:
movq 8(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
.LBB1_2:
movl $256, %ecx # imm = 0x100
cmpl $3, %ebp
jl .LBB1_4
# %bb.3:
movq 16(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rcx
.LBB1_4:
movl %ebx, %eax
cltd
idivl %ecx
testl %edx, %edx
movq %rcx, 16(%rsp) # 8-byte Spill
je .LBB1_5
# %bb.6:
incl %eax
movl %eax, 36(%rsp) # 4-byte Spill
movl %eax, %ebx
imull %ecx, %ebx
movl $.Lstr, %edi
callq puts@PLT
movl $.L.str.1, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
jmp .LBB1_7
.LBB1_5:
movl %eax, 36(%rsp) # 4-byte Spill
.LBB1_7:
movl $.L.str.2, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movq %rbx, %rbp
movslq %ebp, %rbx
leaq (,%rbx,4), %r15
movq %r15, %rdi
callq malloc
movq %rax, %r13
movq %r15, %rdi
callq malloc
movq %rax, %r12
movq %r15, %rdi
callq malloc
movq %rax, %r14
leaq 64(%rsp), %rdi
movq %r15, %rsi
xorl %edx, %edx
callq hipHostMalloc
leaq 56(%rsp), %rdi
movq %r15, %rsi
xorl %edx, %edx
callq hipHostMalloc
leaq 48(%rsp), %rdi
movq %r15, %rsi
xorl %edx, %edx
callq hipHostMalloc
leaq 24(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %r15, %rsi
callq hipMalloc
movq %rbp, 136(%rsp) # 8-byte Spill
movl %ebp, %ebp
testl %ebx, %ebx
jle .LBB1_10
# %bb.8: # %.lr.ph
movq 64(%rsp), %rax
movq 56(%rsp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB1_9: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%r13,%rdx,4) # imm = 0x3F800000
movl $1065353216, (%rax,%rdx,4) # imm = 0x3F800000
movl $1073741824, (%r12,%rdx,4) # imm = 0x40000000
movl $1073741824, (%rcx,%rdx,4) # imm = 0x40000000
incq %rdx
cmpq %rdx, %rbp
jne .LBB1_9
.LBB1_10: # %._crit_edge
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, 40(%rsp) # 8-byte Spill
movq 24(%rsp), %rdi
movq %r13, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
movq %r12, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movl 36(%rsp), %ebx # 4-byte Reload
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rbx
movl 16(%rsp), %r13d # 4-byte Reload
orq %rax, %r13
movq %rbx, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_12
# %bb.11:
movq 24(%rsp), %rax
movq (%rsp), %rcx
movq 136(%rsp), %rdx # 8-byte Reload
movl %edx, 12(%rsp)
movl $1065353216, 8(%rsp) # imm = 0x3F800000
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 128(%rsp), %rax
movq %rax, 160(%rsp)
leaq 120(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z12saxpy_pinnedifPfS_, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_12:
movq (%rsp), %rsi
movq %r14, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r12
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, 16(%rsp) # 8-byte Spill
movq 24(%rsp), %rdi
movq 64(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
movq 56(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %rbx, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 136(%rsp), %rbx # 8-byte Reload
jne .LBB1_14
# %bb.13:
movq 24(%rsp), %rax
movq (%rsp), %rcx
movl %ebx, 12(%rsp)
movl $1065353216, 8(%rsp) # imm = 0x3F800000
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 128(%rsp), %rax
movq %rax, 160(%rsp)
leaq 120(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z12saxpy_pinnedifPfS_, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_14:
movq 48(%rsp), %rdi
movq (%rsp), %rsi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
callq _ZNSt6chrono3_V212system_clock3nowEv
subq 40(%rsp), %r12 # 8-byte Folded Reload
cvtsi2sd %r12, %xmm1
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm0, %xmm1
movsd %xmm1, 40(%rsp) # 8-byte Spill
subq 16(%rsp), %rax # 8-byte Folded Reload
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
divsd %xmm0, %xmm1
movsd %xmm1, 16(%rsp) # 8-byte Spill
testl %ebx, %ebx
jle .LBB1_20
# %bb.15: # %.lr.ph114.preheader
movb $1, %al
xorl %ebx, %ebx
jmp .LBB1_16
.p2align 4, 0x90
.LBB1_18: # in Loop: Header=BB1_16 Depth=1
incq %rbx
cmpq %rbx, %rbp
je .LBB1_19
.LBB1_16: # %.lr.ph114
# =>This Inner Loop Header: Depth=1
movss (%r14,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq 48(%rsp), %rcx
movss (%rcx,%rbx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
jne .LBB1_17
jnp .LBB1_18
.LBB1_17: # in Loop: Header=BB1_16 Depth=1
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str.3, %edi
movl %ebx, %esi
movl %ebx, %edx
movb $2, %al
callq printf
xorl %eax, %eax
jmp .LBB1_18
.LBB1_19: # %._crit_edge115.loopexit
testb $1, %al
je .LBB1_21
.LBB1_20: # %.critedge
movl $.Lstr.1, %edi
callq puts@PLT
.LBB1_21:
movsd 40(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movsd 16(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
ucomisd %xmm0, %xmm1
jbe .LBB1_23
# %bb.22:
movapd %xmm0, %xmm2
movapd %xmm1, %xmm0
subsd %xmm2, %xmm0
divsd %xmm2, %xmm1
movl $.L.str.5, %edi
jmp .LBB1_24
.LBB1_23:
movapd %xmm0, %xmm2
subsd %xmm1, %xmm0
movapd %xmm1, %xmm3
movapd %xmm2, %xmm1
divsd %xmm3, %xmm1
movl $.L.str.6, %edi
.LBB1_24:
movb $2, %al
callq printf
movl $.L.str.7, %edi
movsd 40(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 16(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $2, %al
callq printf
movq 24(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipHostFree
movq 56(%rsp), %rdi
callq hipHostFree
movq 48(%rsp), %rdi
callq hipHostFree
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
	# Module constructor (runs from .init_array before main): registers the
	# embedded fat binary once, then registers the saxpy_pinned kernel with the
	# HIP runtime and schedules __hip_module_dtor via atexit.
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	# Register the fatbin only on first entry (handle still null).
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB2_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	# Zero the four trailing (unused) pointer arguments on the stack.
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	# Map host handle -> mangled device function name "_Z12saxpy_pinnedifPfS_".
	movl	$_Z12saxpy_pinnedifPfS_, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	# Module destructor (registered with atexit by __hip_module_ctor):
	# unregisters the fat binary if it was registered and clears the handle so
	# the teardown is idempotent.
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	# Nothing to do when the handle is null (never registered / already freed).
	testq	%rdi, %rdi
	je	.LBB3_2
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z12saxpy_pinnedifPfS_,@object # @_Z12saxpy_pinnedifPfS_
.section .rodata,"a",@progbits
.globl _Z12saxpy_pinnedifPfS_
.p2align 3, 0x0
_Z12saxpy_pinnedifPfS_:
.quad _Z27__device_stub__saxpy_pinnedifPfS_
.size _Z12saxpy_pinnedifPfS_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "The total number of threads will be rounded up to %d\n"
.size .L.str.1, 54
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total number of operations: %d\n"
.size .L.str.2, 32
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!"
.size .L.str.3, 88
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Pinned memory ran %fs faster than paged, or %fx as fast\n"
.size .L.str.5, 57
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Paged memory ran %fs faster than pinned, or %fx as fast\n"
.size .L.str.6, 57
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "runtime for paged: %f\nruntime for pinned: %f\n\n"
.size .L.str.7, 47
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12saxpy_pinnedifPfS_"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Warning: Total thread count is not evenly divisible by the block size"
.size .Lstr, 70
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "All results for paged and pinned memory are the same!\n"
.size .Lstr.1, 55
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__saxpy_pinnedifPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12saxpy_pinnedifPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12saxpy_pinnedifPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x160], PT ; /* 0x0000580004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FFMA R7, R2, c[0x0][0x164], R7 ; /* 0x0000590002077a23 */
/* 0x004fca0000000007 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
	; AMDGCN (gfx1100) device code for saxpy_pinned(n, a, x, y): y[i] += a*x[i]
	; for the guarded global index i.  s[0:1] holds the kernarg segment pointer
	; (see .amdhsa_user_sgpr_kernarg_segment_ptr below); per the kernarg
	; metadata: +0x0 = n, +0x4 = a, +0x8/+0x10 = x/y pointers,
	; +0x24 = hidden_group_size_x.
	.protected	_Z12saxpy_pinnedifPfS_
	.globl	_Z12saxpy_pinnedifPfS_
	.p2align	8
	.type	_Z12saxpy_pinnedifPfS_,@function
_Z12saxpy_pinnedifPfS_:
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x24             ; s2 = hidden group-size-x field
	s_load_b32 s3, s[0:1], 0x0              ; s3 = n
	s_waitcnt lgkmcnt(0)
	s_and_b32 s2, s2, 0xffff                ; group size lives in the low 16 bits
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	; v1 = workgroup_id_x * blockDim.x + tid.x (global index); v0 holds tid.x
	v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
	s_mov_b32 s2, exec_lo
	; Bounds guard: disable lanes with i >= n; skip the body if none remain.
	v_cmpx_gt_i32_e64 s3, v1
	s_cbranch_execz .LBB0_2
	s_load_b128 s[4:7], s[0:1], 0x8         ; s[4:5] = x, s[6:7] = y
	v_ashrrev_i32_e32 v2, 31, v1            ; sign-extend i to 64 bits
	s_load_b32 s0, s[0:1], 0x4              ; s0 = a
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[0:1], 2, v[1:2]         ; byte offset = i * sizeof(float)
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v2, vcc_lo, s4, v0         ; v[2:3] = &x[i]
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
	v_add_co_u32 v0, vcc_lo, s6, v0         ; v[0:1] = &y[i]
	v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
	global_load_b32 v2, v[2:3], off         ; v2 = x[i]
	global_load_b32 v3, v[0:1], off         ; v3 = y[i]
	s_waitcnt vmcnt(0)
	v_fmac_f32_e32 v3, s0, v2               ; v3 = a*x[i] + y[i]
	global_store_b32 v[0:1], v3, off        ; y[i] = result
.LBB0_2:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12saxpy_pinnedifPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12saxpy_pinnedifPfS_, .Lfunc_end0-_Z12saxpy_pinnedifPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12saxpy_pinnedifPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12saxpy_pinnedifPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019cfb8_00000000-6_assignment.cudafe1.cpp"
.text
#APP
#NO_APP
	# atexit cleanup helper (registered in _ZL24__sti____cudaRegisterAllv):
	# unregisters the embedded fat binary using the saved module handle.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2163:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2163:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	# CUDA host-side launch stub for saxpy_pinned(int, float, float*, float*)
	# (nvcc/cudafe output): spills the arguments, builds the kernel-argument
	# pointer array, pops the <<<...>>> configuration, and calls
	# cudaLaunchKernel.  Protected by a stack canary (%fs:40).
	.globl	_Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
	.type	_Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_, @function
_Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_:
.LFB2185:
	.cfi_startproc
	endbr64
	subq	$152, %rsp
	.cfi_def_cfa_offset 160
	# Spill incoming args: n (edi), a (xmm0), x (rsi), y (rdx).
	movl	%edi, 28(%rsp)
	movss	%xmm0, 24(%rsp)
	movq	%rsi, 16(%rsp)
	movq	%rdx, 8(%rsp)
	# Install the stack-protector canary.
	movq	%fs:40, %rax
	movq	%rax, 136(%rsp)
	xorl	%eax, %eax
	# Argument array at 96(%rsp): pointers to the four spilled values.
	leaq	28(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	24(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 112(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 120(%rsp)
	# Default-initialize grid/block dim3 structs to (1,1,1).
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	movl	$1, 56(%rsp)
	movl	$1, 60(%rsp)
	movl	$1, 64(%rsp)
	movl	$1, 68(%rsp)
	# Fetch the launch configuration pushed by the triple-chevron call site.
	leaq	40(%rsp), %rcx
	leaq	32(%rsp), %rdx
	leaq	60(%rsp), %rsi
	leaq	48(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7
.L3:
	# Verify the canary before returning.
	movq	136(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L8
	addq	$152, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	# Configuration retrieved successfully: launch the kernel.  Shared-mem
	# size and stream are pushed as stack arguments 7 and 8.
	pushq	40(%rsp)
	.cfi_def_cfa_offset 168
	pushq	40(%rsp)
	.cfi_def_cfa_offset 176
	leaq	112(%rsp), %r9
	movq	76(%rsp), %rcx
	movl	84(%rsp), %r8d
	movq	64(%rsp), %rsi
	movl	72(%rsp), %edx
	movl	$_Z12saxpy_pinnedifPfS_, %edi
	leaq	_Z12saxpy_pinnedifPfS_(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 160
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2185:
	.size	_Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_, .-_Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
	# Host-side function carrying the kernel's mangled name.  Its address is the
	# handle passed to cudaLaunchKernel/__cudaRegisterFunction; calling it
	# simply forwards to the device stub above.
	.globl	_Z12saxpy_pinnedifPfS_
	.type	_Z12saxpy_pinnedifPfS_, @function
_Z12saxpy_pinnedifPfS_:
.LFB2186:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2186:
	.size	_Z12saxpy_pinnedifPfS_, .-_Z12saxpy_pinnedifPfS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Warning: Total thread count is not evenly divisible by the block size\n"
.align 8
.LC1:
.string "The total number of threads will be rounded up to %d\n"
.align 8
.LC2:
.string "Total number of operations: %d\n"
.align 8
.LC6:
.string "pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!"
.align 8
.LC7:
.string "All results for paged and pinned memory are the same!\n\n"
.align 8
.LC8:
.string "Pinned memory ran %fs faster than paged, or %fx as fast\n"
.align 8
.LC9:
.string "Paged memory ran %fs faster than pinned, or %fx as fast\n"
.align 8
.LC10:
.string "runtime for paged: %f\nruntime for pinned: %f\n\n"
.text
.globl main
.type main, @function
main:
.LFB2156:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
cmpl $1, %edi
jg .L37
movl $1048576, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $4194304, %edi
call malloc@PLT
movq %rax, %r13
movl $4194304, %edi
call malloc@PLT
movq %rax, %r12
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbp
leaq 40(%rsp), %rdi
movl $4194304, %esi
call cudaMallocHost@PLT
leaq 48(%rsp), %rdi
movl $4194304, %esi
call cudaMallocHost@PLT
leaq 56(%rsp), %rdi
movl $4194304, %esi
call cudaMallocHost@PLT
leaq 64(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
movl $256, (%rsp)
movl $4096, 8(%rsp)
movl $1048576, %r15d
movl $4194304, %ebx
movl $1048576, %r14d
.L27:
movl $0, %eax
movss .LC3(%rip), %xmm1
movss .LC4(%rip), %xmm0
.L16:
movss %xmm1, 0(%r13,%rax)
movq 40(%rsp), %rdx
movss %xmm1, (%rdx,%rax)
movss %xmm0, (%r12,%rax)
movq 48(%rsp), %rdx
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq %rbx, %rax
jne .L16
.L15:
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, 16(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl (%rsp), %eax
movl %eax, %r12d
movl %eax, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl 8(%rsp), %eax
movl %eax, %r13d
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L17:
movl $2, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, (%rsp)
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, 24(%rsp)
movl $1, %ecx
movq %rbx, %rdx
movq 40(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 48(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %r12d, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl %r13d, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L18:
movl $2, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq (%rsp), %rdx
movq 16(%rsp), %rcx
subq %rcx, %rdx
pxor %xmm1, %xmm1
cvtsi2sdq %rdx, %xmm1
movsd .LC5(%rip), %xmm0
divsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp)
movq 24(%rsp), %rcx
subq %rcx, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
divsd %xmm0, %xmm1
movsd %xmm1, (%rsp)
testl %r15d, %r15d
jle .L19
movl $0, %ebx
movl $1, %edx
leaq .LC6(%rip), %r12
jmp .L22
.L37:
movl %edi, %ebx
movq %rsi, %rbp
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r15d
movl $256, (%rsp)
cmpl $2, %ebx
jg .L40
.L13:
movl %r15d, %eax
movl (%rsp), %ecx
cltd
idivl %ecx
movl %eax, 8(%rsp)
testl %edx, %edx
je .L14
addl $1, %eax
movl %eax, 8(%rsp)
imull %eax, %ecx
movl %ecx, %r15d
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r15d, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L14:
movl %r15d, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq %r15d, %r14
leaq 0(,%r14,4), %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r13
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
leaq 40(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocHost@PLT
leaq 48(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocHost@PLT
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMallocHost@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
testl %r15d, %r15d
jg .L27
jmp .L15
.L40:
movq 16(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, (%rsp)
jmp .L13
.L38:
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movss .LC3(%rip), %xmm0
movl %r15d, %edi
call _Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
jmp .L17
.L39:
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movss .LC3(%rip), %xmm0
movl %r15d, %edi
call _Z36__device_stub__Z12saxpy_pinnedifPfS_ifPfS_
jmp .L18
.L30:
movl %ebx, %edx
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl %ebx, %ecx
movq %r12, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movl $0, %edx
.L20:
addq $1, %rbx
cmpq %r14, %rbx
je .L41
.L22:
movss 0(%rbp,%rbx,4), %xmm0
movq 56(%rsp), %rax
movss (%rax,%rbx,4), %xmm1
ucomiss %xmm1, %xmm0
jp .L30
je .L20
jmp .L30
.L41:
testb %dl, %dl
jne .L19
.L23:
movsd 8(%rsp), %xmm0
movsd (%rsp), %xmm2
comisd %xmm2, %xmm0
jbe .L34
movapd %xmm0, %xmm1
divsd %xmm2, %xmm1
subsd %xmm2, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
.L26:
movsd (%rsp), %xmm1
movsd 8(%rsp), %xmm0
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFreeHost@PLT
movq 48(%rsp), %rdi
call cudaFreeHost@PLT
movq 56(%rsp), %rdi
call cudaFreeHost@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L42
movl $0, %eax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L23
.L34:
movsd (%rsp), %xmm0
movapd %xmm0, %xmm1
movsd 8(%rsp), %xmm3
divsd %xmm3, %xmm1
subsd %xmm3, %xmm0
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
jmp .L26
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2156:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC11:
.string "_Z12saxpy_pinnedifPfS_"
.text
	# Static-init hook (runs from .init_array before main): registers the
	# embedded fat binary, registers the saxpy_pinned kernel under its mangled
	# name (.LC11), finalizes registration, and schedules the atexit cleanup.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2188:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	# Four null trailing pointer arguments passed on the stack.
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	# Host handle (rsi) mapped to the device name string "_Z12saxpy_pinnedifPfS_".
	leaq	.LC11(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z12saxpy_pinnedifPfS_(%rip), %rsi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2188:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1065353216
.align 4
.LC4:
.long 1073741824
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC5:
.long 0
.long 1104006501
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "assignment.hip"
.globl _Z27__device_stub__saxpy_pinnedifPfS_ # -- Begin function _Z27__device_stub__saxpy_pinnedifPfS_
.p2align 4, 0x90
.type _Z27__device_stub__saxpy_pinnedifPfS_,@function
_Z27__device_stub__saxpy_pinnedifPfS_: # @_Z27__device_stub__saxpy_pinnedifPfS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movss %xmm0, 8(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12saxpy_pinnedifPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z27__device_stub__saxpy_pinnedifPfS_, .Lfunc_end0-_Z27__device_stub__saxpy_pinnedifPfS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x41cdcd6500000000 # double 1.0E+9
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
movl %edi, %ebp
movl $1048576, %ebx # imm = 0x100000
cmpl $2, %edi
jl .LBB1_2
# %bb.1:
movq 8(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
.LBB1_2:
movl $256, %ecx # imm = 0x100
cmpl $3, %ebp
jl .LBB1_4
# %bb.3:
movq 16(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rcx
.LBB1_4:
movl %ebx, %eax
cltd
idivl %ecx
testl %edx, %edx
movq %rcx, 16(%rsp) # 8-byte Spill
je .LBB1_5
# %bb.6:
incl %eax
movl %eax, 36(%rsp) # 4-byte Spill
movl %eax, %ebx
imull %ecx, %ebx
movl $.Lstr, %edi
callq puts@PLT
movl $.L.str.1, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
jmp .LBB1_7
.LBB1_5:
movl %eax, 36(%rsp) # 4-byte Spill
.LBB1_7:
movl $.L.str.2, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movq %rbx, %rbp
movslq %ebp, %rbx
leaq (,%rbx,4), %r15
movq %r15, %rdi
callq malloc
movq %rax, %r13
movq %r15, %rdi
callq malloc
movq %rax, %r12
movq %r15, %rdi
callq malloc
movq %rax, %r14
leaq 64(%rsp), %rdi
movq %r15, %rsi
xorl %edx, %edx
callq hipHostMalloc
leaq 56(%rsp), %rdi
movq %r15, %rsi
xorl %edx, %edx
callq hipHostMalloc
leaq 48(%rsp), %rdi
movq %r15, %rsi
xorl %edx, %edx
callq hipHostMalloc
leaq 24(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %r15, %rsi
callq hipMalloc
movq %rbp, 136(%rsp) # 8-byte Spill
movl %ebp, %ebp
testl %ebx, %ebx
jle .LBB1_10
# %bb.8: # %.lr.ph
movq 64(%rsp), %rax
movq 56(%rsp), %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB1_9: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%r13,%rdx,4) # imm = 0x3F800000
movl $1065353216, (%rax,%rdx,4) # imm = 0x3F800000
movl $1073741824, (%r12,%rdx,4) # imm = 0x40000000
movl $1073741824, (%rcx,%rdx,4) # imm = 0x40000000
incq %rdx
cmpq %rdx, %rbp
jne .LBB1_9
.LBB1_10: # %._crit_edge
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, 40(%rsp) # 8-byte Spill
movq 24(%rsp), %rdi
movq %r13, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
movq %r12, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movl 36(%rsp), %ebx # 4-byte Reload
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rbx
movl 16(%rsp), %r13d # 4-byte Reload
orq %rax, %r13
movq %rbx, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_12
# %bb.11:
movq 24(%rsp), %rax
movq (%rsp), %rcx
movq 136(%rsp), %rdx # 8-byte Reload
movl %edx, 12(%rsp)
movl $1065353216, 8(%rsp) # imm = 0x3F800000
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 128(%rsp), %rax
movq %rax, 160(%rsp)
leaq 120(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z12saxpy_pinnedifPfS_, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_12:
movq (%rsp), %rsi
movq %r14, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r12
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, 16(%rsp) # 8-byte Spill
movq 24(%rsp), %rdi
movq 64(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
movq 56(%rsp), %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movq %rbx, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 136(%rsp), %rbx # 8-byte Reload
jne .LBB1_14
# %bb.13:
movq 24(%rsp), %rax
movq (%rsp), %rcx
movl %ebx, 12(%rsp)
movl $1065353216, 8(%rsp) # imm = 0x3F800000
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 128(%rsp), %rax
movq %rax, 160(%rsp)
leaq 120(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z12saxpy_pinnedifPfS_, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_14:
movq 48(%rsp), %rdi
movq (%rsp), %rsi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
callq _ZNSt6chrono3_V212system_clock3nowEv
subq 40(%rsp), %r12 # 8-byte Folded Reload
cvtsi2sd %r12, %xmm1
movsd .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm0, %xmm1
movsd %xmm1, 40(%rsp) # 8-byte Spill
subq 16(%rsp), %rax # 8-byte Folded Reload
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
divsd %xmm0, %xmm1
movsd %xmm1, 16(%rsp) # 8-byte Spill
testl %ebx, %ebx
jle .LBB1_20
# %bb.15: # %.lr.ph114.preheader
movb $1, %al
xorl %ebx, %ebx
jmp .LBB1_16
.p2align 4, 0x90
.LBB1_18: # in Loop: Header=BB1_16 Depth=1
incq %rbx
cmpq %rbx, %rbp
je .LBB1_19
.LBB1_16: # %.lr.ph114
# =>This Inner Loop Header: Depth=1
movss (%r14,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq 48(%rsp), %rcx
movss (%rcx,%rbx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
jne .LBB1_17
jnp .LBB1_18
.LBB1_17: # in Loop: Header=BB1_16 Depth=1
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl $.L.str.3, %edi
movl %ebx, %esi
movl %ebx, %edx
movb $2, %al
callq printf
xorl %eax, %eax
jmp .LBB1_18
.LBB1_19: # %._crit_edge115.loopexit
testb $1, %al
je .LBB1_21
.LBB1_20: # %.critedge
movl $.Lstr.1, %edi
callq puts@PLT
.LBB1_21:
movsd 40(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movsd 16(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
ucomisd %xmm0, %xmm1
jbe .LBB1_23
# %bb.22:
movapd %xmm0, %xmm2
movapd %xmm1, %xmm0
subsd %xmm2, %xmm0
divsd %xmm2, %xmm1
movl $.L.str.5, %edi
jmp .LBB1_24
.LBB1_23:
movapd %xmm0, %xmm2
subsd %xmm1, %xmm0
movapd %xmm1, %xmm3
movapd %xmm2, %xmm1
divsd %xmm3, %xmm1
movl $.L.str.6, %edi
.LBB1_24:
movb $2, %al
callq printf
movl $.L.str.7, %edi
movsd 40(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 16(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $2, %al
callq printf
movq 24(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipHostFree
movq 56(%rsp), %rdi
callq hipHostFree
movq 48(%rsp), %rdi
callq hipHostFree
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12saxpy_pinnedifPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12saxpy_pinnedifPfS_,@object # @_Z12saxpy_pinnedifPfS_
.section .rodata,"a",@progbits
.globl _Z12saxpy_pinnedifPfS_
.p2align 3, 0x0
_Z12saxpy_pinnedifPfS_:
.quad _Z27__device_stub__saxpy_pinnedifPfS_
.size _Z12saxpy_pinnedifPfS_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "The total number of threads will be rounded up to %d\n"
.size .L.str.1, 54
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total number of operations: %d\n"
.size .L.str.2, 32
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "pagedResult[%d] = %f \npinnedResult[%d] = %f\nThese results do not match and they should!"
.size .L.str.3, 88
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Pinned memory ran %fs faster than paged, or %fx as fast\n"
.size .L.str.5, 57
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Paged memory ran %fs faster than pinned, or %fx as fast\n"
.size .L.str.6, 57
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "runtime for paged: %f\nruntime for pinned: %f\n\n"
.size .L.str.7, 47
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12saxpy_pinnedifPfS_"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Warning: Total thread count is not evenly divisible by the block size"
.size .Lstr, 70
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "All results for paged and pinned memory are the same!\n"
.size .Lstr.1, 55
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__saxpy_pinnedifPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12saxpy_pinnedifPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<cuda.h>
#include<stdio.h>
#include<math.h>
const double PI = 3.141592653589793238460;
__global__
void vecFFTKernel(float* A, float* C, int n){
    // Scale kernel: C[i] = PI * A[i] for each of the n elements.
    // One thread handles one element via a flat 1-D global index.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard: the host rounds the grid up to whole 256-thread
    // blocks (e.g. n=32 launches 256 threads), so threads with i >= n
    // would otherwise read and write past the end of A and C.
    if (i < n) {
        C[i] = PI * A[i];  // PI is a double, so the product is computed in double
    }
}
__host__
void vecFFT(float* A,float* C, int n){
    // Host wrapper: copy A to the device, run vecFFTKernel so that
    // C[i] = PI * A[i], and copy the result back into C.
    // NOTE(review): runtime-API return codes are not checked, matching
    // the original; real code should wrap these in an error-check macro.
    int c = (n + 255) / 256;           // blocks of 256 threads (integer ceil-div)
    size_t size = n * sizeof(float);   // bytes per vector
    float *d_A, *d_C;
    // Allocate device memory for the input and the output.
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_C, size);
    // Copy the input vector to device memory.
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    // Launch with the block count computed above (the original left `c`
    // unused and recomputed ceil(n/256.0) in double at the launch site).
    vecFFTKernel<<< c, 256 >>>(d_A, d_C, n);
    // Blocking copy of the result back to the host.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(d_A);
    cudaFree(d_C);
}
//Kernel launches are issued inside vecFFT above; main below is the host driver
int main(){
float *A,*C;
int n=32; //must be a power of 2
A=(float*)malloc(n*sizeof(float));
C=(float*)malloc(n*sizeof(float));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
}
vecFFT(A,C,n);
for(i=0;i<n;i++){
printf("%f ",C[i]);
}
free(A);
free(C);
return 0;
} | code for sm_80
Function : _Z12vecFFTKernelPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fcc00078e0207 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ F2F.F64.F32 R4, R2 ; /* 0x0000000200047310 */
/* 0x004e240000201800 */
/*00a0*/ DMUL R4, R4, c[0x2][0x0] ; /* 0x0080000004047a28 */
/* 0x001e140000000000 */
/*00b0*/ F2F.F32.F64 R5, R4 ; /* 0x0000000400057310 */
/* 0x001e240000301000 */
/*00c0*/ STG.E [R6.64], R5 ; /* 0x0000000506007986 */
/* 0x001fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<cuda.h>
#include<stdio.h>
#include<math.h>
const double PI = 3.141592653589793238460;
__global__
void vecFFTKernel(float* A, float* C, int n){
    // Scale kernel: C[i] = PI * A[i] for each of the n elements.
    // One thread handles one element via a flat 1-D global index.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard: the host rounds the grid up to whole 256-thread
    // blocks (e.g. n=32 launches 256 threads), so threads with i >= n
    // would otherwise read and write past the end of A and C.
    if (i < n) {
        C[i] = PI * A[i];  // PI is a double, so the product is computed in double
    }
}
__host__
void vecFFT(float* A,float* C, int n){
    // Host wrapper: copy A to the device, run vecFFTKernel so that
    // C[i] = PI * A[i], and copy the result back into C.
    // NOTE(review): runtime-API return codes are not checked, matching
    // the original; real code should wrap these in an error-check macro.
    int c = (n + 255) / 256;           // blocks of 256 threads (integer ceil-div)
    size_t size = n * sizeof(float);   // bytes per vector
    float *d_A, *d_C;
    // Allocate device memory for the input and the output.
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_C, size);
    // Copy the input vector to device memory.
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    // Launch with the block count computed above (the original left `c`
    // unused and recomputed ceil(n/256.0) in double at the launch site).
    vecFFTKernel<<< c, 256 >>>(d_A, d_C, n);
    // Blocking copy of the result back to the host.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(d_A);
    cudaFree(d_C);
}
//Kernel launches are issued inside vecFFT above; main below is the host driver
int main(){
float *A,*C;
int n=32; //must be a power of 2
A=(float*)malloc(n*sizeof(float));
C=(float*)malloc(n*sizeof(float));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
}
vecFFT(A,C,n);
for(i=0;i<n;i++){
printf("%f ",C[i]);
}
free(A);
free(C);
return 0;
} | .file "tmpxft_00019e4f_00000000-6_fft.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i
.type _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i, @function
_Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12vecFFTKernelPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i, .-_Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i
.globl _Z12vecFFTKernelPfS_i
.type _Z12vecFFTKernelPfS_i, @function
_Z12vecFFTKernelPfS_i:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z12vecFFTKernelPfS_i, .-_Z12vecFFTKernelPfS_i
.globl _Z6vecFFTPfS_i
.type _Z6vecFFTPfS_i, @function
_Z6vecFFTPfS_i:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r13
movq %rsi, %r12
movl %edx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
pxor %xmm0, %xmm0
cvtsi2sdl %edx, %xmm0
mulsd .LC0(%rip), %xmm0
movsd %xmm0, 8(%rsp)
movsd .LC4(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC1(%rip), %xmm3
ucomisd %xmm1, %xmm3
jbe .L12
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
movapd %xmm0, %xmm3
cmpnlesd %xmm1, %xmm3
movsd .LC3(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm3, %xmm1
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm1
movsd %xmm1, 8(%rsp)
.L12:
leal 0(,%rbp,4), %ebx
movslq %ebx, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
cvttsd2siq 8(%rsp), %rax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L13:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl %ebp, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i
jmp .L13
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z6vecFFTPfS_i, .-_Z6vecFFTPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "%f "
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $128, %edi
call malloc@PLT
movq %rax, %rbp
movl $128, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %eax
.L19:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $32, %rax
jne .L19
movl $32, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z6vecFFTPfS_i
movq %r12, %rbx
leaq 128(%r12), %r14
leaq .LC5(%rip), %r13
.L20:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r14, %rbx
jne .L20
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z12vecFFTKernelPfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z12vecFFTKernelPfS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1064304640
.align 8
.LC1:
.long 0
.long 1127219200
.align 8
.LC3:
.long 0
.long 1072693248
.align 8
.LC4:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<cuda.h>
#include<stdio.h>
#include<math.h>
const double PI = 3.141592653589793238460;
__global__
void vecFFTKernel(float* A, float* C, int n){
    // Scale kernel: C[i] = PI * A[i] for each of the n elements.
    // One thread handles one element via a flat 1-D global index.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard: the host rounds the grid up to whole 256-thread
    // blocks (e.g. n=32 launches 256 threads), so threads with i >= n
    // would otherwise read and write past the end of A and C.
    if (i < n) {
        C[i] = PI * A[i];  // PI is a double, so the product is computed in double
    }
}
__host__
void vecFFT(float* A,float* C, int n){
    // Host wrapper: copy A to the device, run vecFFTKernel so that
    // C[i] = PI * A[i], and copy the result back into C.
    // NOTE(review): runtime-API return codes are not checked, matching
    // the original; real code should wrap these in an error-check macro.
    int c = (n + 255) / 256;           // blocks of 256 threads (integer ceil-div)
    size_t size = n * sizeof(float);   // bytes per vector
    float *d_A, *d_C;
    // Allocate device memory for the input and the output.
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_C, size);
    // Copy the input vector to device memory.
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    // Launch with the block count computed above (the original left `c`
    // unused and recomputed ceil(n/256.0) in double at the launch site).
    vecFFTKernel<<< c, 256 >>>(d_A, d_C, n);
    // Blocking copy of the result back to the host.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(d_A);
    cudaFree(d_C);
}
//Kernel launches are issued inside vecFFT above; main below is the host driver
int main(){
float *A,*C;
int n=32; //must be a power of 2
A=(float*)malloc(n*sizeof(float));
C=(float*)malloc(n*sizeof(float));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
}
vecFFT(A,C,n);
for(i=0;i<n;i++){
printf("%f ",C[i]);
}
free(A);
free(C);
return 0;
} | #include<hip/hip_runtime.h>
#include<stdio.h>
#include<math.h>
const double PI = 3.141592653589793238460;
__global__
void vecFFTKernel(float* A, float* C, int n){
    // Scale kernel: C[i] = PI * A[i] for each of the n elements.
    // One thread handles one element via a flat 1-D global index.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard: the host rounds the grid up to whole 256-thread
    // blocks (e.g. n=32 launches 256 threads), so threads with i >= n
    // would otherwise read and write past the end of A and C.
    if (i < n) {
        C[i] = PI * A[i];  // PI is a double, so the product is computed in double
    }
}
__host__
void vecFFT(float* A,float* C, int n){
    // Host wrapper: copy A to the device, run vecFFTKernel so that
    // C[i] = PI * A[i], and copy the result back into C.
    // NOTE(review): runtime-API return codes are not checked, matching
    // the original; real code should wrap these in an error-check macro.
    int c = (n + 255) / 256;           // blocks of 256 threads (integer ceil-div)
    size_t size = n * sizeof(float);   // bytes per vector
    float *d_A, *d_C;
    // Allocate device memory for the input and the output.
    hipMalloc((void**)&d_A, size);
    hipMalloc((void**)&d_C, size);
    // Copy the input vector to device memory.
    hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
    // Launch with the block count computed above (the original left `c`
    // unused and recomputed ceil(n/256.0) in double at the launch site).
    vecFFTKernel<<< c, 256 >>>(d_A, d_C, n);
    // Blocking copy of the result back to the host.
    hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
    // Release device memory.
    hipFree(d_A);
    hipFree(d_C);
}
//Kernel launches are issued inside vecFFT above; main below is the host driver
int main(){
    // Host driver: build a small test vector, scale it on the GPU via
    // vecFFT, and print the scaled values space-separated.
    float *A, *C;
    int n = 32;  // element count; must be a power of 2
    A = (float*)malloc(n * sizeof(float));
    C = (float*)malloc(n * sizeof(float));
    // Guard against allocation failure before writing through the pointers.
    if (A == NULL || C == NULL) {
        free(A);
        free(C);
        return 1;
    }
    for (int i = 0; i < n; i++) {
        A[i] = (float)i;  // A = [0, 1, 2, ..., n-1]
    }
    vecFFT(A, C, n);      // C[i] = PI * A[i], computed on the device
    for (int i = 0; i < n; i++) {
        printf("%f ", C[i]);
    }
    free(A);
    free(C);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<hip/hip_runtime.h>
#include<stdio.h>
#include<math.h>
const double PI = 3.141592653589793238460;
__global__
void vecFFTKernel(float* A, float* C, int n){
    // Scale kernel: C[i] = PI * A[i] for each of the n elements.
    // One thread handles one element via a flat 1-D global index.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard: the host rounds the grid up to whole 256-thread
    // blocks (e.g. n=32 launches 256 threads), so threads with i >= n
    // would otherwise read and write past the end of A and C.
    if (i < n) {
        C[i] = PI * A[i];  // PI is a double, so the product is computed in double
    }
}
__host__
void vecFFT(float* A,float* C, int n){
    // Host wrapper: copy A to the device, run vecFFTKernel so that
    // C[i] = PI * A[i], and copy the result back into C.
    // NOTE(review): runtime-API return codes are not checked, matching
    // the original; real code should wrap these in an error-check macro.
    int c = (n + 255) / 256;           // blocks of 256 threads (integer ceil-div)
    size_t size = n * sizeof(float);   // bytes per vector
    float *d_A, *d_C;
    // Allocate device memory for the input and the output.
    hipMalloc((void**)&d_A, size);
    hipMalloc((void**)&d_C, size);
    // Copy the input vector to device memory.
    hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
    // Launch with the block count computed above (the original left `c`
    // unused and recomputed ceil(n/256.0) in double at the launch site).
    vecFFTKernel<<< c, 256 >>>(d_A, d_C, n);
    // Blocking copy of the result back to the host.
    hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
    // Release device memory.
    hipFree(d_A);
    hipFree(d_C);
}
//Kernel launches are issued inside vecFFT above; main below is the host driver
int main(){
float *A,*C;
int n=32; //must be a power of 2
A=(float*)malloc(n*sizeof(float));
C=(float*)malloc(n*sizeof(float));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
}
vecFFT(A,C,n);
for(i=0;i<n;i++){
printf("%f ",C[i]);
}
free(A);
free(C);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12vecFFTKernelPfS_i
.globl _Z12vecFFTKernelPfS_i
.p2align 8
.type _Z12vecFFTKernelPfS_i,@function
_Z12vecFFTKernelPfS_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
s_mov_b32 s1, 0x400921fb
s_mov_b32 s0, 0x54442d18
v_add_co_u32 v0, vcc_lo, s2, v0
global_load_b32 v2, v[2:3], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[2:3], v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[2:3], v[2:3], s[0:1]
v_cvt_f32_f64_e32 v2, v[2:3]
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12vecFFTKernelPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12vecFFTKernelPfS_i, .Lfunc_end0-_Z12vecFFTKernelPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12vecFFTKernelPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12vecFFTKernelPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<hip/hip_runtime.h>
#include<stdio.h>
#include<math.h>
const double PI = 3.141592653589793238460;
__global__
void vecFFTKernel(float* A, float* C, int n){
    // Scale kernel: C[i] = PI * A[i] for each of the n elements.
    // One thread handles one element via a flat 1-D global index.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Bounds guard: the host rounds the grid up to whole 256-thread
    // blocks (e.g. n=32 launches 256 threads), so threads with i >= n
    // would otherwise read and write past the end of A and C.
    if (i < n) {
        C[i] = PI * A[i];  // PI is a double, so the product is computed in double
    }
}
__host__
void vecFFT(float* A,float* C, int n){
    // Host wrapper: copy A to the device, run vecFFTKernel so that
    // C[i] = PI * A[i], and copy the result back into C.
    // NOTE(review): runtime-API return codes are not checked, matching
    // the original; real code should wrap these in an error-check macro.
    int c = (n + 255) / 256;           // blocks of 256 threads (integer ceil-div)
    size_t size = n * sizeof(float);   // bytes per vector
    float *d_A, *d_C;
    // Allocate device memory for the input and the output.
    hipMalloc((void**)&d_A, size);
    hipMalloc((void**)&d_C, size);
    // Copy the input vector to device memory.
    hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
    // Launch with the block count computed above (the original left `c`
    // unused and recomputed ceil(n/256.0) in double at the launch site).
    vecFFTKernel<<< c, 256 >>>(d_A, d_C, n);
    // Blocking copy of the result back to the host.
    hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
    // Release device memory.
    hipFree(d_A);
    hipFree(d_C);
}
//Kernel launches are issued inside vecFFT above; main below is the host driver
int main(){
float *A,*C;
int n=32; //must be a power of 2
A=(float*)malloc(n*sizeof(float));
C=(float*)malloc(n*sizeof(float));
int i;
for(i=0;i<n;i++){
A[i]=(float)i;
}
vecFFT(A,C,n);
for(i=0;i<n;i++){
printf("%f ",C[i]);
}
free(A);
free(C);
return 0;
} | .text
.file "fft.hip"
.globl _Z27__device_stub__vecFFTKernelPfS_i # -- Begin function _Z27__device_stub__vecFFTKernelPfS_i
.p2align 4, 0x90
.type _Z27__device_stub__vecFFTKernelPfS_i,@function
_Z27__device_stub__vecFFTKernelPfS_i: # @_Z27__device_stub__vecFFTKernelPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12vecFFTKernelPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z27__device_stub__vecFFTKernelPfS_i, .Lfunc_end0-_Z27__device_stub__vecFFTKernelPfS_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z6vecFFTPfS_i
.LCPI1_0:
.quad 0x3f70000000000000 # double 0.00390625
.text
.globl _Z6vecFFTPfS_i
.p2align 4, 0x90
.type _Z6vecFFTPfS_i,@function
_Z6vecFFTPfS_i: # @_Z6vecFFTPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %r15d
movq %rsi, %rbx
movq %rdi, %r12
cvtsi2sd %edx, %xmm0
mulsd .LCPI1_0(%rip), %xmm0
callq ceil@PLT
movsd %xmm0, 24(%rsp) # 8-byte Spill
leal (,%r15,4), %eax
movslq %eax, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
cvttsd2si 24(%rsp), %rax # 8-byte Folded Reload
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl %r15d, 20(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12vecFFTKernelPfS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z6vecFFTPfS_i, .Lfunc_end1-_Z6vecFFTPfS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $128, %edi
callq malloc
movq %rax, %rbx
movl $128, %edi
callq malloc
movq %rax, %r14
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%rax,4)
incq %rax
cmpq $32, %rax
jne .LBB2_1
# %bb.2:
movq %rbx, %rdi
movq %r14, %rsi
movl $32, %edx
callq _Z6vecFFTPfS_i
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_3: # =>This Inner Loop Header: Depth=1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq $32, %r15
jne .LBB2_3
# %bb.4:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12vecFFTKernelPfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12vecFFTKernelPfS_i,@object # @_Z12vecFFTKernelPfS_i
.section .rodata,"a",@progbits
.globl _Z12vecFFTKernelPfS_i
.p2align 3, 0x0
_Z12vecFFTKernelPfS_i:
.quad _Z27__device_stub__vecFFTKernelPfS_i
.size _Z12vecFFTKernelPfS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12vecFFTKernelPfS_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__vecFFTKernelPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12vecFFTKernelPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12vecFFTKernelPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fcc00078e0207 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ F2F.F64.F32 R4, R2 ; /* 0x0000000200047310 */
/* 0x004e240000201800 */
/*00a0*/ DMUL R4, R4, c[0x2][0x0] ; /* 0x0080000004047a28 */
/* 0x001e140000000000 */
/*00b0*/ F2F.F32.F64 R5, R4 ; /* 0x0000000400057310 */
/* 0x001e240000301000 */
/*00c0*/ STG.E [R6.64], R5 ; /* 0x0000000506007986 */
/* 0x001fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12vecFFTKernelPfS_i
.globl _Z12vecFFTKernelPfS_i
.p2align 8
.type _Z12vecFFTKernelPfS_i,@function
_Z12vecFFTKernelPfS_i:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
s_mov_b32 s1, 0x400921fb
s_mov_b32 s0, 0x54442d18
v_add_co_u32 v0, vcc_lo, s2, v0
global_load_b32 v2, v[2:3], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[2:3], v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[2:3], v[2:3], s[0:1]
v_cvt_f32_f64_e32 v2, v[2:3]
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12vecFFTKernelPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12vecFFTKernelPfS_i, .Lfunc_end0-_Z12vecFFTKernelPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12vecFFTKernelPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12vecFFTKernelPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00019e4f_00000000-6_fft.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i
.type _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i, @function
_Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12vecFFTKernelPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i, .-_Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i
.globl _Z12vecFFTKernelPfS_i
.type _Z12vecFFTKernelPfS_i, @function
_Z12vecFFTKernelPfS_i:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z12vecFFTKernelPfS_i, .-_Z12vecFFTKernelPfS_i
.globl _Z6vecFFTPfS_i
.type _Z6vecFFTPfS_i, @function
_Z6vecFFTPfS_i:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r13
movq %rsi, %r12
movl %edx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
pxor %xmm0, %xmm0
cvtsi2sdl %edx, %xmm0
mulsd .LC0(%rip), %xmm0
movsd %xmm0, 8(%rsp)
movsd .LC4(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC1(%rip), %xmm3
ucomisd %xmm1, %xmm3
jbe .L12
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
movapd %xmm0, %xmm3
cmpnlesd %xmm1, %xmm3
movsd .LC3(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm3, %xmm1
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm1
movsd %xmm1, 8(%rsp)
.L12:
leal 0(,%rbp,4), %ebx
movslq %ebx, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
cvttsd2siq 8(%rsp), %rax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L13:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl %ebp, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z35__device_stub__Z12vecFFTKernelPfS_iPfS_i
jmp .L13
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z6vecFFTPfS_i, .-_Z6vecFFTPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "%f "
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $128, %edi
call malloc@PLT
movq %rax, %rbp
movl $128, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %eax
.L19:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $32, %rax
jne .L19
movl $32, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z6vecFFTPfS_i
movq %r12, %rbx
leaq 128(%r12), %r14
leaq .LC5(%rip), %r13
.L20:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r14, %rbx
jne .L20
movq %rbp, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z12vecFFTKernelPfS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z12vecFFTKernelPfS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1064304640
.align 8
.LC1:
.long 0
.long 1127219200
.align 8
.LC3:
.long 0
.long 1072693248
.align 8
.LC4:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "fft.hip"
.globl _Z27__device_stub__vecFFTKernelPfS_i # -- Begin function _Z27__device_stub__vecFFTKernelPfS_i
.p2align 4, 0x90
.type _Z27__device_stub__vecFFTKernelPfS_i,@function
_Z27__device_stub__vecFFTKernelPfS_i: # @_Z27__device_stub__vecFFTKernelPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12vecFFTKernelPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z27__device_stub__vecFFTKernelPfS_i, .Lfunc_end0-_Z27__device_stub__vecFFTKernelPfS_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z6vecFFTPfS_i
.LCPI1_0:
.quad 0x3f70000000000000 # double 0.00390625
.text
.globl _Z6vecFFTPfS_i
.p2align 4, 0x90
.type _Z6vecFFTPfS_i,@function
_Z6vecFFTPfS_i: # @_Z6vecFFTPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %r15d
movq %rsi, %rbx
movq %rdi, %r12
cvtsi2sd %edx, %xmm0
mulsd .LCPI1_0(%rip), %xmm0
callq ceil@PLT
movsd %xmm0, 24(%rsp) # 8-byte Spill
leal (,%r15,4), %eax
movslq %eax, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
cvttsd2si 24(%rsp), %rax # 8-byte Folded Reload
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl %r15d, 20(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12vecFFTKernelPfS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z6vecFFTPfS_i, .Lfunc_end1-_Z6vecFFTPfS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $128, %edi
callq malloc
movq %rax, %rbx
movl $128, %edi
callq malloc
movq %rax, %r14
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%rax,4)
incq %rax
cmpq $32, %rax
jne .LBB2_1
# %bb.2:
movq %rbx, %rdi
movq %r14, %rsi
movl $32, %edx
callq _Z6vecFFTPfS_i
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_3: # =>This Inner Loop Header: Depth=1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq $32, %r15
jne .LBB2_3
# %bb.4:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12vecFFTKernelPfS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12vecFFTKernelPfS_i,@object # @_Z12vecFFTKernelPfS_i
.section .rodata,"a",@progbits
.globl _Z12vecFFTKernelPfS_i
.p2align 3, 0x0
_Z12vecFFTKernelPfS_i:
.quad _Z27__device_stub__vecFFTKernelPfS_i
.size _Z12vecFFTKernelPfS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f "
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12vecFFTKernelPfS_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__vecFFTKernelPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12vecFFTKernelPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda.h>
// Kernel that executes on the CUDA device
// 4-point stencil step: each interior cell of b receives the average of its
// four neighbours in a; boundary cells of b are left untouched.
// The grid is (edge+1) x (edge+1) floats, stored row-major.
// Expected launch: 1-D grid of 1-D blocks covering at least N threads.
__global__ void temp_calc(float *a, float *b, int N, int edge) // a: Source array, b: Target array, N: Total size, edge: Length of edge
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	int w = edge + 1;           // row width, hoisted so it is computed once
	// Calculates the row and column number
	int row = idx / w;
	int col = idx - row * w;
	if (idx < N) {
		if (row > 0 && row < edge && col > 0 && col < edge) { // interior only
			// Multiply by 0.25f rather than dividing by the double literal
			// 4.0, which forced a float->double->float round trip.
			b[row * w + col] = (a[(row - 1) * w + col] +
			                    a[(row + 1) * w + col] +
			                    a[row * w + col - 1] +
			                    a[row * w + col + 1]) * 0.25f;
		}
	}
}
// main routine that executes on the host: runs an iterative 4-point stencil
// (temp_calc) over an (edge+1) x (edge+1) grid of floats for `iter` passes.
int main(void)
{
//clock_t start = clock();
float *a_h, *a_d, *b_d; // Pointer to host & device arrays
int edge = 1000; // Can be changed
const int N = (edge+1) * (edge+1); // Number of elements in arrays
size_t size = N * sizeof(float);
a_h = (float *)malloc(size); // Allocate array on host
// NOTE(review): neither the malloc result nor any CUDA API return code is
// checked in this function; failures would pass silently.
cudaMalloc((void **) &a_d, size); // Allocate array a on device
cudaMalloc((void **) &b_d, size); // Allocate array b on device
// Initialize host array: row 0 gets 150.0 on columns 10..30 and 80.0
// elsewhere; the other three edges get 80.0; interior cells start at 0.0.
for (int i=0; i<=edge; i++) {
for (int j=0; j<=edge; j++){
if(i==0){
if(j>=10 && j<=30){
a_h[i*(edge+1)+j]=150.0;
}
else{
a_h[i*(edge+1)+j]=80.0;
}
}
else{
if(i==edge || j==0 || j==edge){
a_h[i*(edge+1)+j]=80.0;
}
else{
a_h[i*(edge+1)+j]=0.0;
}
}
}
}
// Initialize block size and block number (ceil(N / block_size) blocks)
int block_size = 256;
int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
// Iteration
int iter = 500; // Can be changed
for (int i=0;i<iter;i++){
// Both buffers are uploaded each pass: temp_calc writes only interior
// cells of b_d, so seeding b_d with a_h keeps the boundary values intact.
// NOTE(review): a full H2D + D2H round trip per iteration is costly; the
// data could stay on the device with pointer swapping between passes.
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice); // Copy the host array to the CUDA
cudaMemcpy(b_d, a_h, size, cudaMemcpyHostToDevice);
temp_calc <<< n_blocks, block_size >>> (a_d, b_d, N, edge); // Calculate the values on CUDA
cudaMemcpy(a_h, b_d, sizeof(float)*N, cudaMemcpyDeviceToHost); // Retrieve result from device and store it in host array
}
/*
// Print results
for (int i=0; i<=edge; i++) {
for (int j=0; j<=edge; j++)
printf("%f ", a_h[i*(edge+1)+j]);
printf("\n");
}
clock_t end = (clock() - start)/1000;
printf("time: %ldms\n", end);
*/
// Cleanup
// NOTE(review): b_d is never released — cudaFree(b_d) is missing (leak).
free(a_h); cudaFree(a_d);
} | code for sm_80
Function : _Z9temp_calcPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */
/* 0x000fe200078e00ff */
/*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0030*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fe20007ffe0ff */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e260000002100 */
/*0050*/ IABS R9, R5 ; /* 0x0000000500097213 */
/* 0x000fc80000000000 */
/*0060*/ I2F.RP R4, R9 ; /* 0x0000000900047306 */
/* 0x000e700000209400 */
/*0070*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x002e620000001000 */
/*0080*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x001fe200078e0207 */
/*0090*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x002fc80007ffe0ff */
/*00a0*/ IABS R4, R0 ; /* 0x0000000000047213 */
/* 0x000fe40000000000 */
/*00b0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00c0*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*00d0*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a03 */
/*00e0*/ IMAD R7, R6, R9, RZ ; /* 0x0000000906077224 */
/* 0x000fe200078e02ff */
/*00f0*/ IABS R6, R5 ; /* 0x0000000500067213 */
/* 0x000fc80000000000 */
/*0100*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fc800078e0002 */
/*0110*/ IMAD.MOV R2, RZ, RZ, -R6 ; /* 0x000000ffff027224 */
/* 0x000fe400078e0a06 */
/*0120*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0130*/ IMAD R2, R3, R2, R4 ; /* 0x0000000203027224 */
/* 0x000fca00078e0204 */
/*0140*/ ISETP.GT.U32.AND P2, PT, R9, R2, PT ; /* 0x000000020900720c */
/* 0x000fda0003f44070 */
/*0150*/ @!P2 IMAD.IADD R2, R2, 0x1, -R9 ; /* 0x000000010202a824 */
/* 0x000fe200078e0a09 */
/*0160*/ @!P2 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303a810 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.NE.AND P2, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f45270 */
/*0180*/ ISETP.GE.U32.AND P0, PT, R2, R9, PT ; /* 0x000000090200720c */
/* 0x000fe40003f06070 */
/*0190*/ LOP3.LUT R2, R0, R5, RZ, 0x3c, !PT ; /* 0x0000000500027212 */
/* 0x000fc800078e3cff */
/*01a0*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fce0003f26270 */
/*01b0*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc80003f06270 */
/*01d0*/ @!P1 IADD3 R3, -R3, RZ, RZ ; /* 0x000000ff03039210 */
/* 0x000fe40007ffe1ff */
/*01e0*/ @!P2 LOP3.LUT R3, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff03a212 */
/* 0x000fce00078e33ff */
/*01f0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0200*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe20003f06270 */
/*0210*/ IMAD R2, R5, R3, RZ ; /* 0x0000000305027224 */
/* 0x000fc600078e02ff */
/*0220*/ ISETP.LT.OR P0, PT, R3, 0x1, P0 ; /* 0x000000010300780c */
/* 0x000fe20000701670 */
/*0230*/ IMAD.IADD R3, R0, 0x1, -R2 ; /* 0x0000000100037824 */
/* 0x000fca00078e0a02 */
/*0240*/ ISETP.LT.OR P0, PT, R3, 0x1, P0 ; /* 0x000000010300780c */
/* 0x000fc80000701670 */
/*0250*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x174], P0 ; /* 0x00005d0003007a0c */
/* 0x000fda0000706670 */
/*0260*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0270*/ IADD3 R2, R3, R2, -R5 ; /* 0x0000000203027210 */
/* 0x000fe20007ffe805 */
/*0280*/ IMAD.MOV.U32 R13, RZ, RZ, 0x4 ; /* 0x00000004ff0d7424 */
/* 0x000fe200078e00ff */
/*0290*/ IADD3 R6, R0, -0x1, RZ ; /* 0xffffffff00067810 */
/* 0x000fe20007ffe0ff */
/*02a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*02b0*/ LEA R5, R5, R2, 0x1 ; /* 0x0000000205057211 */
/* 0x000fe200078e08ff */
/*02c0*/ IMAD.WIDE R2, R2, R13, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e020d */
/*02d0*/ IMAD.WIDE R4, R5, R13.reuse, c[0x0][0x160] ; /* 0x0000580005047625 */
/* 0x080fe400078e020d */
/*02e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea4000c1e1900 */
/*02f0*/ IMAD.WIDE R6, R6, R13, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fe400078e020d */
/*0300*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0310*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */
/* 0x000ee8000c1e1900 */
/*0320*/ LDG.E R11, [R6.64+0x8] ; /* 0x00000804060b7981 */
/* 0x000f22000c1e1900 */
/*0330*/ FADD R8, R4, R3 ; /* 0x0000000304087221 */
/* 0x004fc80000000000 */
/*0340*/ FADD R8, R8, R9 ; /* 0x0000000908087221 */
/* 0x008fc80000000000 */
/*0350*/ FADD R11, R8, R11 ; /* 0x0000000b080b7221 */
/* 0x010fe40000000000 */
/*0360*/ IMAD.WIDE R8, R0, R13, c[0x0][0x168] ; /* 0x00005a0000087625 */
/* 0x000fc800078e020d */
/*0370*/ FMUL R11, R11, 0.25 ; /* 0x3e8000000b0b7820 */
/* 0x000fca0000400000 */
/*0380*/ STG.E [R8.64], R11 ; /* 0x0000000b08007986 */
/* 0x000fe2000c101904 */
/*0390*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03a0*/ BRA 0x3a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
// Kernel that executes on the CUDA device.
// One Jacobi relaxation step over an (edge+1) x (edge+1) row-major grid:
// each interior cell of b becomes the mean of its four neighbours in a;
// border cells of b are left untouched (the caller pre-fills them).
__global__ void temp_calc(float *a, float *b, int N, int edge) // a: Source array, b: Target array, N: Total size, edge: Length of edge
{
    // Flat global thread index: one thread per grid cell.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Calculates the row and column number
    int row = idx / (edge+1);
    int col = idx - row * (edge+1);
    if (idx<N) {                                    // guard the grid tail
        if(row>0 && row<edge && col>0 && col<edge)  // Not on the edges
            // Fix: use the float literal 4.0f — the former double literal 4.0
            // promoted the whole average to double precision inside a float kernel.
            b[row*(edge+1)+col] = (a[(row-1)*(edge+1)+col] + a[(row+1)*(edge+1)+col]
                                 + a[row*(edge+1)+col-1] + a[row*(edge+1)+col+1]) / 4.0f;
    }
}
// main routine that executes on the host
int main(void)
{
  //clock_t start = clock();
  float *a_h, *a_d, *b_d;            // Pointer to host & device arrays
  int edge = 1000;                   // Can be changed
  const int N = (edge+1) * (edge+1); // Number of elements in arrays
  size_t size = N * sizeof(float);
  a_h = (float *)malloc(size);       // Allocate array on host
  cudaMalloc((void **) &a_d, size);  // Allocate array a on device
  cudaMalloc((void **) &b_d, size);  // Allocate array b on device
  // Initialize host array: an 80-degree border with a 150-degree hot segment
  // on row 0 (columns 10..30); all interior cells start at 0.
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++){
      if(i==0){
        if(j>=10 && j<=30){
          a_h[i*(edge+1)+j]=150.0;
        }
        else{
          a_h[i*(edge+1)+j]=80.0;
        }
      }
      else{
        if(i==edge || j==0 || j==edge){
          a_h[i*(edge+1)+j]=80.0;
        }
        else{
          a_h[i*(edge+1)+j]=0.0;
        }
      }
    }
  }
  // Initialize block size and block number (ceiling division: one extra block
  // covers the partial tail; the kernel bounds-checks idx<N).
  int block_size = 256;
  int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
  // Iteration: each pass uploads the current state into BOTH device buffers
  // (so b_d keeps the fixed boundary values), relaxes the interior on the
  // GPU, then downloads the result.
  // NOTE(review): re-copying both buffers every iteration is redundant; a
  // device-side a_d/b_d pointer swap would avoid most of the PCIe traffic.
  int iter = 500; // Can be changed
  for (int i=0;i<iter;i++){
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice); // Copy the host array to the CUDA
    cudaMemcpy(b_d, a_h, size, cudaMemcpyHostToDevice);
    temp_calc <<< n_blocks, block_size >>> (a_d, b_d, N, edge); // Calculate the values on CUDA
    cudaMemcpy(a_h, b_d, sizeof(float)*N, cudaMemcpyDeviceToHost); // Retrieve result from device and store it in host array
  }
  /*
  // Print results
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++)
      printf("%f ", a_h[i*(edge+1)+j]);
    printf("\n");
  }
  clock_t end = (clock() - start)/1000;
  printf("time: %ldms\n", end);
  */
  // Cleanup: release the host buffer and BOTH device buffers.
  // Bug fix: b_d was allocated but never freed (device memory leak).
  free(a_h);
  cudaFree(a_d);
  cudaFree(b_d);
} | .file "tmpxft_001414ec_00000000-6_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii
.type _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii, @function
_Z32__device_stub__Z9temp_calcPfS_iiPfS_ii:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9temp_calcPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii, .-_Z32__device_stub__Z9temp_calcPfS_iiPfS_ii
.globl _Z9temp_calcPfS_ii
.type _Z9temp_calcPfS_ii, @function
_Z9temp_calcPfS_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9temp_calcPfS_ii, .-_Z9temp_calcPfS_ii
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $4008004, %edi
call malloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $4008004, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $4008004, %esi
call cudaMalloc@PLT
movl $0, %r8d
movl $0, %esi
movss .LC1(%rip), %xmm0
movaps %xmm0, %xmm1
jmp .L12
.L31:
leal -10(%rax), %ecx
cmpl $20, %ecx
jbe .L29
movss %xmm1, (%rbx,%rax,4)
jmp .L16
.L29:
movl $0x43160000, (%rbx,%rax,4)
jmp .L15
.L25:
movslq %edx, %rcx
movss %xmm0, (%rbx,%rcx,4)
.L16:
addq $1, %rax
addl $1, %edx
cmpq $1001, %rax
je .L30
.L21:
testl %esi, %esi
je .L31
testl %eax, %eax
sete %cl
cmpl $1000, %eax
sete %dil
orb %dil, %cl
jne .L25
cmpl $1000, %esi
je .L25
movslq %edx, %rcx
movl $0x00000000, (%rbx,%rcx,4)
.L15:
addq $1, %rax
addl $1, %edx
jmp .L21
.L30:
addl $1, %esi
addl $1001, %r8d
cmpl $1001, %esi
je .L24
.L12:
movl %r8d, %edx
movl $0, %eax
jmp .L21
.L24:
movl $500, %ebp
jmp .L20
.L22:
movl $2, %ecx
movl $4008004, %edx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
subl $1, %ebp
je .L32
.L20:
movl $1, %ecx
movl $4008004, %edx
movq %rbx, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4008004, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $3915, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L22
movl $1000, %ecx
movl $1002001, %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii
jmp .L22
.L32:
movq %rbx, %rdi
call free@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L33
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "_Z9temp_calcPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9temp_calcPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1117782016
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
// Kernel that executes on the CUDA device.
// One Jacobi relaxation step over an (edge+1) x (edge+1) row-major grid:
// each interior cell of b becomes the mean of its four neighbours in a;
// border cells of b are left untouched (the caller pre-fills them).
__global__ void temp_calc(float *a, float *b, int N, int edge) // a: Source array, b: Target array, N: Total size, edge: Length of edge
{
    // Flat global thread index: one thread per grid cell.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Calculates the row and column number
    int row = idx / (edge+1);
    int col = idx - row * (edge+1);
    if (idx<N) {                                    // guard the grid tail
        if(row>0 && row<edge && col>0 && col<edge)  // Not on the edges
            // Fix: use the float literal 4.0f — the former double literal 4.0
            // promoted the whole average to double precision inside a float kernel.
            b[row*(edge+1)+col] = (a[(row-1)*(edge+1)+col] + a[(row+1)*(edge+1)+col]
                                 + a[row*(edge+1)+col-1] + a[row*(edge+1)+col+1]) / 4.0f;
    }
}
// main routine that executes on the host
int main(void)
{
  //clock_t start = clock();
  float *a_h, *a_d, *b_d;            // Pointer to host & device arrays
  int edge = 1000;                   // Can be changed
  const int N = (edge+1) * (edge+1); // Number of elements in arrays
  size_t size = N * sizeof(float);
  a_h = (float *)malloc(size);       // Allocate array on host
  cudaMalloc((void **) &a_d, size);  // Allocate array a on device
  cudaMalloc((void **) &b_d, size);  // Allocate array b on device
  // Initialize host array: an 80-degree border with a 150-degree hot segment
  // on row 0 (columns 10..30); all interior cells start at 0.
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++){
      if(i==0){
        if(j>=10 && j<=30){
          a_h[i*(edge+1)+j]=150.0;
        }
        else{
          a_h[i*(edge+1)+j]=80.0;
        }
      }
      else{
        if(i==edge || j==0 || j==edge){
          a_h[i*(edge+1)+j]=80.0;
        }
        else{
          a_h[i*(edge+1)+j]=0.0;
        }
      }
    }
  }
  // Initialize block size and block number (ceiling division: one extra block
  // covers the partial tail; the kernel bounds-checks idx<N).
  int block_size = 256;
  int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
  // Iteration: each pass uploads the current state into BOTH device buffers
  // (so b_d keeps the fixed boundary values), relaxes the interior on the
  // GPU, then downloads the result.
  // NOTE(review): re-copying both buffers every iteration is redundant; a
  // device-side a_d/b_d pointer swap would avoid most of the PCIe traffic.
  int iter = 500; // Can be changed
  for (int i=0;i<iter;i++){
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice); // Copy the host array to the CUDA
    cudaMemcpy(b_d, a_h, size, cudaMemcpyHostToDevice);
    temp_calc <<< n_blocks, block_size >>> (a_d, b_d, N, edge); // Calculate the values on CUDA
    cudaMemcpy(a_h, b_d, sizeof(float)*N, cudaMemcpyDeviceToHost); // Retrieve result from device and store it in host array
  }
  /*
  // Print results
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++)
      printf("%f ", a_h[i*(edge+1)+j]);
    printf("\n");
  }
  clock_t end = (clock() - start)/1000;
  printf("time: %ldms\n", end);
  */
  // Cleanup: release the host buffer and BOTH device buffers.
  // Bug fix: b_d was allocated but never freed (device memory leak).
  free(a_h);
  cudaFree(a_d);
  cudaFree(b_d);
} | #include <stdio.h>
#include <hip/hip_runtime.h>
// Kernel that executes on the CUDA device.
// One Jacobi relaxation step over an (edge+1) x (edge+1) row-major grid:
// each interior cell of b becomes the mean of its four neighbours in a;
// border cells of b are left untouched (the caller pre-fills them).
__global__ void temp_calc(float *a, float *b, int N, int edge) // a: Source array, b: Target array, N: Total size, edge: Length of edge
{
    // Flat global thread index: one thread per grid cell.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Calculates the row and column number
    int row = idx / (edge+1);
    int col = idx - row * (edge+1);
    if (idx<N) {                                    // guard the grid tail
        if(row>0 && row<edge && col>0 && col<edge)  // Not on the edges
            // Fix: use the float literal 4.0f — the former double literal 4.0
            // promoted the whole average to double precision inside a float kernel.
            b[row*(edge+1)+col] = (a[(row-1)*(edge+1)+col] + a[(row+1)*(edge+1)+col]
                                 + a[row*(edge+1)+col-1] + a[row*(edge+1)+col+1]) / 4.0f;
    }
}
// main routine that executes on the host
int main(void)
{
  //clock_t start = clock();
  float *a_h, *a_d, *b_d;            // Pointer to host & device arrays
  int edge = 1000;                   // Can be changed
  const int N = (edge+1) * (edge+1); // Number of elements in arrays
  size_t size = N * sizeof(float);
  a_h = (float *)malloc(size);       // Allocate array on host
  hipMalloc((void **) &a_d, size);   // Allocate array a on device
  hipMalloc((void **) &b_d, size);   // Allocate array b on device
  // Initialize host array: an 80-degree border with a 150-degree hot segment
  // on row 0 (columns 10..30); all interior cells start at 0.
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++){
      if(i==0){
        if(j>=10 && j<=30){
          a_h[i*(edge+1)+j]=150.0;
        }
        else{
          a_h[i*(edge+1)+j]=80.0;
        }
      }
      else{
        if(i==edge || j==0 || j==edge){
          a_h[i*(edge+1)+j]=80.0;
        }
        else{
          a_h[i*(edge+1)+j]=0.0;
        }
      }
    }
  }
  // Initialize block size and block number (ceiling division: one extra block
  // covers the partial tail; the kernel bounds-checks idx<N).
  int block_size = 256;
  int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
  // Iteration: each pass uploads the current state into BOTH device buffers
  // (so b_d keeps the fixed boundary values), relaxes the interior on the
  // GPU, then downloads the result.
  // NOTE(review): re-copying both buffers every iteration is redundant; a
  // device-side a_d/b_d pointer swap would avoid most of the PCIe traffic.
  int iter = 500; // Can be changed
  for (int i=0;i<iter;i++){
    hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); // Copy the host array to the CUDA
    hipMemcpy(b_d, a_h, size, hipMemcpyHostToDevice);
    temp_calc <<< n_blocks, block_size >>> (a_d, b_d, N, edge); // Calculate the values on CUDA
    hipMemcpy(a_h, b_d, sizeof(float)*N, hipMemcpyDeviceToHost); // Retrieve result from device and store it in host array
  }
  /*
  // Print results
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++)
      printf("%f ", a_h[i*(edge+1)+j]);
    printf("\n");
  }
  clock_t end = (clock() - start)/1000;
  printf("time: %ldms\n", end);
  */
  // Cleanup: release the host buffer and BOTH device buffers.
  // Bug fix: b_d was allocated but never freed (device memory leak).
  free(a_h);
  hipFree(a_d);
  hipFree(b_d);
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
// Kernel that executes on the CUDA device.
// One Jacobi relaxation step over an (edge+1) x (edge+1) row-major grid:
// each interior cell of b becomes the mean of its four neighbours in a;
// border cells of b are left untouched (the caller pre-fills them).
__global__ void temp_calc(float *a, float *b, int N, int edge) // a: Source array, b: Target array, N: Total size, edge: Length of edge
{
    // Flat global thread index: one thread per grid cell.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Calculates the row and column number
    int row = idx / (edge+1);
    int col = idx - row * (edge+1);
    if (idx<N) {                                    // guard the grid tail
        if(row>0 && row<edge && col>0 && col<edge)  // Not on the edges
            // Fix: use the float literal 4.0f — the former double literal 4.0
            // promoted the whole average to double precision inside a float kernel.
            b[row*(edge+1)+col] = (a[(row-1)*(edge+1)+col] + a[(row+1)*(edge+1)+col]
                                 + a[row*(edge+1)+col-1] + a[row*(edge+1)+col+1]) / 4.0f;
    }
}
// main routine that executes on the host
int main(void)
{
  //clock_t start = clock();
  float *a_h, *a_d, *b_d;            // Pointer to host & device arrays
  int edge = 1000;                   // Can be changed
  const int N = (edge+1) * (edge+1); // Number of elements in arrays
  size_t size = N * sizeof(float);
  a_h = (float *)malloc(size);       // Allocate array on host
  hipMalloc((void **) &a_d, size);   // Allocate array a on device
  hipMalloc((void **) &b_d, size);   // Allocate array b on device
  // Initialize host array: an 80-degree border with a 150-degree hot segment
  // on row 0 (columns 10..30); all interior cells start at 0.
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++){
      if(i==0){
        if(j>=10 && j<=30){
          a_h[i*(edge+1)+j]=150.0;
        }
        else{
          a_h[i*(edge+1)+j]=80.0;
        }
      }
      else{
        if(i==edge || j==0 || j==edge){
          a_h[i*(edge+1)+j]=80.0;
        }
        else{
          a_h[i*(edge+1)+j]=0.0;
        }
      }
    }
  }
  // Initialize block size and block number (ceiling division: one extra block
  // covers the partial tail; the kernel bounds-checks idx<N).
  int block_size = 256;
  int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
  // Iteration: each pass uploads the current state into BOTH device buffers
  // (so b_d keeps the fixed boundary values), relaxes the interior on the
  // GPU, then downloads the result.
  // NOTE(review): re-copying both buffers every iteration is redundant; a
  // device-side a_d/b_d pointer swap would avoid most of the PCIe traffic.
  int iter = 500; // Can be changed
  for (int i=0;i<iter;i++){
    hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); // Copy the host array to the CUDA
    hipMemcpy(b_d, a_h, size, hipMemcpyHostToDevice);
    temp_calc <<< n_blocks, block_size >>> (a_d, b_d, N, edge); // Calculate the values on CUDA
    hipMemcpy(a_h, b_d, sizeof(float)*N, hipMemcpyDeviceToHost); // Retrieve result from device and store it in host array
  }
  /*
  // Print results
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++)
      printf("%f ", a_h[i*(edge+1)+j]);
    printf("\n");
  }
  clock_t end = (clock() - start)/1000;
  printf("time: %ldms\n", end);
  */
  // Cleanup: release the host buffer and BOTH device buffers.
  // Bug fix: b_d was allocated but never freed (device memory leak).
  free(a_h);
  hipFree(a_d);
  hipFree(b_d);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9temp_calcPfS_ii
.globl _Z9temp_calcPfS_ii
.p2align 8
.type _Z9temp_calcPfS_ii,@function
_Z9temp_calcPfS_ii:
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x10
s_load_b32 s2, s[0:1], 0x24
s_waitcnt lgkmcnt(0)
s_add_i32 s5, s7, 1
s_and_b32 s2, s2, 0xffff
s_ashr_i32 s3, s5, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s4, s5, s3
s_xor_b32 s4, s4, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_u32_e32 v1, s4
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1)
v_cvt_u32_f32_e32 v3, v1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_sub_i32 s2, 0, s4
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v0, s2, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_mul_hi_u32 v0, v3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v4, v1, v2
v_xor_b32_e32 v4, v4, v2
v_xor_b32_e32 v2, s3, v2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v3, v0
v_mul_hi_u32 v0, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v0, s4
v_sub_nc_u32_e32 v3, v4, v3
v_add_nc_u32_e32 v4, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s4, v3
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_dual_cndmask_b32 v3, v3, v5 :: v_dual_cndmask_b32 v0, v0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_add_nc_u32_e32 v4, 1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v0, v0, v4, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s6, v1
v_xor_b32_e32 v0, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v3, v0, v2
v_mul_lo_u32 v2, v3, s5
v_cmp_lt_i32_e64 s2, 0, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_and_b32 s2, s2, vcc_lo
v_sub_nc_u32_e32 v0, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_max_i32_e32 v4, v3, v0
v_cmp_lt_i32_e64 s3, 0, v0
v_cmp_gt_i32_e64 s4, s7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s4, s3
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v4, -1, v3
s_load_b128 s[0:3], s[0:1], 0x0
v_add3_u32 v3, v2, s5, v0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[5:6], null, v4, s5, v[0:1]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_lshlrev_b64 v[3:4], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s0, v5
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v6, vcc_lo
v_add_co_u32 v6, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v1, vcc_lo
s_clause 0x3
global_load_b32 v4, v[4:5], off
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[6:7], off offset:-4
global_load_b32 v5, v[6:7], off offset:4
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(2)
v_add_f32_e32 v2, v4, v2
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f32_e32 v2, v2, v3
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v5
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v2, 0x3e800000, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9temp_calcPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9temp_calcPfS_ii, .Lfunc_end0-_Z9temp_calcPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9temp_calcPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9temp_calcPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
// Kernel that executes on the CUDA device.
// One Jacobi relaxation step over an (edge+1) x (edge+1) row-major grid:
// each interior cell of b becomes the mean of its four neighbours in a;
// border cells of b are left untouched (the caller pre-fills them).
__global__ void temp_calc(float *a, float *b, int N, int edge) // a: Source array, b: Target array, N: Total size, edge: Length of edge
{
    // Flat global thread index: one thread per grid cell.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Calculates the row and column number
    int row = idx / (edge+1);
    int col = idx - row * (edge+1);
    if (idx<N) {                                    // guard the grid tail
        if(row>0 && row<edge && col>0 && col<edge)  // Not on the edges
            // Fix: use the float literal 4.0f — the former double literal 4.0
            // promoted the whole average to double precision inside a float kernel.
            b[row*(edge+1)+col] = (a[(row-1)*(edge+1)+col] + a[(row+1)*(edge+1)+col]
                                 + a[row*(edge+1)+col-1] + a[row*(edge+1)+col+1]) / 4.0f;
    }
}
// main routine that executes on the host
int main(void)
{
  //clock_t start = clock();
  float *a_h, *a_d, *b_d;            // Pointer to host & device arrays
  int edge = 1000;                   // Can be changed
  const int N = (edge+1) * (edge+1); // Number of elements in arrays
  size_t size = N * sizeof(float);
  a_h = (float *)malloc(size);       // Allocate array on host
  hipMalloc((void **) &a_d, size);   // Allocate array a on device
  hipMalloc((void **) &b_d, size);   // Allocate array b on device
  // Initialize host array: an 80-degree border with a 150-degree hot segment
  // on row 0 (columns 10..30); all interior cells start at 0.
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++){
      if(i==0){
        if(j>=10 && j<=30){
          a_h[i*(edge+1)+j]=150.0;
        }
        else{
          a_h[i*(edge+1)+j]=80.0;
        }
      }
      else{
        if(i==edge || j==0 || j==edge){
          a_h[i*(edge+1)+j]=80.0;
        }
        else{
          a_h[i*(edge+1)+j]=0.0;
        }
      }
    }
  }
  // Initialize block size and block number (ceiling division: one extra block
  // covers the partial tail; the kernel bounds-checks idx<N).
  int block_size = 256;
  int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
  // Iteration: each pass uploads the current state into BOTH device buffers
  // (so b_d keeps the fixed boundary values), relaxes the interior on the
  // GPU, then downloads the result.
  // NOTE(review): re-copying both buffers every iteration is redundant; a
  // device-side a_d/b_d pointer swap would avoid most of the PCIe traffic.
  int iter = 500; // Can be changed
  for (int i=0;i<iter;i++){
    hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); // Copy the host array to the CUDA
    hipMemcpy(b_d, a_h, size, hipMemcpyHostToDevice);
    temp_calc <<< n_blocks, block_size >>> (a_d, b_d, N, edge); // Calculate the values on CUDA
    hipMemcpy(a_h, b_d, sizeof(float)*N, hipMemcpyDeviceToHost); // Retrieve result from device and store it in host array
  }
  /*
  // Print results
  for (int i=0; i<=edge; i++) {
    for (int j=0; j<=edge; j++)
      printf("%f ", a_h[i*(edge+1)+j]);
    printf("\n");
  }
  clock_t end = (clock() - start)/1000;
  printf("time: %ldms\n", end);
  */
  // Cleanup: release the host buffer and BOTH device buffers.
  // Bug fix: b_d was allocated but never freed (device memory leak).
  free(a_h);
  hipFree(a_d);
  hipFree(b_d);
} | .text
.file "cuda.hip"
.globl _Z24__device_stub__temp_calcPfS_ii # -- Begin function _Z24__device_stub__temp_calcPfS_ii
.p2align 4, 0x90
.type _Z24__device_stub__temp_calcPfS_ii,@function
_Z24__device_stub__temp_calcPfS_ii: # @_Z24__device_stub__temp_calcPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9temp_calcPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__temp_calcPfS_ii, .Lfunc_end0-_Z24__device_stub__temp_calcPfS_ii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $4008004, %edi # imm = 0x3D2844
callq malloc
movq %rax, %rbx
leaq 16(%rsp), %rdi
movl $4008004, %esi # imm = 0x3D2844
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4008004, %esi # imm = 0x3D2844
callq hipMalloc
xorl %eax, %eax
movq %rbx, %rcx
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_1 Depth=1
incq %rax
addq $4004, %rcx # imm = 0xFA4
cmpq $1001, %rax # imm = 0x3E9
je .LBB1_7
.LBB1_1: # %.preheader65
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
xorl %edx, %edx
jmp .LBB1_2
.p2align 4, 0x90
.LBB1_15: # in Loop: Header=BB1_2 Depth=2
movl $1117782016, (%rcx,%rdx,4) # imm = 0x42A00000
.LBB1_17: # in Loop: Header=BB1_2 Depth=2
incq %rdx
cmpq $1001, %rdx # imm = 0x3E9
je .LBB1_6
.LBB1_2: # Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $1000, %rax # imm = 0x3E8
je .LBB1_15
# %bb.3: # in Loop: Header=BB1_2 Depth=2
testq %rax, %rax
jne .LBB1_13
# %bb.4: # in Loop: Header=BB1_2 Depth=2
leal -10(%rdx), %esi
cmpl $20, %esi
ja .LBB1_12
# %bb.5: # in Loop: Header=BB1_2 Depth=2
movl $1125515264, (%rbx,%rdx,4) # imm = 0x43160000
jmp .LBB1_17
.p2align 4, 0x90
.LBB1_13: # %switch.early.test
# in Loop: Header=BB1_2 Depth=2
cmpl $1000, %edx # imm = 0x3E8
je .LBB1_15
# %bb.14: # %switch.early.test
# in Loop: Header=BB1_2 Depth=2
testl %edx, %edx
je .LBB1_15
# %bb.16: # in Loop: Header=BB1_2 Depth=2
movl $0, (%rcx,%rdx,4)
jmp .LBB1_17
.LBB1_12: # in Loop: Header=BB1_2 Depth=2
movl $1117782016, (%rbx,%rdx,4) # imm = 0x42A00000
jmp .LBB1_17
.LBB1_7: # %.preheader
movl $500, %ebp # imm = 0x1F4
movabsq $4294967552, %r14 # imm = 0x100000100
leaq 3659(%r14), %r15
leaq 32(%rsp), %r12
leaq 96(%rsp), %r13
jmp .LBB1_8
.p2align 4, 0x90
.LBB1_10: # in Loop: Header=BB1_8 Depth=1
movq 8(%rsp), %rsi
movl $4008004, %edx # imm = 0x3D2844
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
decl %ebp
je .LBB1_11
.LBB1_8: # =>This Inner Loop Header: Depth=1
movq 16(%rsp), %rdi
movl $4008004, %edx # imm = 0x3D2844
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4008004, %edx # imm = 0x3D2844
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_10
# %bb.9: # in Loop: Header=BB1_8 Depth=1
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $1002001, 28(%rsp) # imm = 0xF4A11
movl $1000, 24(%rsp) # imm = 0x3E8
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 24(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
movq %r12, %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
movl $_Z9temp_calcPfS_ii, %edi
movq %r13, %r9
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_10
.LBB1_11:
movq %rbx, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9temp_calcPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9temp_calcPfS_ii,@object # @_Z9temp_calcPfS_ii
.section .rodata,"a",@progbits
.globl _Z9temp_calcPfS_ii
.p2align 3, 0x0
_Z9temp_calcPfS_ii:
.quad _Z24__device_stub__temp_calcPfS_ii
.size _Z9temp_calcPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9temp_calcPfS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__temp_calcPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9temp_calcPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9temp_calcPfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x174] ; /* 0x00005d00ff057624 */
/* 0x000fe200078e00ff */
/*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0030*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fe20007ffe0ff */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e260000002100 */
/*0050*/ IABS R9, R5 ; /* 0x0000000500097213 */
/* 0x000fc80000000000 */
/*0060*/ I2F.RP R4, R9 ; /* 0x0000000900047306 */
/* 0x000e700000209400 */
/*0070*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x002e620000001000 */
/*0080*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x001fe200078e0207 */
/*0090*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x002fc80007ffe0ff */
/*00a0*/ IABS R4, R0 ; /* 0x0000000000047213 */
/* 0x000fe40000000000 */
/*00b0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00c0*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*00d0*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a03 */
/*00e0*/ IMAD R7, R6, R9, RZ ; /* 0x0000000906077224 */
/* 0x000fe200078e02ff */
/*00f0*/ IABS R6, R5 ; /* 0x0000000500067213 */
/* 0x000fc80000000000 */
/*0100*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fc800078e0002 */
/*0110*/ IMAD.MOV R2, RZ, RZ, -R6 ; /* 0x000000ffff027224 */
/* 0x000fe400078e0a06 */
/*0120*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0130*/ IMAD R2, R3, R2, R4 ; /* 0x0000000203027224 */
/* 0x000fca00078e0204 */
/*0140*/ ISETP.GT.U32.AND P2, PT, R9, R2, PT ; /* 0x000000020900720c */
/* 0x000fda0003f44070 */
/*0150*/ @!P2 IMAD.IADD R2, R2, 0x1, -R9 ; /* 0x000000010202a824 */
/* 0x000fe200078e0a09 */
/*0160*/ @!P2 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303a810 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.NE.AND P2, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f45270 */
/*0180*/ ISETP.GE.U32.AND P0, PT, R2, R9, PT ; /* 0x000000090200720c */
/* 0x000fe40003f06070 */
/*0190*/ LOP3.LUT R2, R0, R5, RZ, 0x3c, !PT ; /* 0x0000000500027212 */
/* 0x000fc800078e3cff */
/*01a0*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fce0003f26270 */
/*01b0*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fc80003f06270 */
/*01d0*/ @!P1 IADD3 R3, -R3, RZ, RZ ; /* 0x000000ff03039210 */
/* 0x000fe40007ffe1ff */
/*01e0*/ @!P2 LOP3.LUT R3, RZ, R5, RZ, 0x33, !PT ; /* 0x00000005ff03a212 */
/* 0x000fce00078e33ff */
/*01f0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0200*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe20003f06270 */
/*0210*/ IMAD R2, R5, R3, RZ ; /* 0x0000000305027224 */
/* 0x000fc600078e02ff */
/*0220*/ ISETP.LT.OR P0, PT, R3, 0x1, P0 ; /* 0x000000010300780c */
/* 0x000fe20000701670 */
/*0230*/ IMAD.IADD R3, R0, 0x1, -R2 ; /* 0x0000000100037824 */
/* 0x000fca00078e0a02 */
/*0240*/ ISETP.LT.OR P0, PT, R3, 0x1, P0 ; /* 0x000000010300780c */
/* 0x000fc80000701670 */
/*0250*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x174], P0 ; /* 0x00005d0003007a0c */
/* 0x000fda0000706670 */
/*0260*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0270*/ IADD3 R2, R3, R2, -R5 ; /* 0x0000000203027210 */
/* 0x000fe20007ffe805 */
/*0280*/ IMAD.MOV.U32 R13, RZ, RZ, 0x4 ; /* 0x00000004ff0d7424 */
/* 0x000fe200078e00ff */
/*0290*/ IADD3 R6, R0, -0x1, RZ ; /* 0xffffffff00067810 */
/* 0x000fe20007ffe0ff */
/*02a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*02b0*/ LEA R5, R5, R2, 0x1 ; /* 0x0000000205057211 */
/* 0x000fe200078e08ff */
/*02c0*/ IMAD.WIDE R2, R2, R13, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e020d */
/*02d0*/ IMAD.WIDE R4, R5, R13.reuse, c[0x0][0x160] ; /* 0x0000580005047625 */
/* 0x080fe400078e020d */
/*02e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea4000c1e1900 */
/*02f0*/ IMAD.WIDE R6, R6, R13, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fe400078e020d */
/*0300*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0310*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */
/* 0x000ee8000c1e1900 */
/*0320*/ LDG.E R11, [R6.64+0x8] ; /* 0x00000804060b7981 */
/* 0x000f22000c1e1900 */
/*0330*/ FADD R8, R4, R3 ; /* 0x0000000304087221 */
/* 0x004fc80000000000 */
/*0340*/ FADD R8, R8, R9 ; /* 0x0000000908087221 */
/* 0x008fc80000000000 */
/*0350*/ FADD R11, R8, R11 ; /* 0x0000000b080b7221 */
/* 0x010fe40000000000 */
/*0360*/ IMAD.WIDE R8, R0, R13, c[0x0][0x168] ; /* 0x00005a0000087625 */
/* 0x000fc800078e020d */
/*0370*/ FMUL R11, R11, 0.25 ; /* 0x3e8000000b0b7820 */
/* 0x000fca0000400000 */
/*0380*/ STG.E [R8.64], R11 ; /* 0x0000000b08007986 */
/* 0x000fe2000c101904 */
/*0390*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03a0*/ BRA 0x3a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9temp_calcPfS_ii
.globl _Z9temp_calcPfS_ii
.p2align 8
.type _Z9temp_calcPfS_ii,@function
_Z9temp_calcPfS_ii:
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x10
s_load_b32 s2, s[0:1], 0x24
s_waitcnt lgkmcnt(0)
s_add_i32 s5, s7, 1
s_and_b32 s2, s2, 0xffff
s_ashr_i32 s3, s5, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s4, s5, s3
s_xor_b32 s4, s4, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_u32_e32 v1, s4
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1)
v_cvt_u32_f32_e32 v3, v1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_sub_i32 s2, 0, s4
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_mul_lo_u32 v0, s2, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
v_mul_hi_u32 v0, v3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v4, v1, v2
v_xor_b32_e32 v4, v4, v2
v_xor_b32_e32 v2, s3, v2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v3, v0
v_mul_hi_u32 v0, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v0, s4
v_sub_nc_u32_e32 v3, v4, v3
v_add_nc_u32_e32 v4, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s4, v3
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_dual_cndmask_b32 v3, v3, v5 :: v_dual_cndmask_b32 v0, v0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_add_nc_u32_e32 v4, 1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v0, v0, v4, vcc_lo
v_cmp_gt_i32_e32 vcc_lo, s6, v1
v_xor_b32_e32 v0, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v3, v0, v2
v_mul_lo_u32 v2, v3, s5
v_cmp_lt_i32_e64 s2, 0, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_and_b32 s2, s2, vcc_lo
v_sub_nc_u32_e32 v0, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_max_i32_e32 v4, v3, v0
v_cmp_lt_i32_e64 s3, 0, v0
v_cmp_gt_i32_e64 s4, s7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s4, s3
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v4, -1, v3
s_load_b128 s[0:3], s[0:1], 0x0
v_add3_u32 v3, v2, s5, v0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[5:6], null, v4, s5, v[0:1]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_lshlrev_b64 v[3:4], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s0, v3
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s0, v5
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v6, vcc_lo
v_add_co_u32 v6, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v1, vcc_lo
s_clause 0x3
global_load_b32 v4, v[4:5], off
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[6:7], off offset:-4
global_load_b32 v5, v[6:7], off offset:4
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(2)
v_add_f32_e32 v2, v4, v2
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f32_e32 v2, v2, v3
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v5
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v2, 0x3e800000, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9temp_calcPfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9temp_calcPfS_ii, .Lfunc_end0-_Z9temp_calcPfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9temp_calcPfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9temp_calcPfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001414ec_00000000-6_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii
.type _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii, @function
_Z32__device_stub__Z9temp_calcPfS_iiPfS_ii:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9temp_calcPfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii, .-_Z32__device_stub__Z9temp_calcPfS_iiPfS_ii
.globl _Z9temp_calcPfS_ii
.type _Z9temp_calcPfS_ii, @function
_Z9temp_calcPfS_ii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9temp_calcPfS_ii, .-_Z9temp_calcPfS_ii
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $4008004, %edi
call malloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $4008004, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $4008004, %esi
call cudaMalloc@PLT
movl $0, %r8d
movl $0, %esi
movss .LC1(%rip), %xmm0
movaps %xmm0, %xmm1
jmp .L12
.L31:
leal -10(%rax), %ecx
cmpl $20, %ecx
jbe .L29
movss %xmm1, (%rbx,%rax,4)
jmp .L16
.L29:
movl $0x43160000, (%rbx,%rax,4)
jmp .L15
.L25:
movslq %edx, %rcx
movss %xmm0, (%rbx,%rcx,4)
.L16:
addq $1, %rax
addl $1, %edx
cmpq $1001, %rax
je .L30
.L21:
testl %esi, %esi
je .L31
testl %eax, %eax
sete %cl
cmpl $1000, %eax
sete %dil
orb %dil, %cl
jne .L25
cmpl $1000, %esi
je .L25
movslq %edx, %rcx
movl $0x00000000, (%rbx,%rcx,4)
.L15:
addq $1, %rax
addl $1, %edx
jmp .L21
.L30:
addl $1, %esi
addl $1001, %r8d
cmpl $1001, %esi
je .L24
.L12:
movl %r8d, %edx
movl $0, %eax
jmp .L21
.L24:
movl $500, %ebp
jmp .L20
.L22:
movl $2, %ecx
movl $4008004, %edx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
subl $1, %ebp
je .L32
.L20:
movl $1, %ecx
movl $4008004, %edx
movq %rbx, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4008004, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $3915, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L22
movl $1000, %ecx
movl $1002001, %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z32__device_stub__Z9temp_calcPfS_iiPfS_ii
jmp .L22
.L32:
movq %rbx, %rdi
call free@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L33
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "_Z9temp_calcPfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9temp_calcPfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1117782016
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda.hip"
.globl _Z24__device_stub__temp_calcPfS_ii # -- Begin function _Z24__device_stub__temp_calcPfS_ii
.p2align 4, 0x90
.type _Z24__device_stub__temp_calcPfS_ii,@function
_Z24__device_stub__temp_calcPfS_ii: # @_Z24__device_stub__temp_calcPfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9temp_calcPfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__temp_calcPfS_ii, .Lfunc_end0-_Z24__device_stub__temp_calcPfS_ii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $4008004, %edi # imm = 0x3D2844
callq malloc
movq %rax, %rbx
leaq 16(%rsp), %rdi
movl $4008004, %esi # imm = 0x3D2844
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4008004, %esi # imm = 0x3D2844
callq hipMalloc
xorl %eax, %eax
movq %rbx, %rcx
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_1 Depth=1
incq %rax
addq $4004, %rcx # imm = 0xFA4
cmpq $1001, %rax # imm = 0x3E9
je .LBB1_7
.LBB1_1: # %.preheader65
# =>This Loop Header: Depth=1
# Child Loop BB1_2 Depth 2
xorl %edx, %edx
jmp .LBB1_2
.p2align 4, 0x90
.LBB1_15: # in Loop: Header=BB1_2 Depth=2
movl $1117782016, (%rcx,%rdx,4) # imm = 0x42A00000
.LBB1_17: # in Loop: Header=BB1_2 Depth=2
incq %rdx
cmpq $1001, %rdx # imm = 0x3E9
je .LBB1_6
.LBB1_2: # Parent Loop BB1_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpq $1000, %rax # imm = 0x3E8
je .LBB1_15
# %bb.3: # in Loop: Header=BB1_2 Depth=2
testq %rax, %rax
jne .LBB1_13
# %bb.4: # in Loop: Header=BB1_2 Depth=2
leal -10(%rdx), %esi
cmpl $20, %esi
ja .LBB1_12
# %bb.5: # in Loop: Header=BB1_2 Depth=2
movl $1125515264, (%rbx,%rdx,4) # imm = 0x43160000
jmp .LBB1_17
.p2align 4, 0x90
.LBB1_13: # %switch.early.test
# in Loop: Header=BB1_2 Depth=2
cmpl $1000, %edx # imm = 0x3E8
je .LBB1_15
# %bb.14: # %switch.early.test
# in Loop: Header=BB1_2 Depth=2
testl %edx, %edx
je .LBB1_15
# %bb.16: # in Loop: Header=BB1_2 Depth=2
movl $0, (%rcx,%rdx,4)
jmp .LBB1_17
.LBB1_12: # in Loop: Header=BB1_2 Depth=2
movl $1117782016, (%rbx,%rdx,4) # imm = 0x42A00000
jmp .LBB1_17
.LBB1_7: # %.preheader
movl $500, %ebp # imm = 0x1F4
movabsq $4294967552, %r14 # imm = 0x100000100
leaq 3659(%r14), %r15
leaq 32(%rsp), %r12
leaq 96(%rsp), %r13
jmp .LBB1_8
.p2align 4, 0x90
.LBB1_10: # in Loop: Header=BB1_8 Depth=1
movq 8(%rsp), %rsi
movl $4008004, %edx # imm = 0x3D2844
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
decl %ebp
je .LBB1_11
.LBB1_8: # =>This Inner Loop Header: Depth=1
movq 16(%rsp), %rdi
movl $4008004, %edx # imm = 0x3D2844
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $4008004, %edx # imm = 0x3D2844
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r15, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_10
# %bb.9: # in Loop: Header=BB1_8 Depth=1
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $1002001, 28(%rsp) # imm = 0xF4A11
movl $1000, 24(%rsp) # imm = 0x3E8
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 24(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
movq %r12, %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
movl $_Z9temp_calcPfS_ii, %edi
movq %r13, %r9
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_10
.LBB1_11:
movq %rbx, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9temp_calcPfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9temp_calcPfS_ii,@object # @_Z9temp_calcPfS_ii
.section .rodata,"a",@progbits
.globl _Z9temp_calcPfS_ii
.p2align 3, 0x0
_Z9temp_calcPfS_ii:
.quad _Z24__device_stub__temp_calcPfS_ii
.size _Z9temp_calcPfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9temp_calcPfS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__temp_calcPfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9temp_calcPfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __device__ void MatrixInverse( void* param)
{
float* paramIn = (float*)param;
int N = (int)paramIn[0];
paramIn = paramIn+1;
float* A = paramIn;
float* B = paramIn+N*N;
int x = threadIdx.x;
if (x < N)
{
for (int y = 0; y < N; ++y)
{
float pivot = 0;
for (int i = 0; i < N; ++i)
{
pivot = A[N*y+i]/A[N*i+i];
if (y != i )
{
A[N*y+x] = A[N*y+x]-(pivot*A[N*i+x]);
B[N*y+x] = B[N*y+x]-(pivot*B[N*i+x]);
}
}
}
for (int y = 0; y < N; ++y)
{
for (int i = 0; i < N; ++i)
{
if (y == i)
{
B[N*y+x] /= A[N*y+y];
float div = A[N*y+y];
A[N*y+y] /= div;
}
}
}
}
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __device__ void MatrixInverse( void* param)
{
float* paramIn = (float*)param;
int N = (int)paramIn[0];
paramIn = paramIn+1;
float* A = paramIn;
float* B = paramIn+N*N;
int x = threadIdx.x;
if (x < N)
{
for (int y = 0; y < N; ++y)
{
float pivot = 0;
for (int i = 0; i < N; ++i)
{
pivot = A[N*y+i]/A[N*i+i];
if (y != i )
{
A[N*y+x] = A[N*y+x]-(pivot*A[N*i+x]);
B[N*y+x] = B[N*y+x]-(pivot*B[N*i+x]);
}
}
}
for (int y = 0; y < N; ++y)
{
for (int i = 0; i < N; ++i)
{
if (y == i)
{
B[N*y+x] /= A[N*y+y];
float div = A[N*y+y];
A[N*y+y] /= div;
}
}
}
}
} | .file "tmpxft_000553f3_00000000-6_MatrixInverse.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13MatrixInversePv
.type _Z13MatrixInversePv, @function
_Z13MatrixInversePv:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z13MatrixInversePv, .-_Z13MatrixInversePv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __device__ void MatrixInverse( void* param)
{
float* paramIn = (float*)param;
int N = (int)paramIn[0];
paramIn = paramIn+1;
float* A = paramIn;
float* B = paramIn+N*N;
int x = threadIdx.x;
if (x < N)
{
for (int y = 0; y < N; ++y)
{
float pivot = 0;
for (int i = 0; i < N; ++i)
{
pivot = A[N*y+i]/A[N*i+i];
if (y != i )
{
A[N*y+x] = A[N*y+x]-(pivot*A[N*i+x]);
B[N*y+x] = B[N*y+x]-(pivot*B[N*i+x]);
}
}
}
for (int y = 0; y < N; ++y)
{
for (int i = 0; i < N; ++i)
{
if (y == i)
{
B[N*y+x] /= A[N*y+y];
float div = A[N*y+y];
A[N*y+y] /= div;
}
}
}
}
} | #include <hip/hip_runtime.h>
__device__ void MatrixInverse( void* param)
{
float* paramIn = (float*)param;
int N = (int)paramIn[0];
paramIn = paramIn+1;
float* A = paramIn;
float* B = paramIn+N*N;
int x = threadIdx.x;
if (x < N)
{
for (int y = 0; y < N; ++y)
{
float pivot = 0;
for (int i = 0; i < N; ++i)
{
pivot = A[N*y+i]/A[N*i+i];
if (y != i )
{
A[N*y+x] = A[N*y+x]-(pivot*A[N*i+x]);
B[N*y+x] = B[N*y+x]-(pivot*B[N*i+x]);
}
}
}
for (int y = 0; y < N; ++y)
{
for (int i = 0; i < N; ++i)
{
if (y == i)
{
B[N*y+x] /= A[N*y+y];
float div = A[N*y+y];
A[N*y+y] /= div;
}
}
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__device__ void MatrixInverse( void* param)
{
float* paramIn = (float*)param;
int N = (int)paramIn[0];
paramIn = paramIn+1;
float* A = paramIn;
float* B = paramIn+N*N;
int x = threadIdx.x;
if (x < N)
{
for (int y = 0; y < N; ++y)
{
float pivot = 0;
for (int i = 0; i < N; ++i)
{
pivot = A[N*y+i]/A[N*i+i];
if (y != i )
{
A[N*y+x] = A[N*y+x]-(pivot*A[N*i+x]);
B[N*y+x] = B[N*y+x]-(pivot*B[N*i+x]);
}
}
}
for (int y = 0; y < N; ++y)
{
for (int i = 0; i < N; ++i)
{
if (y == i)
{
B[N*y+x] /= A[N*y+y];
float div = A[N*y+y];
A[N*y+y] /= div;
}
}
}
}
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__device__ void MatrixInverse( void* param)
{
float* paramIn = (float*)param;
int N = (int)paramIn[0];
paramIn = paramIn+1;
float* A = paramIn;
float* B = paramIn+N*N;
int x = threadIdx.x;
if (x < N)
{
for (int y = 0; y < N; ++y)
{
float pivot = 0;
for (int i = 0; i < N; ++i)
{
pivot = A[N*y+i]/A[N*i+i];
if (y != i )
{
A[N*y+x] = A[N*y+x]-(pivot*A[N*i+x]);
B[N*y+x] = B[N*y+x]-(pivot*B[N*i+x]);
}
}
}
for (int y = 0; y < N; ++y)
{
for (int i = 0; i < N; ++i)
{
if (y == i)
{
B[N*y+x] /= A[N*y+y];
float div = A[N*y+y];
A[N*y+y] /= div;
}
}
}
}
} | .text
.file "MatrixInverse.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000553f3_00000000-6_MatrixInverse.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13MatrixInversePv
.type _Z13MatrixInversePv, @function
_Z13MatrixInversePv:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z13MatrixInversePv, .-_Z13MatrixInversePv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "MatrixInverse.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // CUDA运行时头文件
#include <cuda_runtime.h>
#include <chrono>
#include <stdio.h>
#include <string.h>
using namespace std;
#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)
bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
if(code != cudaSuccess){
const char* err_name = cudaGetErrorName(code);
const char* err_message = cudaGetErrorString(code);
printf("runtime error %s:%d %s failed. \n code = %s, message = %s\n", file, line, op, err_name, err_message);
return false;
}
return true;
}
__global__ void add_vector(const float* a, const float* b, float* c, int count){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index >= count) return;
c[index] = a[index] + b[index];
}
__global__ void mul_vector(const float* a, const float* b, float* c, int count){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index >= count) return;
c[index] = a[index] * b[index];
}
cudaStream_t stream1, stream2;
float *a, *b, *c1, *c2;
const int num_element = 100000;
const size_t bytes = sizeof(float) * num_element;
const int blocks = 512;
const int grids = (num_element + blocks - 1) / blocks;
const int ntry = 1000;
// 多个流异步
void async(){
cudaEvent_t event_start1, event_stop1;
cudaEvent_t event_start2, event_stop2;
checkRuntime(cudaEventCreate(&event_start1));
checkRuntime(cudaEventCreate(&event_stop1));
checkRuntime(cudaEventCreate(&event_start2));
checkRuntime(cudaEventCreate(&event_stop2));
auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
checkRuntime(cudaEventRecord(event_start1, stream1));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
checkRuntime(cudaEventRecord(event_stop1, stream1));
checkRuntime(cudaEventRecord(event_start2, stream2));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
checkRuntime(cudaEventRecord(event_stop2, stream2));
checkRuntime(cudaStreamSynchronize(stream1));
checkRuntime(cudaStreamSynchronize(stream2));
auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
float time1, time2;
checkRuntime(cudaEventElapsedTime(&time1, event_start1, event_stop1));
checkRuntime(cudaEventElapsedTime(&time2, event_start2, event_stop2));
printf("async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n", time1, time2, toc - tic);
}
// 单个流串行
void sync(){
cudaEvent_t event_start1, event_stop1;
checkRuntime(cudaEventCreate(&event_start1));
checkRuntime(cudaEventCreate(&event_stop1));
auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
checkRuntime(cudaEventRecord(event_start1, stream1));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
checkRuntime(cudaEventRecord(event_stop1, stream1));
checkRuntime(cudaStreamSynchronize(stream1));
auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
float time1;
checkRuntime(cudaEventElapsedTime(&time1, event_start1, event_stop1));
printf("sync: time1 = %.2f ms, count = %.2f ms\n", time1, toc - tic);
}
// 多个流之间并行
void multi_stream_async(){
// 这个案例主要实现多个流之间互相等待,使用event控制实现
// 存在step1 -> step2 \
// -> step3 -> step4
// stepa /
//
// 这个案例中,存在流程1:step1 -> step2的流程
// 存在流程2:stepa
// 存在流程3:step3 -> step4,step3要求step2与stepa作为输入
// 此时,可以让流程1使用stream1,流程2使用stream2,而流程3继续使用stream1,仅仅在stream1中加入等待(event的等待)
// step1 = add_vector
// step2 = mul_vector
// step3 = add_vector
// step4 = mul_vector
// stepa = add_vector
#define step1 add_vector
#define step2 mul_vector
#define step3 add_vector
#define step4 mul_vector
#define stepa add_vector
cudaEvent_t event_async;
checkRuntime(cudaEventCreate(&event_async));
// stream1的执行流程
step1<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
step2<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
// 等待event_async有事件
checkRuntime(cudaStreamWaitEvent(stream1, event_async));
step3<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
step4<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
// stream2的执行流程
stepa<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
// 为event_async触发事件,通知cudaStreamWaitEvent函数可以继续了
checkRuntime(cudaEventRecord(event_async, stream2));
checkRuntime(cudaStreamSynchronize(stream1));
printf("multi_stream_async done.\n");
}
int main(){
// 本程序实现两个核函数的并行,通过多个流实现
checkRuntime(cudaStreamCreate(&stream1));
checkRuntime(cudaStreamCreate(&stream2));
checkRuntime(cudaMalloc(&a, bytes));
checkRuntime(cudaMalloc(&b, bytes));
checkRuntime(cudaMalloc(&c1, bytes));
checkRuntime(cudaMalloc(&c2, bytes));
// 演示多流之间的异步执行
async();
// 演示单个流内的同步执行
sync();
// 演示多个流之间互相等待的操作
multi_stream_async();
return 0;
} | code for sm_80
Function : _Z10mul_vectorPKfS0_Pfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ FMUL R9, R4, R3 ; /* 0x0000000304097220 */
/* 0x004fca0000400000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z10add_vectorPKfS0_Pfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // CUDA运行时头文件
#include <cuda_runtime.h>
#include <chrono>
#include <stdio.h>
#include <string.h>
using namespace std;
#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)
bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
if(code != cudaSuccess){
const char* err_name = cudaGetErrorName(code);
const char* err_message = cudaGetErrorString(code);
printf("runtime error %s:%d %s failed. \n code = %s, message = %s\n", file, line, op, err_name, err_message);
return false;
}
return true;
}
__global__ void add_vector(const float* a, const float* b, float* c, int count){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index >= count) return;
c[index] = a[index] + b[index];
}
__global__ void mul_vector(const float* a, const float* b, float* c, int count){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index >= count) return;
c[index] = a[index] * b[index];
}
cudaStream_t stream1, stream2;
float *a, *b, *c1, *c2;
const int num_element = 100000;
const size_t bytes = sizeof(float) * num_element;
const int blocks = 512;
const int grids = (num_element + blocks - 1) / blocks;
const int ntry = 1000;
// 多个流异步
void async(){
cudaEvent_t event_start1, event_stop1;
cudaEvent_t event_start2, event_stop2;
checkRuntime(cudaEventCreate(&event_start1));
checkRuntime(cudaEventCreate(&event_stop1));
checkRuntime(cudaEventCreate(&event_start2));
checkRuntime(cudaEventCreate(&event_stop2));
auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
checkRuntime(cudaEventRecord(event_start1, stream1));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
checkRuntime(cudaEventRecord(event_stop1, stream1));
checkRuntime(cudaEventRecord(event_start2, stream2));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
checkRuntime(cudaEventRecord(event_stop2, stream2));
checkRuntime(cudaStreamSynchronize(stream1));
checkRuntime(cudaStreamSynchronize(stream2));
auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
float time1, time2;
checkRuntime(cudaEventElapsedTime(&time1, event_start1, event_stop1));
checkRuntime(cudaEventElapsedTime(&time2, event_start2, event_stop2));
printf("async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n", time1, time2, toc - tic);
}
// 单个流串行
void sync(){
cudaEvent_t event_start1, event_stop1;
checkRuntime(cudaEventCreate(&event_start1));
checkRuntime(cudaEventCreate(&event_stop1));
auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
checkRuntime(cudaEventRecord(event_start1, stream1));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
checkRuntime(cudaEventRecord(event_stop1, stream1));
checkRuntime(cudaStreamSynchronize(stream1));
auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
float time1;
checkRuntime(cudaEventElapsedTime(&time1, event_start1, event_stop1));
printf("sync: time1 = %.2f ms, count = %.2f ms\n", time1, toc - tic);
}
// 多个流之间并行
void multi_stream_async(){
// 这个案例主要实现多个流之间互相等待,使用event控制实现
// 存在step1 -> step2 \
// -> step3 -> step4
// stepa /
//
// 这个案例中,存在流程1:step1 -> step2的流程
// 存在流程2:stepa
// 存在流程3:step3 -> step4,step3要求step2与stepa作为输入
// 此时,可以让流程1使用stream1,流程2使用stream2,而流程3继续使用stream1,仅仅在stream1中加入等待(event的等待)
// step1 = add_vector
// step2 = mul_vector
// step3 = add_vector
// step4 = mul_vector
// stepa = add_vector
#define step1 add_vector
#define step2 mul_vector
#define step3 add_vector
#define step4 mul_vector
#define stepa add_vector
cudaEvent_t event_async;
checkRuntime(cudaEventCreate(&event_async));
// stream1的执行流程
step1<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
step2<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
// 等待event_async有事件
checkRuntime(cudaStreamWaitEvent(stream1, event_async));
step3<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
step4<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
// stream2的执行流程
stepa<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
// 为event_async触发事件,通知cudaStreamWaitEvent函数可以继续了
checkRuntime(cudaEventRecord(event_async, stream2));
checkRuntime(cudaStreamSynchronize(stream1));
printf("multi_stream_async done.\n");
}
int main(){
// 本程序实现两个核函数的并行,通过多个流实现
checkRuntime(cudaStreamCreate(&stream1));
checkRuntime(cudaStreamCreate(&stream2));
checkRuntime(cudaMalloc(&a, bytes));
checkRuntime(cudaMalloc(&b, bytes));
checkRuntime(cudaMalloc(&c1, bytes));
checkRuntime(cudaMalloc(&c2, bytes));
// 演示多流之间的异步执行
async();
// 演示单个流内的同步执行
sync();
// 演示多个流之间互相等待的操作
multi_stream_async();
return 0;
} | .file "tmpxft_000b76a3_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2163:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2163:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "runtime error %s:%d %s failed. \n code = %s, message = %s\n"
.text
.globl _Z20__check_cuda_runtime9cudaErrorPKcS1_i
.type _Z20__check_cuda_runtime9cudaErrorPKcS1_i, @function
_Z20__check_cuda_runtime9cudaErrorPKcS1_i:
.LFB2156:
.cfi_startproc
endbr64
movl $1, %eax
testl %edi, %edi
jne .L10
ret
.L10:
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl %edi, %ebx
movq %rsi, %r12
movq %rdx, %r13
movl %ecx, %ebp
call cudaGetErrorName@PLT
movq %rax, %r14
movl %ebx, %edi
call cudaGetErrorString@PLT
subq $8, %rsp
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
movq %r14, %r9
movq %r12, %r8
movl %ebp, %ecx
movq %r13, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $16, %rsp
.cfi_def_cfa_offset 48
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2156:
.size _Z20__check_cuda_runtime9cudaErrorPKcS1_i, .-_Z20__check_cuda_runtime9cudaErrorPKcS1_i
.globl _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
.type _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi, @function
_Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi:
.LFB2185:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10add_vectorPKfS0_Pfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2185:
.size _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi, .-_Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
.globl _Z10add_vectorPKfS0_Pfi
.type _Z10add_vectorPKfS0_Pfi, @function
_Z10add_vectorPKfS0_Pfi:
.LFB2186:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2186:
.size _Z10add_vectorPKfS0_Pfi, .-_Z10add_vectorPKfS0_Pfi
.section .rodata.str1.8
.align 8
.LC1:
.string "/home/ubuntu/Datasets/stackv2/train-structured/jinmin527/learning-cuda-trt/main/cuda-runtime-api-1.5.2-parallel/src/main.cu"
.align 8
.LC2:
.string "cudaEventCreate(&event_start1)"
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "cudaEventCreate(&event_stop1)"
.section .rodata.str1.8
.align 8
.LC5:
.string "cudaEventRecord(event_start1, stream1)"
.align 8
.LC6:
.string "cudaEventRecord(event_stop1, stream1)"
.align 8
.LC7:
.string "cudaStreamSynchronize(stream1)"
.align 8
.LC8:
.string "cudaEventElapsedTime(&time1, event_start1, event_stop1)"
.align 8
.LC9:
.string "sync: time1 = %.2f ms, count = %.2f ms\n"
.text
.globl _Z4syncv
.type _Z4syncv, @function
_Z4syncv:
.LFB2158:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $80, %ecx
leaq .LC1(%rip), %rbx
movq %rbx, %rdx
leaq .LC2(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $81, %ecx
movq %rbx, %rdx
leaq .LC3(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %rcx
movabsq $2361183241434822607, %rdx
imulq %rdx
sarq $7, %rdx
sarq $63, %rcx
subq %rcx, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
divsd .LC4(%rip), %xmm0
movsd %xmm0, (%rsp)
movq stream1(%rip), %rsi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $84, %ecx
movq %rbx, %rdx
leaq .LC5(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $1000, %ebx
jmp .L21
.L20:
subl $1, %ebx
je .L28
.L21:
movl $512, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $196, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movq stream1(%rip), %r9
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L20
movl $100000, %ecx
movq c1(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
jmp .L20
.L28:
movl $1000, %ebx
jmp .L23
.L22:
subl $1, %ebx
je .L29
.L23:
movl $512, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $196, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movq stream1(%rip), %r9
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L22
movl $100000, %ecx
movq c2(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
jmp .L22
.L29:
movq stream1(%rip), %rsi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $90, %ecx
leaq .LC1(%rip), %rbx
movq %rbx, %rdx
leaq .LC6(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movq stream1(%rip), %rdi
call cudaStreamSynchronize@PLT
movl %eax, %edi
movl $92, %ecx
movq %rbx, %rdx
leaq .LC7(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %rcx
movabsq $2361183241434822607, %rdx
imulq %rdx
sarq $7, %rdx
sarq $63, %rcx
subq %rcx, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
divsd .LC4(%rip), %xmm0
movsd %xmm0, 8(%rsp)
leaq 44(%rsp), %rdi
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl %eax, %edi
movl $96, %ecx
movq %rbx, %rdx
leaq .LC8(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
pxor %xmm0, %xmm0
cvtss2sd 44(%rsp), %xmm0
movsd 8(%rsp), %xmm1
subsd (%rsp), %xmm1
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2158:
.size _Z4syncv, .-_Z4syncv
.section .rodata.str1.8
.align 8
.LC10:
.string "cudaEventCreate(&event_start2)"
.section .rodata.str1.1
.LC11:
.string "cudaEventCreate(&event_stop2)"
.section .rodata.str1.8
.align 8
.LC12:
.string "cudaEventRecord(event_start2, stream2)"
.align 8
.LC13:
.string "cudaEventRecord(event_stop2, stream2)"
.align 8
.LC14:
.string "cudaStreamSynchronize(stream2)"
.align 8
.LC15:
.string "cudaEventElapsedTime(&time2, event_start2, event_stop2)"
.align 8
.LC16:
.string "async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n"
.text
.globl _Z5asyncv
.type _Z5asyncv, @function
_Z5asyncv:
.LFB2157:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $80, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $50, %ecx
leaq .LC1(%rip), %rbx
movq %rbx, %rdx
leaq .LC2(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $51, %ecx
movq %rbx, %rdx
leaq .LC3(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $52, %ecx
movq %rbx, %rdx
leaq .LC10(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $53, %ecx
movq %rbx, %rdx
leaq .LC11(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %rcx
movabsq $2361183241434822607, %rdx
imulq %rdx
sarq $7, %rdx
sarq $63, %rcx
subq %rcx, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
divsd .LC4(%rip), %xmm0
movsd %xmm0, (%rsp)
movq stream1(%rip), %rsi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $56, %ecx
movq %rbx, %rdx
leaq .LC5(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $1000, %ebx
jmp .L33
.L32:
subl $1, %ebx
je .L40
.L33:
movl $512, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $196, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movq stream1(%rip), %r9
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L32
movl $100000, %ecx
movq c1(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
jmp .L32
.L40:
movq stream1(%rip), %rsi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $59, %ecx
leaq .LC1(%rip), %rbx
movq %rbx, %rdx
leaq .LC6(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movq stream2(%rip), %rsi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $61, %ecx
movq %rbx, %rdx
leaq .LC12(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $1000, %ebx
jmp .L35
.L34:
subl $1, %ebx
je .L41
.L35:
movl $512, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $196, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movq stream2(%rip), %r9
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L34
movl $100000, %ecx
movq c2(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
jmp .L34
.L41:
movq stream2(%rip), %rsi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $64, %ecx
leaq .LC1(%rip), %rbx
movq %rbx, %rdx
leaq .LC13(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movq stream1(%rip), %rdi
call cudaStreamSynchronize@PLT
movl %eax, %edi
movl $66, %ecx
movq %rbx, %rdx
leaq .LC7(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movq stream2(%rip), %rdi
call cudaStreamSynchronize@PLT
movl %eax, %edi
movl $67, %ecx
movq %rbx, %rdx
leaq .LC14(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %rcx
movabsq $2361183241434822607, %rdx
imulq %rdx
sarq $7, %rdx
sarq $63, %rcx
subq %rcx, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
divsd .LC4(%rip), %xmm0
movsd %xmm0, 8(%rsp)
leaq 48(%rsp), %rdi
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl %eax, %edi
movl $71, %ecx
movq %rbx, %rdx
leaq .LC8(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
leaq 60(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl %eax, %edi
movl $72, %ecx
movq %rbx, %rdx
leaq .LC15(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
pxor %xmm0, %xmm0
cvtss2sd 48(%rsp), %xmm0
movsd 8(%rsp), %xmm2
subsd (%rsp), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 60(%rsp), %xmm1
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L42:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2157:
.size _Z5asyncv, .-_Z5asyncv
.globl _Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi
.type _Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi, @function
_Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi:
.LFB2187:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10mul_vectorPKfS0_Pfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2187:
.size _Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi, .-_Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi
# Host-side entry symbol generated by nvcc for the __global__ kernel
# mul_vector(const float*, const float*, float*, int).  Host code that
# "calls the kernel" lands here; the real launch happens in the device
# stub below it, which pops the pushed call configuration and invokes
# cudaLaunchKernel (see _Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi).
.globl _Z10mul_vectorPKfS0_Pfi
.type _Z10mul_vectorPKfS0_Pfi, @function
_Z10mul_vectorPKfS0_Pfi:
.LFB2188:
.cfi_startproc
endbr64                 # CET branch-target marker
subq $8, %rsp           # re-align stack to 16 bytes before the call
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2188:
.size _Z10mul_vectorPKfS0_Pfi, .-_Z10mul_vectorPKfS0_Pfi
.section .rodata.str1.1
.LC17:
.string "cudaEventCreate(&event_async)"
.section .rodata.str1.8
.align 8
.LC18:
.string "cudaStreamWaitEvent(stream1, event_async)"
.align 8
.LC19:
.string "cudaEventRecord(event_async, stream2)"
.section .rodata.str1.1
.LC20:
.string "multi_stream_async done.\n"
.text
.globl _Z18multi_stream_asyncv
.type _Z18multi_stream_asyncv, @function
_Z18multi_stream_asyncv:
.LFB2159:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $125, %ecx
leaq .LC1(%rip), %rdx
leaq .LC17(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $512, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $196, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movq stream1(%rip), %r9
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L59
.L52:
movl $512, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $196, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movq stream1(%rip), %r9
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L60
.L53:
movl $0, %edx
movq 8(%rsp), %rsi
movq stream1(%rip), %rdi
call cudaStreamWaitEvent@PLT
movl %eax, %edi
movl $132, %ecx
leaq .LC1(%rip), %rdx
leaq .LC18(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $512, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $196, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movq stream1(%rip), %r9
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L61
.L54:
movl $512, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $196, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movq stream1(%rip), %r9
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L62
.L55:
movl $512, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $196, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movq stream2(%rip), %r9
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L63
.L56:
movq stream2(%rip), %rsi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $140, %ecx
leaq .LC1(%rip), %rbx
movq %rbx, %rdx
leaq .LC19(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movq stream1(%rip), %rdi
call cudaStreamSynchronize@PLT
movl %eax, %edi
movl $141, %ecx
movq %rbx, %rdx
leaq .LC7(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
leaq .LC20(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L64
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L59:
.cfi_restore_state
movl $100000, %ecx
movq c1(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
jmp .L52
.L60:
movl $100000, %ecx
movq c1(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi
jmp .L53
.L61:
movl $100000, %ecx
movq c2(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
jmp .L54
.L62:
movl $100000, %ecx
movq c2(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10mul_vectorPKfS0_PfiPKfS0_Pfi
jmp .L55
.L63:
movl $100000, %ecx
movq c2(%rip), %rdx
movq b(%rip), %rsi
movq a(%rip), %rdi
call _Z37__device_stub__Z10add_vectorPKfS0_PfiPKfS0_Pfi
jmp .L56
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2159:
.size _Z18multi_stream_asyncv, .-_Z18multi_stream_asyncv
.section .rodata.str1.1
.LC21:
.string "cudaStreamCreate(&stream1)"
.LC22:
.string "cudaStreamCreate(&stream2)"
.LC23:
.string "cudaMalloc(&a, bytes)"
.LC24:
.string "cudaMalloc(&b, bytes)"
.LC25:
.string "cudaMalloc(&c1, bytes)"
.LC26:
.string "cudaMalloc(&c2, bytes)"
.text
.globl main
.type main, @function
main:
.LFB2160:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq stream1(%rip), %rdi
call cudaStreamCreate@PLT
movl %eax, %edi
movl $150, %ecx
leaq .LC1(%rip), %rbx
movq %rbx, %rdx
leaq .LC21(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
leaq stream2(%rip), %rdi
call cudaStreamCreate@PLT
movl %eax, %edi
movl $151, %ecx
movq %rbx, %rdx
leaq .LC22(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $400000, %esi
leaq a(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $153, %ecx
movq %rbx, %rdx
leaq .LC23(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $400000, %esi
leaq b(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $154, %ecx
movq %rbx, %rdx
leaq .LC24(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $400000, %esi
leaq c1(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $155, %ecx
movq %rbx, %rdx
leaq .LC25(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
movl $400000, %esi
leaq c2(%rip), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $156, %ecx
movq %rbx, %rdx
leaq .LC26(%rip), %rsi
call _Z20__check_cuda_runtime9cudaErrorPKcS1_i
call _Z5asyncv
call _Z4syncv
call _Z18multi_stream_asyncv
movl $0, %eax
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2160:
.size main, .-main
.section .rodata.str1.1
.LC27:
.string "_Z10mul_vectorPKfS0_Pfi"
.LC28:
.string "_Z10add_vectorPKfS0_Pfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2190:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC27(%rip), %rdx
movq %rdx, %rcx
leaq _Z10mul_vectorPKfS0_Pfi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC28(%rip), %rdx
movq %rdx, %rcx
leaq _Z10add_vectorPKfS0_Pfi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2190:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl c2
.bss
.align 8
.type c2, @object
.size c2, 8
c2:
.zero 8
.globl c1
.align 8
.type c1, @object
.size c1, 8
c1:
.zero 8
.globl b
.align 8
.type b, @object
.size b, 8
b:
.zero 8
.globl a
.align 8
.type a, @object
.size a, 8
a:
.zero 8
.globl stream2
.align 8
.type stream2, @object
.size stream2, 8
stream2:
.zero 8
.globl stream1
.align 8
.type stream1, @object
.size stream1, 8
stream1:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC4:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // CUDA运行时头文件
#include <cuda_runtime.h>
#include <chrono>
#include <stdio.h>
#include <string.h>
using namespace std;
#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)
// Verify the result of a CUDA runtime call (used via the checkRuntime macro).
// On failure, prints the source location, the failing expression text, and the
// runtime's error name/description, then returns false; returns true on success.
bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
    if(code == cudaSuccess)
        return true;
    const char* err_name = cudaGetErrorName(code);
    const char* err_message = cudaGetErrorString(code);
    printf("runtime error %s:%d %s failed. \n code = %s, message = %s\n",
           file, line, op, err_name, err_message);
    return false;
}
// Elementwise vector add: c[i] = a[i] + b[i] for every i in [0, count).
// The launch must cover `count` threads; threads past the end exit early.
__global__ void add_vector(const float* a, const float* b, float* c, int count){
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < count)
        c[i] = a[i] + b[i];
}
// Elementwise vector multiply: c[i] = a[i] * b[i] for every i in [0, count).
// The launch must cover `count` threads; threads past the end exit early.
__global__ void mul_vector(const float* a, const float* b, float* c, int count){
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < count)
        c[i] = a[i] * b[i];
}
// Streams and device buffers shared by all the demos below (set up in main()).
cudaStream_t stream1, stream2;
float *a, *b, *c1, *c2;  // device pointers allocated with cudaMalloc in main()
const int num_element = 100000;  // elements per vector
const size_t bytes = sizeof(float) * num_element;  // buffer size in bytes
const int blocks = 512;  // threads per block
const int grids = (num_element + blocks - 1) / blocks;  // ceil-div grid size
const int ntry = 1000;  // kernel launches per timing loop
// Multiple streams running asynchronously: launch ntry add_vector kernels on
// stream1 and ntry on stream2, timing each stream with its own event pair and
// the whole sequence with host wall-clock time.
void async(){
    cudaEvent_t event_start1, event_stop1;
    cudaEvent_t event_start2, event_stop2;
    checkRuntime(cudaEventCreate(&event_start1));
    checkRuntime(cudaEventCreate(&event_stop1));
    checkRuntime(cudaEventCreate(&event_start2));
    checkRuntime(cudaEventCreate(&event_stop2));
    // Host wall-clock start, in milliseconds (includes launch overhead).
    auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    checkRuntime(cudaEventRecord(event_start1, stream1));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    checkRuntime(cudaEventRecord(event_stop1, stream1));
    checkRuntime(cudaEventRecord(event_start2, stream2));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
    checkRuntime(cudaEventRecord(event_stop2, stream2));
    checkRuntime(cudaStreamSynchronize(stream1));
    checkRuntime(cudaStreamSynchronize(stream2));
    auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    float time1, time2;
    checkRuntime(cudaEventElapsedTime(&time1, event_start1, event_stop1));
    checkRuntime(cudaEventElapsedTime(&time2, event_start2, event_stop2));
    printf("async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n", time1, time2, toc - tic);
    // Fix: the four events were created on every call but never destroyed,
    // leaking driver resources each time async() ran.
    checkRuntime(cudaEventDestroy(event_start1));
    checkRuntime(cudaEventDestroy(event_stop1));
    checkRuntime(cudaEventDestroy(event_start2));
    checkRuntime(cudaEventDestroy(event_stop2));
}
// Single-stream baseline: the same 2*ntry kernel launches as async(), but all
// serialized on stream1 and timed with one event pair plus host wall-clock.
void sync(){
    cudaEvent_t event_start1, event_stop1;
    checkRuntime(cudaEventCreate(&event_start1));
    checkRuntime(cudaEventCreate(&event_stop1));
    // Host wall-clock start, in milliseconds.
    auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    checkRuntime(cudaEventRecord(event_start1, stream1));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    checkRuntime(cudaEventRecord(event_stop1, stream1));
    checkRuntime(cudaStreamSynchronize(stream1));
    auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    float time1;
    checkRuntime(cudaEventElapsedTime(&time1, event_start1, event_stop1));
    printf("sync: time1 = %.2f ms, count = %.2f ms\n", time1, toc - tic);
    // Fix: the events were created on every call but never destroyed (leak).
    checkRuntime(cudaEventDestroy(event_start1));
    checkRuntime(cudaEventDestroy(event_stop1));
}
// Parallelism between multiple streams, with a cross-stream dependency
// expressed through a CUDA event.
void multi_stream_async(){
    // Dependency graph this demo models:
    //   step1 -> step2 \
    //                   -> step3 -> step4
    //   stepa          /
    //
    // Pipeline 1 (step1 -> step2) runs on stream1.
    // Pipeline 2 (stepa) runs on stream2.
    // Pipeline 3 (step3 -> step4) consumes the results of step2 AND stepa, so
    // it stays on stream1, and stream1 is made to wait on an event recorded
    // in stream2.
    #define step1 add_vector
    #define step2 mul_vector
    #define step3 add_vector
    #define step4 mul_vector
    #define stepa add_vector
    cudaEvent_t event_async;
    checkRuntime(cudaEventCreate(&event_async));
    // stream1 work with no cross-stream dependency.
    step1<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    step2<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    // Fix: launch stepa and record event_async BEFORE enqueuing the wait.
    // cudaStreamWaitEvent() captures the event's state at the time of the
    // call, and waiting on an event that has never been recorded completes
    // immediately — so the original record-after-wait ordering made the wait
    // a no-op and left stepa racing with step3/step4 on c2.
    stepa<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
    checkRuntime(cudaEventRecord(event_async, stream2));
    // stream1 now genuinely waits for stepa before running step3/step4.
    checkRuntime(cudaStreamWaitEvent(stream1, event_async));
    step3<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    step4<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    // Syncing stream1 also covers stepa via the event dependency above.
    checkRuntime(cudaStreamSynchronize(stream1));
    checkRuntime(cudaEventDestroy(event_async));  // was leaked on every call
    printf("multi_stream_async done.\n");
}
int main(){
    // Demonstrates overlapping two kernels by issuing them to different streams.
    checkRuntime(cudaStreamCreate(&stream1));
    checkRuntime(cudaStreamCreate(&stream2));
    checkRuntime(cudaMalloc(&a, bytes));
    checkRuntime(cudaMalloc(&b, bytes));
    checkRuntime(cudaMalloc(&c1, bytes));
    checkRuntime(cudaMalloc(&c2, bytes));
    // Asynchronous execution across two streams.
    async();
    // Serialized execution on a single stream.
    sync();
    // Streams waiting on each other through events.
    multi_stream_async();
    // Fix: release device buffers and streams — they were previously leaked
    // until process exit.
    checkRuntime(cudaFree(a));
    checkRuntime(cudaFree(b));
    checkRuntime(cudaFree(c1));
    checkRuntime(cudaFree(c2));
    checkRuntime(cudaStreamDestroy(stream1));
    checkRuntime(cudaStreamDestroy(stream2));
    return 0;
}
#include <hip/hip_runtime.h>
#include <chrono>
#include <stdio.h>
#include <string.h>
using namespace std;
#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)
bool __check_cuda_runtime(hipError_t code, const char* op, const char* file, int line){
if(code != hipSuccess){
const char* err_name = hipGetErrorName(code);
const char* err_message = hipGetErrorString(code);
printf("runtime error %s:%d %s failed. \n code = %s, message = %s\n", file, line, op, err_name, err_message);
return false;
}
return true;
}
__global__ void add_vector(const float* a, const float* b, float* c, int count){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index >= count) return;
c[index] = a[index] + b[index];
}
__global__ void mul_vector(const float* a, const float* b, float* c, int count){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index >= count) return;
c[index] = a[index] * b[index];
}
hipStream_t stream1, stream2;
float *a, *b, *c1, *c2;
const int num_element = 100000;
const size_t bytes = sizeof(float) * num_element;
const int blocks = 512;
const int grids = (num_element + blocks - 1) / blocks;
const int ntry = 1000;
// Multiple streams running asynchronously: launch ntry add_vector kernels on
// stream1 and ntry on stream2, timing each stream with its own event pair and
// the whole sequence with host wall-clock time.
void async(){
    hipEvent_t event_start1, event_stop1;
    hipEvent_t event_start2, event_stop2;
    checkRuntime(hipEventCreate(&event_start1));
    checkRuntime(hipEventCreate(&event_stop1));
    checkRuntime(hipEventCreate(&event_start2));
    checkRuntime(hipEventCreate(&event_stop2));
    // Host wall-clock start, in milliseconds (includes launch overhead).
    auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    checkRuntime(hipEventRecord(event_start1, stream1));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    checkRuntime(hipEventRecord(event_stop1, stream1));
    checkRuntime(hipEventRecord(event_start2, stream2));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
    checkRuntime(hipEventRecord(event_stop2, stream2));
    checkRuntime(hipStreamSynchronize(stream1));
    checkRuntime(hipStreamSynchronize(stream2));
    auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    float time1, time2;
    checkRuntime(hipEventElapsedTime(&time1, event_start1, event_stop1));
    checkRuntime(hipEventElapsedTime(&time2, event_start2, event_stop2));
    printf("async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n", time1, time2, toc - tic);
    // Fix: the four events were created on every call but never destroyed,
    // leaking runtime resources each time async() ran.
    checkRuntime(hipEventDestroy(event_start1));
    checkRuntime(hipEventDestroy(event_stop1));
    checkRuntime(hipEventDestroy(event_start2));
    checkRuntime(hipEventDestroy(event_stop2));
}
// Single-stream baseline: the same 2*ntry kernel launches as async(), but all
// serialized on stream1 and timed with one event pair plus host wall-clock.
void sync(){
    hipEvent_t event_start1, event_stop1;
    checkRuntime(hipEventCreate(&event_start1));
    checkRuntime(hipEventCreate(&event_stop1));
    // Host wall-clock start, in milliseconds.
    auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    checkRuntime(hipEventRecord(event_start1, stream1));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    checkRuntime(hipEventRecord(event_stop1, stream1));
    checkRuntime(hipStreamSynchronize(stream1));
    auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    float time1;
    checkRuntime(hipEventElapsedTime(&time1, event_start1, event_stop1));
    printf("sync: time1 = %.2f ms, count = %.2f ms\n", time1, toc - tic);
    // Fix: the events were created on every call but never destroyed (leak).
    checkRuntime(hipEventDestroy(event_start1));
    checkRuntime(hipEventDestroy(event_stop1));
}
// Parallelism between multiple streams, with a cross-stream dependency
// expressed through a HIP event.
void multi_stream_async(){
    // Dependency graph this demo models:
    //   step1 -> step2 \
    //                   -> step3 -> step4
    //   stepa          /
    //
    // Pipeline 1 (step1 -> step2) runs on stream1.
    // Pipeline 2 (stepa) runs on stream2.
    // Pipeline 3 (step3 -> step4) consumes the results of step2 AND stepa, so
    // it stays on stream1, and stream1 is made to wait on an event recorded
    // in stream2.
    #define step1 add_vector
    #define step2 mul_vector
    #define step3 add_vector
    #define step4 mul_vector
    #define stepa add_vector
    hipEvent_t event_async;
    checkRuntime(hipEventCreate(&event_async));
    // stream1 work with no cross-stream dependency.
    step1<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    step2<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    // Fix: launch stepa and record event_async BEFORE enqueuing the wait.
    // hipStreamWaitEvent() captures the event's state at the time of the
    // call, and waiting on an event that has never been recorded completes
    // immediately — so the original record-after-wait ordering made the wait
    // a no-op and left stepa racing with step3/step4 on c2.
    stepa<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
    checkRuntime(hipEventRecord(event_async, stream2));
    // stream1 now genuinely waits for stepa before running step3/step4.
    checkRuntime(hipStreamWaitEvent(stream1, event_async));
    step3<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    step4<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    // Syncing stream1 also covers stepa via the event dependency above.
    checkRuntime(hipStreamSynchronize(stream1));
    checkRuntime(hipEventDestroy(event_async));  // was leaked on every call
    printf("multi_stream_async done.\n");
}
int main(){
    // Demonstrates overlapping two kernels by issuing them to different streams.
    checkRuntime(hipStreamCreate(&stream1));
    checkRuntime(hipStreamCreate(&stream2));
    checkRuntime(hipMalloc(&a, bytes));
    checkRuntime(hipMalloc(&b, bytes));
    checkRuntime(hipMalloc(&c1, bytes));
    checkRuntime(hipMalloc(&c2, bytes));
    // Asynchronous execution across two streams.
    async();
    // Serialized execution on a single stream.
    sync();
    // Streams waiting on each other through events.
    multi_stream_async();
    // Fix: release device buffers and streams — they were previously leaked
    // until process exit.
    checkRuntime(hipFree(a));
    checkRuntime(hipFree(b));
    checkRuntime(hipFree(c1));
    checkRuntime(hipFree(c2));
    checkRuntime(hipStreamDestroy(stream1));
    checkRuntime(hipStreamDestroy(stream2));
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // CUDA运行时头文件
#include <hip/hip_runtime.h>
#include <chrono>
#include <stdio.h>
#include <string.h>
using namespace std;
#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)
bool __check_cuda_runtime(hipError_t code, const char* op, const char* file, int line){
if(code != hipSuccess){
const char* err_name = hipGetErrorName(code);
const char* err_message = hipGetErrorString(code);
printf("runtime error %s:%d %s failed. \n code = %s, message = %s\n", file, line, op, err_name, err_message);
return false;
}
return true;
}
// Elementwise vector add kernel: c[index] = a[index] + b[index].
// The launch must provide at least `count` threads; extra threads return
// early without touching memory.
__global__ void add_vector(const float* a, const float* b, float* c, int count){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index >= count) return;
c[index] = a[index] + b[index];
}
// Elementwise vector multiply kernel: c[index] = a[index] * b[index].
// The launch must provide at least `count` threads; extra threads return
// early without touching memory.
__global__ void mul_vector(const float* a, const float* b, float* c, int count){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index >= count) return;
c[index] = a[index] * b[index];
}
hipStream_t stream1, stream2;
float *a, *b, *c1, *c2;
const int num_element = 100000;
const size_t bytes = sizeof(float) * num_element;
const int blocks = 512;
const int grids = (num_element + blocks - 1) / blocks;
const int ntry = 1000;
// 多个流异步
void async(){
hipEvent_t event_start1, event_stop1;
hipEvent_t event_start2, event_stop2;
checkRuntime(hipEventCreate(&event_start1));
checkRuntime(hipEventCreate(&event_stop1));
checkRuntime(hipEventCreate(&event_start2));
checkRuntime(hipEventCreate(&event_stop2));
auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
checkRuntime(hipEventRecord(event_start1, stream1));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
checkRuntime(hipEventRecord(event_stop1, stream1));
checkRuntime(hipEventRecord(event_start2, stream2));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
checkRuntime(hipEventRecord(event_stop2, stream2));
checkRuntime(hipStreamSynchronize(stream1));
checkRuntime(hipStreamSynchronize(stream2));
auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
float time1, time2;
checkRuntime(hipEventElapsedTime(&time1, event_start1, event_stop1));
checkRuntime(hipEventElapsedTime(&time2, event_start2, event_stop2));
printf("async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n", time1, time2, toc - tic);
}
// 单个流串行
void sync(){
hipEvent_t event_start1, event_stop1;
checkRuntime(hipEventCreate(&event_start1));
checkRuntime(hipEventCreate(&event_stop1));
auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
checkRuntime(hipEventRecord(event_start1, stream1));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
checkRuntime(hipEventRecord(event_stop1, stream1));
checkRuntime(hipStreamSynchronize(stream1));
auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
float time1;
checkRuntime(hipEventElapsedTime(&time1, event_start1, event_stop1));
printf("sync: time1 = %.2f ms, count = %.2f ms\n", time1, toc - tic);
}
// Parallelism between multiple streams, with a cross-stream dependency
// expressed through a HIP event.
void multi_stream_async(){
    // Dependency graph this demo models:
    //   step1 -> step2 \
    //                   -> step3 -> step4
    //   stepa          /
    //
    // Pipeline 1 (step1 -> step2) runs on stream1.
    // Pipeline 2 (stepa) runs on stream2.
    // Pipeline 3 (step3 -> step4) consumes the results of step2 AND stepa, so
    // it stays on stream1, and stream1 is made to wait on an event recorded
    // in stream2.
    #define step1 add_vector
    #define step2 mul_vector
    #define step3 add_vector
    #define step4 mul_vector
    #define stepa add_vector
    hipEvent_t event_async;
    checkRuntime(hipEventCreate(&event_async));
    // stream1 work with no cross-stream dependency.
    step1<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    step2<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    // Fix: launch stepa and record event_async BEFORE enqueuing the wait.
    // hipStreamWaitEvent() captures the event's state at the time of the
    // call, and waiting on an event that has never been recorded completes
    // immediately — so the original record-after-wait ordering made the wait
    // a no-op and left stepa racing with step3/step4 on c2.
    stepa<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
    checkRuntime(hipEventRecord(event_async, stream2));
    // stream1 now genuinely waits for stepa before running step3/step4.
    checkRuntime(hipStreamWaitEvent(stream1, event_async));
    step3<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    step4<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    // Syncing stream1 also covers stepa via the event dependency above.
    checkRuntime(hipStreamSynchronize(stream1));
    checkRuntime(hipEventDestroy(event_async));  // was leaked on every call
    printf("multi_stream_async done.\n");
}
int main(){
    // Demonstrates overlapping two kernels by issuing them to different streams.
    checkRuntime(hipStreamCreate(&stream1));
    checkRuntime(hipStreamCreate(&stream2));
    checkRuntime(hipMalloc(&a, bytes));
    checkRuntime(hipMalloc(&b, bytes));
    checkRuntime(hipMalloc(&c1, bytes));
    checkRuntime(hipMalloc(&c2, bytes));
    // Asynchronous execution across two streams.
    async();
    // Serialized execution on a single stream.
    sync();
    // Streams waiting on each other through events.
    multi_stream_async();
    // Fix: release device buffers and streams — they were previously leaked
    // until process exit.
    checkRuntime(hipFree(a));
    checkRuntime(hipFree(b));
    checkRuntime(hipFree(c1));
    checkRuntime(hipFree(c2));
    checkRuntime(hipStreamDestroy(stream1));
    checkRuntime(hipStreamDestroy(stream2));
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10add_vectorPKfS0_Pfi
.globl _Z10add_vectorPKfS0_Pfi
.p2align 8
.type _Z10add_vectorPKfS0_Pfi,@function
_Z10add_vectorPKfS0_Pfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10add_vectorPKfS0_Pfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10add_vectorPKfS0_Pfi, .Lfunc_end0-_Z10add_vectorPKfS0_Pfi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z10mul_vectorPKfS0_Pfi
.globl _Z10mul_vectorPKfS0_Pfi
.p2align 8
.type _Z10mul_vectorPKfS0_Pfi,@function
_Z10mul_vectorPKfS0_Pfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_mul_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10mul_vectorPKfS0_Pfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z10mul_vectorPKfS0_Pfi, .Lfunc_end1-_Z10mul_vectorPKfS0_Pfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10add_vectorPKfS0_Pfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10add_vectorPKfS0_Pfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10mul_vectorPKfS0_Pfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10mul_vectorPKfS0_Pfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // CUDA运行时头文件
#include <hip/hip_runtime.h>
#include <chrono>
#include <stdio.h>
#include <string.h>
using namespace std;
#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)
// Reports a HIP runtime failure (error name + message plus call site) and
// returns false when `code` is not hipSuccess; returns true otherwise.
// Invoked through the checkRuntime() macro, which stringizes the call.
bool __check_cuda_runtime(hipError_t code, const char* op, const char* file, int line){
if(code == hipSuccess)
return true;
const char* name = hipGetErrorName(code);
const char* message = hipGetErrorString(code);
printf("runtime error %s:%d %s failed. \n code = %s, message = %s\n", file, line, op, name, message);
return false;
}
// Elementwise vector addition: c[i] = a[i] + b[i] for i in [0, count).
// Launched over a 1D grid; threads whose flat index falls past `count`
// simply do nothing (grid size rarely divides the data exactly).
__global__ void add_vector(const float* a, const float* b, float* c, int count){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < count)
c[idx] = a[idx] + b[idx];
}
// Elementwise vector multiplication: c[i] = a[i] * b[i] for i in [0, count).
// Launched over a 1D grid; out-of-range threads exit without touching memory.
__global__ void mul_vector(const float* a, const float* b, float* c, int count){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < count)
c[idx] = a[idx] * b[idx];
}
// Streams used to demonstrate concurrent vs. serialized execution.
hipStream_t stream1, stream2;
// Device buffers: two inputs (a, b) and two independent outputs (c1, c2).
float *a, *b, *c1, *c2;
const int num_element = 100000;
const size_t bytes = sizeof(float) * num_element;
const int blocks = 512;                                // threads per block
const int grids = (num_element + blocks - 1) / blocks; // ceil-div launch size
const int ntry = 1000;                                 // kernel launches per timing run
// Asynchronous demo: launch the same workload on two streams, timing each
// stream with its own event pair, and the whole thing with wall-clock time.
void async(){
hipEvent_t event_start1, event_stop1;
hipEvent_t event_start2, event_stop2;
checkRuntime(hipEventCreate(&event_start1));
checkRuntime(hipEventCreate(&event_stop1));
checkRuntime(hipEventCreate(&event_start2));
checkRuntime(hipEventCreate(&event_stop2));
// Wall-clock start, in milliseconds.
auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
checkRuntime(hipEventRecord(event_start1, stream1));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
checkRuntime(hipEventRecord(event_stop1, stream1));
checkRuntime(hipEventRecord(event_start2, stream2));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
checkRuntime(hipEventRecord(event_stop2, stream2));
// Drain both streams before reading elapsed times from the events.
checkRuntime(hipStreamSynchronize(stream1));
checkRuntime(hipStreamSynchronize(stream2));
auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
float time1, time2;
checkRuntime(hipEventElapsedTime(&time1, event_start1, event_stop1));
checkRuntime(hipEventElapsedTime(&time2, event_start2, event_stop2));
printf("async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n", time1, time2, toc - tic);
}
// Single-stream (serialized) demo: both batches of kernel launches go to
// stream1 and therefore run back-to-back; timed with one event pair plus
// wall-clock time, for comparison against async().
void sync(){
hipEvent_t event_start1, event_stop1;
checkRuntime(hipEventCreate(&event_start1));
checkRuntime(hipEventCreate(&event_stop1));
// Wall-clock start, in milliseconds.
auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
checkRuntime(hipEventRecord(event_start1, stream1));
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
for(int i = 0; i < ntry; ++i)
add_vector<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
checkRuntime(hipEventRecord(event_stop1, stream1));
checkRuntime(hipStreamSynchronize(stream1));
auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
float time1;
checkRuntime(hipEventElapsedTime(&time1, event_start1, event_stop1));
printf("sync: time1 = %.2f ms, count = %.2f ms\n", time1, toc - tic);
}
// Parallelism across multiple streams with a cross-stream dependency.
void multi_stream_async(){
// This example makes multiple streams wait on each other, using an
// event to express the cross-stream dependency.
//
// Dependency graph:
//   step1 -> step2 --.
//                     '--> step3 -> step4
//   stepa -----------'
//
// Pipeline 1: step1 -> step2 (runs on stream1)
// Pipeline 2: stepa          (runs on stream2)
// Pipeline 3: step3 -> step4, where step3 needs both step2 and stepa
//             as inputs (continues on stream1)
// So pipeline 1 uses stream1, pipeline 2 uses stream2, and pipeline 3
// stays on stream1 with just an event wait inserted before it.
// step1 = add_vector
// step2 = mul_vector
// step3 = add_vector
// step4 = mul_vector
// stepa = add_vector
#define step1 add_vector
#define step2 mul_vector
#define step3 add_vector
#define step4 mul_vector
#define stepa add_vector
hipEvent_t event_async;
checkRuntime(hipEventCreate(&event_async));
// Work submitted to stream1.
step1<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
step2<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
// Make stream1 wait until event_async has been signaled.
checkRuntime(hipStreamWaitEvent(stream1, event_async));
step3<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
step4<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
// Work submitted to stream2.
stepa<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
// Record event_async so the hipStreamWaitEvent above can proceed.
checkRuntime(hipEventRecord(event_async, stream2));
checkRuntime(hipStreamSynchronize(stream1));
printf("multi_stream_async done.\n");
}
int main(){
// This program demonstrates running two kernels concurrently by
// distributing the launches across multiple streams.
checkRuntime(hipStreamCreate(&stream1));
checkRuntime(hipStreamCreate(&stream2));
checkRuntime(hipMalloc(&a, bytes));
checkRuntime(hipMalloc(&b, bytes));
checkRuntime(hipMalloc(&c1, bytes));
checkRuntime(hipMalloc(&c2, bytes));
// Demonstrate asynchronous execution across multiple streams.
async();
// Demonstrate serialized execution within a single stream.
sync();
// Demonstrate multiple streams waiting on each other.
multi_stream_async();
return 0;
}
.file "main.hip"
.globl _Z20__check_cuda_runtime10hipError_tPKcS1_i # -- Begin function _Z20__check_cuda_runtime10hipError_tPKcS1_i
.p2align 4, 0x90
.type _Z20__check_cuda_runtime10hipError_tPKcS1_i,@function
_Z20__check_cuda_runtime10hipError_tPKcS1_i: # @_Z20__check_cuda_runtime10hipError_tPKcS1_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edi, %ebx
testl %edi, %edi
je .LBB0_2
# %bb.1:
movl %ecx, %ebp
movq %rdx, %r15
movq %rsi, %r14
movl %ebx, %edi
callq hipGetErrorName
movq %rax, %r12
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %r15, %rsi
movl %ebp, %edx
movq %r14, %rcx
movq %r12, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB0_2:
testl %ebx, %ebx
sete %al
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z20__check_cuda_runtime10hipError_tPKcS1_i, .Lfunc_end0-_Z20__check_cuda_runtime10hipError_tPKcS1_i
.cfi_endproc
# -- End function
.globl _Z25__device_stub__add_vectorPKfS0_Pfi # -- Begin function _Z25__device_stub__add_vectorPKfS0_Pfi
.p2align 4, 0x90
.type _Z25__device_stub__add_vectorPKfS0_Pfi,@function
_Z25__device_stub__add_vectorPKfS0_Pfi: # @_Z25__device_stub__add_vectorPKfS0_Pfi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10add_vectorPKfS0_Pfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z25__device_stub__add_vectorPKfS0_Pfi, .Lfunc_end1-_Z25__device_stub__add_vectorPKfS0_Pfi
.cfi_endproc
# -- End function
.globl _Z25__device_stub__mul_vectorPKfS0_Pfi # -- Begin function _Z25__device_stub__mul_vectorPKfS0_Pfi
.p2align 4, 0x90
.type _Z25__device_stub__mul_vectorPKfS0_Pfi,@function
_Z25__device_stub__mul_vectorPKfS0_Pfi: # @_Z25__device_stub__mul_vectorPKfS0_Pfi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10mul_vectorPKfS0_Pfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z25__device_stub__mul_vectorPKfS0_Pfi, .Lfunc_end2-_Z25__device_stub__mul_vectorPKfS0_Pfi
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z5asyncv
.LCPI3_0:
.quad 0x408f400000000000 # double 1000
.text
.globl _Z5asyncv
.p2align 4, 0x90
.type _Z5asyncv,@function
_Z5asyncv: # @_Z5asyncv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 136(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
je .LBB3_2
# %bb.1:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.1, %ecx
movl $50, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_2: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit
leaq 128(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
je .LBB3_4
# %bb.3:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.3, %ecx
movl $51, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_4: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit14
leaq 120(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
je .LBB3_6
# %bb.5:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.4, %ecx
movl $52, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_6: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit16
leaq 112(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
je .LBB3_8
# %bb.7:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.5, %ecx
movl $53, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_8: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit18
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, 144(%rsp) # 8-byte Spill
movq 136(%rsp), %rdi
movq stream1(%rip), %rsi
callq hipEventRecord
testl %eax, %eax
je .LBB3_10
# %bb.9:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.6, %ecx
movl $56, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_10: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit20
movl $1000, %r13d # imm = 0x3E8
movabsq $4294967492, %r14 # imm = 0x1000000C4
leaq 316(%r14), %r15
leaq 32(%rsp), %rbp
leaq 24(%rsp), %rbx
leaq 80(%rsp), %r12
jmp .LBB3_11
.p2align 4, 0x90
.LBB3_13: # in Loop: Header=BB3_11 Depth=1
decl %r13d
je .LBB3_14
.LBB3_11: # =>This Inner Loop Header: Depth=1
movq stream1(%rip), %r9
movq %r14, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_13
# %bb.12: # in Loop: Header=BB3_11 Depth=1
movq a(%rip), %rax
movq b(%rip), %rcx
movq c1(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rdi
leaq 40(%rsp), %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 8(%rsp), %rsi
movl 16(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z10add_vectorPKfS0_Pfi, %edi
movq %r12, %r9
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB3_13
.LBB3_14:
movq 128(%rsp), %rdi
movq stream1(%rip), %rsi
callq hipEventRecord
testl %eax, %eax
je .LBB3_16
# %bb.15:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r12
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.7, %ecx
movl $59, %edx
movq %r12, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_16: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit22
movq 120(%rsp), %rdi
movq stream2(%rip), %rsi
callq hipEventRecord
testl %eax, %eax
je .LBB3_18
# %bb.17:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r12
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.8, %ecx
movl $61, %edx
movq %r12, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_18: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit24
movl $1000, %r13d # imm = 0x3E8
leaq 32(%rsp), %rbp
leaq 24(%rsp), %rbx
leaq 80(%rsp), %r12
jmp .LBB3_19
.p2align 4, 0x90
.LBB3_21: # in Loop: Header=BB3_19 Depth=1
decl %r13d
je .LBB3_22
.LBB3_19: # =>This Inner Loop Header: Depth=1
movq stream2(%rip), %r9
movq %r14, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_21
# %bb.20: # in Loop: Header=BB3_19 Depth=1
movq a(%rip), %rax
movq b(%rip), %rcx
movq c2(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rdi
leaq 40(%rsp), %rsi
movq %rbp, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 8(%rsp), %rsi
movl 16(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
movl $_Z10add_vectorPKfS0_Pfi, %edi
movq %r12, %r9
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB3_21
.LBB3_22:
movq 112(%rsp), %rdi
movq stream2(%rip), %rsi
callq hipEventRecord
testl %eax, %eax
je .LBB3_24
# %bb.23:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.9, %ecx
movl $64, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_24: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit27
movq stream1(%rip), %rdi
callq hipStreamSynchronize
testl %eax, %eax
je .LBB3_26
# %bb.25:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.10, %ecx
movl $66, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_26: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit29
movq stream2(%rip), %rdi
callq hipStreamSynchronize
testl %eax, %eax
je .LBB3_28
# %bb.27:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.11, %ecx
movl $67, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_28: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit31
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r14
movq 136(%rsp), %rsi
movq 128(%rsp), %rdx
leaq 80(%rsp), %rdi
callq hipEventElapsedTime
testl %eax, %eax
je .LBB3_30
# %bb.29:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r15
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.12, %ecx
movl $71, %edx
movq %r15, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_30: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit34
movq 120(%rsp), %rsi
movq 112(%rsp), %rdx
leaq 8(%rsp), %rdi
callq hipEventElapsedTime
testl %eax, %eax
je .LBB3_32
# %bb.31:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r15
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.13, %ecx
movl $72, %edx
movq %r15, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB3_32: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit36
movabsq $2361183241434822607, %rcx # imm = 0x20C49BA5E353F7CF
movq %r14, %rax
imulq %rcx
movq %rdx, %rax
shrq $63, %rax
sarq $7, %rdx
addq %rax, %rdx
cvtsi2sd %rdx, %xmm2
movsd .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm0, %xmm2
movq 144(%rsp), %rax # 8-byte Reload
imulq %rcx
movq %rdx, %rax
shrq $63, %rax
sarq $7, %rdx
addq %rax, %rdx
cvtsi2sd %rdx, %xmm1
divsd %xmm0, %xmm1
subsd %xmm1, %xmm2
movss 80(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss 8(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movl $.L.str.14, %edi
movb $3, %al
callq printf
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z5asyncv, .Lfunc_end3-_Z5asyncv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function sync
.LCPI4_0:
.quad 0x408f400000000000 # double 1000
.text
.globl sync
.p2align 4, 0x90
.type sync,@function
sync: # @sync
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
.Ltmp0:
.cfi_escape 0x2e, 0x00
leaq 128(%rsp), %rdi
callq hipEventCreate
.Ltmp1:
# %bb.1:
movl %eax, %ebp
testl %eax, %eax
je .LBB4_5
# %bb.2:
.Ltmp2:
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorName
.Ltmp3:
# %bb.3: # %.noexc
.Ltmp4:
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorString
.Ltmp5:
# %bb.4: # %.noexc13
.cfi_escape 0x2e, 0x00
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.1, %ecx
movl $80, %edx
movq %rbx, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB4_5: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit
.Ltmp6:
.cfi_escape 0x2e, 0x00
leaq 120(%rsp), %rdi
callq hipEventCreate
.Ltmp7:
# %bb.6:
movl %eax, %ebp
testl %eax, %eax
je .LBB4_10
# %bb.7:
.Ltmp8:
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorName
.Ltmp9:
# %bb.8: # %.noexc15
.Ltmp10:
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorString
.Ltmp11:
# %bb.9: # %.noexc16
.cfi_escape 0x2e, 0x00
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.3, %ecx
movl $81, %edx
movq %rbx, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB4_10: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit17
.cfi_escape 0x2e, 0x00
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, 144(%rsp) # 8-byte Spill
movq 128(%rsp), %rdi
movq stream1(%rip), %rsi
.Ltmp12:
.cfi_escape 0x2e, 0x00
callq hipEventRecord
.Ltmp13:
# %bb.11:
movl %eax, %ebp
testl %eax, %eax
je .LBB4_15
# %bb.12:
.Ltmp14:
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorName
.Ltmp15:
# %bb.13: # %.noexc19
.Ltmp16:
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorString
.Ltmp17:
# %bb.14: # %.noexc20
.cfi_escape 0x2e, 0x00
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.6, %ecx
movl $84, %edx
movq %rbx, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB4_15: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit21
movl $1000, %ebx # imm = 0x3E8
movabsq $4294967492, %r14 # imm = 0x1000000C4
movabsq $4294967808, %r15 # imm = 0x100000200
leaq 136(%rsp), %r13
leaq 12(%rsp), %rbp
leaq 80(%rsp), %r12
jmp .LBB4_16
.p2align 4, 0x90
.LBB4_20: # in Loop: Header=BB4_16 Depth=1
decl %ebx
je .LBB4_21
.LBB4_16: # =>This Inner Loop Header: Depth=1
movq stream1(%rip), %r9
.Ltmp18:
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
.Ltmp19:
# %bb.17: # in Loop: Header=BB4_16 Depth=1
testl %eax, %eax
jne .LBB4_20
# %bb.18: # in Loop: Header=BB4_16 Depth=1
movq a(%rip), %rax
movq b(%rip), %rcx
movq c1(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 136(%rsp)
movl $100000, 12(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
movq %r13, 96(%rsp)
movq %rbp, 104(%rsp)
.Ltmp20:
.cfi_escape 0x2e, 0x00
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp21:
# %bb.19: # %.noexc23
# in Loop: Header=BB4_16 Depth=1
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
.Ltmp22:
.cfi_escape 0x2e, 0x10
movl $_Z10add_vectorPKfS0_Pfi, %edi
movq %r12, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp23:
jmp .LBB4_20
.LBB4_21: # %.preheader
movl $1000, %ebx # imm = 0x3E8
movabsq $4294967492, %r14 # imm = 0x1000000C4
movabsq $4294967808, %r15 # imm = 0x100000200
leaq 80(%rsp), %r12
jmp .LBB4_22
.p2align 4, 0x90
.LBB4_26: # in Loop: Header=BB4_22 Depth=1
decl %ebx
je .LBB4_27
.LBB4_22: # =>This Inner Loop Header: Depth=1
movq stream1(%rip), %r9
.Ltmp25:
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
.Ltmp26:
# %bb.23: # in Loop: Header=BB4_22 Depth=1
testl %eax, %eax
jne .LBB4_26
# %bb.24: # in Loop: Header=BB4_22 Depth=1
movq a(%rip), %rax
movq b(%rip), %rcx
movq c2(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 136(%rsp)
movl $100000, 12(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
movq %r13, 96(%rsp)
movq %rbp, 104(%rsp)
.Ltmp27:
.cfi_escape 0x2e, 0x00
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp28:
# %bb.25: # %.noexc31
# in Loop: Header=BB4_22 Depth=1
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
.Ltmp29:
.cfi_escape 0x2e, 0x10
movl $_Z10add_vectorPKfS0_Pfi, %edi
movq %r12, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp30:
jmp .LBB4_26
.LBB4_27:
movq 120(%rsp), %rdi
movq stream1(%rip), %rsi
.Ltmp32:
.cfi_escape 0x2e, 0x00
callq hipEventRecord
.Ltmp33:
# %bb.28:
movl %eax, %ebp
testl %eax, %eax
je .LBB4_32
# %bb.29:
.Ltmp34:
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorName
.Ltmp35:
# %bb.30: # %.noexc35
.Ltmp36:
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorString
.Ltmp37:
# %bb.31: # %.noexc36
.cfi_escape 0x2e, 0x00
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.7, %ecx
movl $90, %edx
movq %rbx, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB4_32: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit37
movq stream1(%rip), %rdi
.Ltmp38:
.cfi_escape 0x2e, 0x00
callq hipStreamSynchronize
.Ltmp39:
# %bb.33:
movl %eax, %ebp
testl %eax, %eax
je .LBB4_37
# %bb.34:
.Ltmp40:
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorName
.Ltmp41:
# %bb.35: # %.noexc39
.Ltmp42:
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorString
.Ltmp43:
# %bb.36: # %.noexc40
.cfi_escape 0x2e, 0x00
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.10, %ecx
movl $92, %edx
movq %rbx, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB4_37: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit41
.cfi_escape 0x2e, 0x00
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %rbx
movq 128(%rsp), %rsi
movq 120(%rsp), %rdx
.Ltmp44:
.cfi_escape 0x2e, 0x00
leaq 80(%rsp), %rdi
callq hipEventElapsedTime
.Ltmp45:
# %bb.38:
movl %eax, %ebp
testl %eax, %eax
je .LBB4_42
# %bb.39:
.Ltmp46:
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorName
.Ltmp47:
# %bb.40: # %.noexc44
.Ltmp48:
movq %rax, %r14
.cfi_escape 0x2e, 0x00
movl %ebp, %edi
callq hipGetErrorString
.Ltmp49:
# %bb.41: # %.noexc45
.cfi_escape 0x2e, 0x00
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.12, %ecx
movl $96, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB4_42: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit46
movabsq $2361183241434822607, %rsi # imm = 0x20C49BA5E353F7CF
movq %rbx, %rax
imulq %rsi
movq %rdx, %rcx
movq %rdx, %rdi
shrq $63, %rdi
sarq $7, %rcx
movq 144(%rsp), %rax # 8-byte Reload
imulq %rsi
addq %rdi, %rcx
movq %rdx, %rax
shrq $63, %rax
sarq $7, %rdx
addq %rax, %rdx
cvtsi2sd %rcx, %xmm1
movsd .LCPI4_0(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm0, %xmm1
cvtsi2sd %rdx, %xmm2
divsd %xmm0, %xmm2
subsd %xmm2, %xmm1
movss 80(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
.cfi_escape 0x2e, 0x00
movl $.L.str.15, %edi
movb $2, %al
callq printf
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB4_45: # %.loopexit.split-lp.loopexit.split-lp
.cfi_def_cfa_offset 208
.Ltmp50:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq __clang_call_terminate
.LBB4_43: # %.loopexit
.Ltmp31:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq __clang_call_terminate
.LBB4_44: # %.loopexit.split-lp.loopexit
.Ltmp24:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq __clang_call_terminate
.Lfunc_end4:
.size sync, .Lfunc_end4-sync
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table4:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 3 # @TType Encoding = udata4
.uleb128 .Lttbase0-.Lttbaseref0
.Lttbaseref0:
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp17-.Ltmp0 # Call between .Ltmp0 and .Ltmp17
.uleb128 .Ltmp50-.Lfunc_begin0 # jumps to .Ltmp50
.byte 1 # On action: 1
.uleb128 .Ltmp18-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp23-.Ltmp18 # Call between .Ltmp18 and .Ltmp23
.uleb128 .Ltmp24-.Lfunc_begin0 # jumps to .Ltmp24
.byte 1 # On action: 1
.uleb128 .Ltmp25-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp30-.Ltmp25 # Call between .Ltmp25 and .Ltmp30
.uleb128 .Ltmp31-.Lfunc_begin0 # jumps to .Ltmp31
.byte 1 # On action: 1
.uleb128 .Ltmp32-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp49-.Ltmp32 # Call between .Ltmp32 and .Ltmp49
.uleb128 .Ltmp50-.Lfunc_begin0 # jumps to .Ltmp50
.byte 1 # On action: 1
.Lcst_end0:
.byte 1 # >> Action Record 1 <<
# Catch TypeInfo 1
.byte 0 # No further actions
.p2align 2, 0x0
# >> Catch TypeInfos <<
.long 0 # TypeInfo 1
.Lttbase0:
.p2align 2, 0x0
# -- End function
.section .text.__clang_call_terminate,"axG",@progbits,__clang_call_terminate,comdat
.hidden __clang_call_terminate # -- Begin function __clang_call_terminate
.weak __clang_call_terminate
.p2align 4, 0x90
.type __clang_call_terminate,@function
__clang_call_terminate: # @__clang_call_terminate
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq __cxa_begin_catch
callq _ZSt9terminatev
.Lfunc_end5:
.size __clang_call_terminate, .Lfunc_end5-__clang_call_terminate
.cfi_endproc
# -- End function
.text
.globl _Z18multi_stream_asyncv # -- Begin function _Z18multi_stream_asyncv
.p2align 4, 0x90
.type _Z18multi_stream_asyncv,@function
_Z18multi_stream_asyncv: # @_Z18multi_stream_asyncv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 112(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
je .LBB6_2
# %bb.1:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.16, %ecx
movl $125, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB6_2: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit
movabsq $4294967492, %rbx # imm = 0x1000000C4
movq stream1(%rip), %r9
leaq 316(%rbx), %r14
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_4
# %bb.3:
movq a(%rip), %rax
movq b(%rip), %rcx
movq c1(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10add_vectorPKfS0_Pfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_4:
movq stream1(%rip), %r9
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_6
# %bb.5:
movq a(%rip), %rax
movq b(%rip), %rcx
movq c1(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10mul_vectorPKfS0_Pfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_6:
movq stream1(%rip), %rdi
movq 112(%rsp), %rsi
xorl %edx, %edx
callq hipStreamWaitEvent
testl %eax, %eax
je .LBB6_8
# %bb.7:
movl %eax, %ebp
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r15
movl %ebp, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.17, %ecx
movl $132, %edx
movq %r15, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB6_8: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit30
movq stream1(%rip), %r9
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_10
# %bb.9:
movq a(%rip), %rax
movq b(%rip), %rcx
movq c2(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10add_vectorPKfS0_Pfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_10:
movq stream1(%rip), %r9
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_12
# %bb.11:
movq a(%rip), %rax
movq b(%rip), %rcx
movq c2(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10mul_vectorPKfS0_Pfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_12:
movq stream2(%rip), %r9
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_14
# %bb.13:
movq a(%rip), %rax
movq b(%rip), %rcx
movq c2(%rip), %rdx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movq %rdx, 56(%rsp)
movl $100000, 4(%rsp) # imm = 0x186A0
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10add_vectorPKfS0_Pfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_14:
movq 112(%rsp), %rdi
movq stream2(%rip), %rsi
callq hipEventRecord
testl %eax, %eax
je .LBB6_16
# %bb.15:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.18, %ecx
movl $140, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB6_16: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit50
movq stream1(%rip), %rdi
callq hipStreamSynchronize
testl %eax, %eax
je .LBB6_18
# %bb.17:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.10, %ecx
movl $141, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB6_18: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit52
movl $.Lstr, %edi
callq puts@PLT
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size _Z18multi_stream_asyncv, .Lfunc_end6-_Z18multi_stream_asyncv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $stream1, %edi
callq hipStreamCreate
testl %eax, %eax
je .LBB7_2
# %bb.1:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.20, %ecx
movl $150, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB7_2: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit
movl $stream2, %edi
callq hipStreamCreate
testl %eax, %eax
je .LBB7_4
# %bb.3:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.21, %ecx
movl $151, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB7_4: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit2
movl $a, %edi
movl $400000, %esi # imm = 0x61A80
callq hipMalloc
testl %eax, %eax
je .LBB7_6
# %bb.5:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.22, %ecx
movl $153, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB7_6: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit4
movl $b, %edi
movl $400000, %esi # imm = 0x61A80
callq hipMalloc
testl %eax, %eax
je .LBB7_8
# %bb.7:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.23, %ecx
movl $154, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB7_8: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit6
movl $c1, %edi
movl $400000, %esi # imm = 0x61A80
callq hipMalloc
testl %eax, %eax
je .LBB7_10
# %bb.9:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.24, %ecx
movl $155, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB7_10: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit8
movl $c2, %edi
movl $400000, %esi # imm = 0x61A80
callq hipMalloc
testl %eax, %eax
je .LBB7_12
# %bb.11:
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorName
movq %rax, %r14
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
movl $.L.str.25, %ecx
movl $156, %edx
movq %r14, %r8
movq %rax, %r9
xorl %eax, %eax
callq printf
.LBB7_12: # %_Z20__check_cuda_runtime10hipError_tPKcS1_i.exit10
callq _Z5asyncv
callq sync
callq _Z18multi_stream_asyncv
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10add_vectorPKfS0_Pfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10mul_vectorPKfS0_Pfi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "runtime error %s:%d %s failed. \n code = %s, message = %s\n"
.size .L.str, 60
.type _Z10add_vectorPKfS0_Pfi,@object # @_Z10add_vectorPKfS0_Pfi
.section .rodata,"a",@progbits
.globl _Z10add_vectorPKfS0_Pfi
.p2align 3, 0x0
_Z10add_vectorPKfS0_Pfi:
.quad _Z25__device_stub__add_vectorPKfS0_Pfi
.size _Z10add_vectorPKfS0_Pfi, 8
.type _Z10mul_vectorPKfS0_Pfi,@object # @_Z10mul_vectorPKfS0_Pfi
.globl _Z10mul_vectorPKfS0_Pfi
.p2align 3, 0x0
_Z10mul_vectorPKfS0_Pfi:
.quad _Z25__device_stub__mul_vectorPKfS0_Pfi
.size _Z10mul_vectorPKfS0_Pfi, 8
.type stream1,@object # @stream1
.bss
.globl stream1
.p2align 3, 0x0
stream1:
.quad 0
.size stream1, 8
.type stream2,@object # @stream2
.globl stream2
.p2align 3, 0x0
stream2:
.quad 0
.size stream2, 8
.type a,@object # @a
.globl a
.p2align 3, 0x0
a:
.quad 0
.size a, 8
.type b,@object # @b
.globl b
.p2align 3, 0x0
b:
.quad 0
.size b, 8
.type c1,@object # @c1
.globl c1
.p2align 3, 0x0
c1:
.quad 0
.size c1, 8
.type c2,@object # @c2
.globl c2
.p2align 3, 0x0
c2:
.quad 0
.size c2, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "hipEventCreate(&event_start1)"
.size .L.str.1, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/jinmin527/learning-cuda-trt/main/cuda-runtime-api-1.5.2-parallel/src/main.hip"
.size .L.str.2, 135
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "hipEventCreate(&event_stop1)"
.size .L.str.3, 29
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "hipEventCreate(&event_start2)"
.size .L.str.4, 30
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "hipEventCreate(&event_stop2)"
.size .L.str.5, 29
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "hipEventRecord(event_start1, stream1)"
.size .L.str.6, 38
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "hipEventRecord(event_stop1, stream1)"
.size .L.str.7, 37
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "hipEventRecord(event_start2, stream2)"
.size .L.str.8, 38
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "hipEventRecord(event_stop2, stream2)"
.size .L.str.9, 37
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "hipStreamSynchronize(stream1)"
.size .L.str.10, 30
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "hipStreamSynchronize(stream2)"
.size .L.str.11, 30
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "hipEventElapsedTime(&time1, event_start1, event_stop1)"
.size .L.str.12, 55
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "hipEventElapsedTime(&time2, event_start2, event_stop2)"
.size .L.str.13, 55
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n"
.size .L.str.14, 58
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "sync: time1 = %.2f ms, count = %.2f ms\n"
.size .L.str.15, 40
.type .L.str.16,@object # @.str.16
.L.str.16:
.asciz "hipEventCreate(&event_async)"
.size .L.str.16, 29
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz "hipStreamWaitEvent(stream1, event_async)"
.size .L.str.17, 41
.type .L.str.18,@object # @.str.18
.L.str.18:
.asciz "hipEventRecord(event_async, stream2)"
.size .L.str.18, 37
.type .L.str.20,@object # @.str.20
.L.str.20:
.asciz "hipStreamCreate(&stream1)"
.size .L.str.20, 26
.type .L.str.21,@object # @.str.21
.L.str.21:
.asciz "hipStreamCreate(&stream2)"
.size .L.str.21, 26
.type .L.str.22,@object # @.str.22
.L.str.22:
.asciz "hipMalloc(&a, bytes)"
.size .L.str.22, 21
.type .L.str.23,@object # @.str.23
.L.str.23:
.asciz "hipMalloc(&b, bytes)"
.size .L.str.23, 21
.type .L.str.24,@object # @.str.24
.L.str.24:
.asciz "hipMalloc(&c1, bytes)"
.size .L.str.24, 22
.type .L.str.25,@object # @.str.25
.L.str.25:
.asciz "hipMalloc(&c2, bytes)"
.size .L.str.25, 22
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10add_vectorPKfS0_Pfi"
.size .L__unnamed_1, 24
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10mul_vectorPKfS0_Pfi"
.size .L__unnamed_2, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "multi_stream_async done."
.size .Lstr, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__add_vectorPKfS0_Pfi
.addrsig_sym _Z25__device_stub__mul_vectorPKfS0_Pfi
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10add_vectorPKfS0_Pfi
.addrsig_sym _Z10mul_vectorPKfS0_Pfi
.addrsig_sym stream1
.addrsig_sym stream2
.addrsig_sym a
.addrsig_sym b
.addrsig_sym c1
.addrsig_sym c2
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10mul_vectorPKfS0_Pfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ FMUL R9, R4, R3 ; /* 0x0000000304097220 */
/* 0x004fca0000400000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z10add_vectorPKfS0_Pfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10add_vectorPKfS0_Pfi
.globl _Z10add_vectorPKfS0_Pfi
.p2align 8
.type _Z10add_vectorPKfS0_Pfi,@function
_Z10add_vectorPKfS0_Pfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10add_vectorPKfS0_Pfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10add_vectorPKfS0_Pfi, .Lfunc_end0-_Z10add_vectorPKfS0_Pfi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z10mul_vectorPKfS0_Pfi
.globl _Z10mul_vectorPKfS0_Pfi
.p2align 8
.type _Z10mul_vectorPKfS0_Pfi,@function
_Z10mul_vectorPKfS0_Pfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_mul_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10mul_vectorPKfS0_Pfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z10mul_vectorPKfS0_Pfi, .Lfunc_end1-_Z10mul_vectorPKfS0_Pfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10add_vectorPKfS0_Pfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10add_vectorPKfS0_Pfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10mul_vectorPKfS0_Pfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10mul_vectorPKfS0_Pfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | extern "C"
__global__
void vecConv(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t sum = 0;
if ( idx < N ) {
for(size_t i = 0; i <= idx; i++){
sum += l[i] + r[idx-i];
}
p[idx] = sum;
}
} | code for sm_80
Function : vecConv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R13, SR_CTAID.X ; /* 0x00000000000d7919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R13, R13, c[0x0][0x0], R0 ; /* 0x000000000d0d7a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R13, c[0x0][0x178], PT ; /* 0x00005e000d007a0c */
/* 0x000fc80003f06070 */
/*0050*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x17c], PT, P0 ; /* 0x00005f00ff007a0c */
/* 0x000fda0003f06100 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ ISETP.GE.U32.AND P0, PT, R13.reuse, 0x3, PT ; /* 0x000000030d00780c */
/* 0x040fe20003f06070 */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0090*/ IADD3 R8, R13.reuse, 0x1, RZ ; /* 0x000000010d087810 */
/* 0x040fe20007ffe0ff */
/*00a0*/ BSSY B0, 0x380 ; /* 0x000002d000007945 */
/* 0x000fe20003800000 */
/*00b0*/ LEA R2, P2, R13.reuse, c[0x0][0x170], 0x2 ; /* 0x00005c000d027a11 */
/* 0x040fe200078410ff */
/*00c0*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*00d0*/ LOP3.LUT R8, R8, 0x3, RZ, 0xc0, !PT ; /* 0x0000000308087812 */
/* 0x000fe200078ec0ff */
/*00e0*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e00ff */
/*00f0*/ LEA.HI.X R3, R13, c[0x0][0x174], RZ, 0x2, P2 ; /* 0x00005d000d037a11 */
/* 0x000fe200010f14ff */
/*0100*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fe200078e00ff */
/*0110*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc60003f25270 */
/*0120*/ @!P0 BRA 0x370 ; /* 0x0000024000008947 */
/* 0x000fea0003800000 */
/*0130*/ LEA R4, P3, R13.reuse, c[0x0][0x168], 0x2 ; /* 0x00005a000d047a11 */
/* 0x040fe200078610ff */
/*0140*/ IMAD.MOV.U32 R10, RZ, RZ, -0x1 ; /* 0xffffffffff0a7424 */
/* 0x000fe200078e00ff */
/*0150*/ IADD3 R15, P0, P2, -R13.reuse, -0x1, R8 ; /* 0xffffffff0d0f7810 */
/* 0x040fe20007a1e108 */
/*0160*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0170*/ IADD3 R4, P4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */
/* 0x000fe20007f9e0ff */
/*0180*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e00ff */
/*0190*/ LEA.HI.X R5, R13, c[0x0][0x16c], RZ, 0x2, P3 ; /* 0x00005b000d057a11 */
/* 0x000fe200018f14ff */
/*01a0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff067624 */
/* 0x000fe200078e00ff */
/*01b0*/ IADD3.X R10, R10, -0x1, RZ, P0, P2 ; /* 0xffffffff0a0a7810 */
/* 0x000fe200007e44ff */
/*01c0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff077624 */
/* 0x000fe200078e00ff */
/*01d0*/ IADD3.X R5, R5, -0x1, RZ, P4, !PT ; /* 0xffffffff05057810 */
/* 0x000fca00027fe4ff */
/*01e0*/ LDG.E R12, [R4.64+0x4] ; /* 0x00000404040c7981 */
/* 0x0000a8000c1e1900 */
/*01f0*/ LDG.E R17, [R6.64] ; /* 0x0000000406117981 */
/* 0x000ea8000c1e1900 */
/*0200*/ LDG.E R14, [R4.64] ; /* 0x00000004040e7981 */
/* 0x0000e8000c1e1900 */
/*0210*/ LDG.E R19, [R6.64+0x4] ; /* 0x0000040406137981 */
/* 0x0002e8000c1e1900 */
/*0220*/ LDG.E R16, [R4.64+-0x4] ; /* 0xfffffc0404107981 */
/* 0x000128000c1e1900 */
/*0230*/ LDG.E R21, [R6.64+0x8] ; /* 0x0000080406157981 */
/* 0x000328000c1e1900 */
/*0240*/ LDG.E R18, [R4.64+-0x8] ; /* 0xfffff80404127981 */
/* 0x000168000c1e1900 */
/*0250*/ LDG.E R23, [R6.64+0xc] ; /* 0x00000c0406177981 */
/* 0x000362000c1e1900 */
/*0260*/ IADD3 R0, P0, R0, 0x4, RZ ; /* 0x0000000400007810 */
/* 0x000fc80007f1e0ff */
/*0270*/ IADD3 R20, P2, R0, R15, RZ ; /* 0x0000000f00147210 */
/* 0x000fe20007f5e0ff */
/*0280*/ IMAD.X R11, RZ, RZ, R11, P0 ; /* 0x000000ffff0b7224 */
/* 0x000fe200000e060b */
/*0290*/ IADD3 R4, P3, R4, -0x10, RZ ; /* 0xfffffff004047810 */
/* 0x001fe40007f7e0ff */
/*02a0*/ ISETP.NE.U32.AND P0, PT, R20, RZ, PT ; /* 0x000000ff1400720c */
/* 0x000fe40003f05070 */
/*02b0*/ IADD3.X R5, R5, -0x1, RZ, P3, !PT ; /* 0xffffffff05057810 */
/* 0x000fe20001ffe4ff */
/*02c0*/ IMAD.IADD R12, R12, 0x1, R17 ; /* 0x000000010c0c7824 */
/* 0x004fe400078e0211 */
/*02d0*/ IMAD.X R17, R11, 0x1, R10, P2 ; /* 0x000000010b117824 */
/* 0x000fe200010e060a */
/*02e0*/ IADD3 R6, P2, R6, 0x10, RZ ; /* 0x0000001006067810 */
/* 0x002fc80007f5e0ff */
/*02f0*/ ISETP.NE.AND.EX P0, PT, R17, RZ, PT, P0 ; /* 0x000000ff1100720c */
/* 0x000fe20003f05300 */
/*0300*/ IMAD.IADD R14, R14, 0x1, R19 ; /* 0x000000010e0e7824 */
/* 0x008fe400078e0213 */
/*0310*/ IMAD.X R7, RZ, RZ, R7, P2 ; /* 0x000000ffff077224 */
/* 0x000fc600010e0607 */
/*0320*/ IADD3 R9, R14, R9, R12 ; /* 0x000000090e097210 */
/* 0x000fe20007ffe00c */
/*0330*/ IMAD.IADD R16, R16, 0x1, R21 ; /* 0x0000000110107824 */
/* 0x010fe400078e0215 */
/*0340*/ IMAD.IADD R23, R18, 0x1, R23 ; /* 0x0000000112177824 */
/* 0x020fca00078e0217 */
/*0350*/ IADD3 R9, R23, R9, R16 ; /* 0x0000000917097210 */
/* 0x000fe20007ffe010 */
/*0360*/ @P0 BRA 0x1e0 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0370*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0380*/ BSSY B0, 0x510 ; /* 0x0000018000007945 */
/* 0x000fe20003800000 */
/*0390*/ @!P1 BRA 0x500 ; /* 0x0000016000009947 */
/* 0x000fea0003800000 */
/*03a0*/ IADD3 R5, P0, R13, -R0, RZ ; /* 0x800000000d057210 */
/* 0x000fe40007f1e0ff */
/*03b0*/ IADD3 R8, P3, RZ, -R8, RZ ; /* 0x80000008ff087210 */
/* 0x000fe40007f7e0ff */
/*03c0*/ LEA R6, P2, R0.reuse, c[0x0][0x160], 0x2 ; /* 0x0000580000067a11 */
/* 0x040fe200078410ff */
/*03d0*/ IMAD.X R12, RZ, RZ, ~R11.reuse, P0 ; /* 0x000000ffff0c7224 */
/* 0x100fe200000e0e0b */
/*03e0*/ LEA R4, P1, R5, c[0x0][0x168], 0x2 ; /* 0x00005a0005047a11 */
/* 0x000fe200078210ff */
/*03f0*/ IMAD.X R10, RZ, RZ, -0x1, P3 ; /* 0xffffffffff0a7424 */
/* 0x000fe200018e06ff */
/*0400*/ LEA.HI.X R11, R0, c[0x0][0x164], R11, 0x2, P2 ; /* 0x00005900000b7a11 */
/* 0x000fc400010f140b */
/*0410*/ LEA.HI.X R5, R5, c[0x0][0x16c], R12, 0x2, P1 ; /* 0x00005b0005057a11 */
/* 0x000fc800008f140c */
/*0420*/ IMAD.MOV.U32 R7, RZ, RZ, R11 ; /* 0x000000ffff077224 */
/* 0x000fe200078e000b */
/*0430*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x0000aa000c1e1900 */
/*0440*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */
/* 0x0002a2000c1e1900 */
/*0450*/ IADD3 R8, P0, R8, 0x1, RZ ; /* 0x0000000108087810 */
/* 0x000fe40007f1e0ff */
/*0460*/ IADD3 R4, P1, R4, -0x4, RZ ; /* 0xfffffffc04047810 */
/* 0x001fc60007f3e0ff */
/*0470*/ IMAD.X R10, RZ, RZ, R10, P0 ; /* 0x000000ffff0a7224 */
/* 0x000fe200000e060a */
/*0480*/ ISETP.NE.U32.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f05070 */
/*0490*/ IADD3.X R5, R5, -0x1, RZ, P1, !PT ; /* 0xffffffff05057810 */
/* 0x000fe40000ffe4ff */
/*04a0*/ ISETP.NE.AND.EX P0, PT, R10, RZ, PT, P0 ; /* 0x000000ff0a00720c */
/* 0x000fe40003f05300 */
/*04b0*/ IADD3 R6, P2, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fca0007f5e0ff */
/*04c0*/ IMAD.X R11, RZ, RZ, R11, P2 ; /* 0x000000ffff0b7224 */
/* 0x000fe400010e060b */
/*04d0*/ IMAD.IADD R0, R0, 0x1, R7 ; /* 0x0000000100007824 */
/* 0x004fc800078e0207 */
/*04e0*/ IMAD.IADD R9, R0, 0x1, R9 ; /* 0x0000000100097824 */
/* 0x000fe200078e0209 */
/*04f0*/ @P0 BRA 0x420 ; /* 0xffffff2000000947 */
/* 0x000fea000383ffff */
/*0500*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0510*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101904 */
/*0520*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0530*/ BRA 0x530; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | extern "C"
__global__
void vecConv(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t sum = 0;
if ( idx < N ) {
for(size_t i = 0; i <= idx; i++){
sum += l[i] + r[idx-i];
}
p[idx] = sum;
}
} | .file "tmpxft_00018e93_00000000-6_vecConv.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m
.type _Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m, @function
_Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq vecConv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m, .-_Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m
.globl vecConv
.type vecConv, @function
vecConv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size vecConv, .-vecConv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "vecConv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq vecConv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | extern "C"
__global__
void vecConv(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t sum = 0;
if ( idx < N ) {
for(size_t i = 0; i <= idx; i++){
sum += l[i] + r[idx-i];
}
p[idx] = sum;
}
} | #include <hip/hip_runtime.h>
extern "C"
__global__
void vecConv(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t sum = 0;
if ( idx < N ) {
for(size_t i = 0; i <= idx; i++){
sum += l[i] + r[idx-i];
}
p[idx] = sum;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
extern "C"
__global__
void vecConv(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t sum = 0;
if ( idx < N ) {
for(size_t i = 0; i <= idx; i++){
sum += l[i] + r[idx-i];
}
p[idx] = sum;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected vecConv
.globl vecConv
.p2align 8
.type vecConv,@function
vecConv:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b64 v[5:6], 2, v[1:2]
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v4, 0
s_mov_b32 s3, 0
.p2align 6
.LBB0_2:
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v6, vcc_lo
s_load_b32 s2, s[4:5], 0x0
v_add_co_u32 v5, vcc_lo, v5, -4
global_load_b32 v0, v[7:8], off
v_add_co_ci_u32_e32 v6, vcc_lo, -1, v6, vcc_lo
s_add_u32 s4, s4, 4
s_addc_u32 s5, s5, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, -4, v[5:6]
s_or_b32 s3, vcc_lo, s3
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_nc_u32_e32 v0, s2, v0
v_add_co_u32 v3, s2, v3, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s2, 0, v4, s2
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s3
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel vecConv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size vecConv, .Lfunc_end0-vecConv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: vecConv
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: vecConv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
extern "C"
__global__
void vecConv(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t sum = 0;
if ( idx < N ) {
for(size_t i = 0; i <= idx; i++){
sum += l[i] + r[idx-i];
}
p[idx] = sum;
}
} | .text
.file "vecConv.hip"
.globl __device_stub__vecConv # -- Begin function __device_stub__vecConv
.p2align 4, 0x90
.type __device_stub__vecConv,@function
__device_stub__vecConv: # @__device_stub__vecConv
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $vecConv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__vecConv, .Lfunc_end0-__device_stub__vecConv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $vecConv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type vecConv,@object # @vecConv
.section .rodata,"a",@progbits
.globl vecConv
.p2align 3, 0x0
vecConv:
.quad __device_stub__vecConv
.size vecConv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "vecConv"
.size .L__unnamed_1, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__vecConv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym vecConv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : vecConv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R13, SR_CTAID.X ; /* 0x00000000000d7919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R13, R13, c[0x0][0x0], R0 ; /* 0x000000000d0d7a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R13, c[0x0][0x178], PT ; /* 0x00005e000d007a0c */
/* 0x000fc80003f06070 */
/*0050*/ ISETP.GE.U32.AND.EX P0, PT, RZ, c[0x0][0x17c], PT, P0 ; /* 0x00005f00ff007a0c */
/* 0x000fda0003f06100 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ ISETP.GE.U32.AND P0, PT, R13.reuse, 0x3, PT ; /* 0x000000030d00780c */
/* 0x040fe20003f06070 */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0090*/ IADD3 R8, R13.reuse, 0x1, RZ ; /* 0x000000010d087810 */
/* 0x040fe20007ffe0ff */
/*00a0*/ BSSY B0, 0x380 ; /* 0x000002d000007945 */
/* 0x000fe20003800000 */
/*00b0*/ LEA R2, P2, R13.reuse, c[0x0][0x170], 0x2 ; /* 0x00005c000d027a11 */
/* 0x040fe200078410ff */
/*00c0*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*00d0*/ LOP3.LUT R8, R8, 0x3, RZ, 0xc0, !PT ; /* 0x0000000308087812 */
/* 0x000fe200078ec0ff */
/*00e0*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e00ff */
/*00f0*/ LEA.HI.X R3, R13, c[0x0][0x174], RZ, 0x2, P2 ; /* 0x00005d000d037a11 */
/* 0x000fe200010f14ff */
/*0100*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fe200078e00ff */
/*0110*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fc60003f25270 */
/*0120*/ @!P0 BRA 0x370 ; /* 0x0000024000008947 */
/* 0x000fea0003800000 */
/*0130*/ LEA R4, P3, R13.reuse, c[0x0][0x168], 0x2 ; /* 0x00005a000d047a11 */
/* 0x040fe200078610ff */
/*0140*/ IMAD.MOV.U32 R10, RZ, RZ, -0x1 ; /* 0xffffffffff0a7424 */
/* 0x000fe200078e00ff */
/*0150*/ IADD3 R15, P0, P2, -R13.reuse, -0x1, R8 ; /* 0xffffffff0d0f7810 */
/* 0x040fe20007a1e108 */
/*0160*/ IMAD.MOV.U32 R0, RZ, RZ, RZ ; /* 0x000000ffff007224 */
/* 0x000fe200078e00ff */
/*0170*/ IADD3 R4, P4, R4, -0x4, RZ ; /* 0xfffffffc04047810 */
/* 0x000fe20007f9e0ff */
/*0180*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e00ff */
/*0190*/ LEA.HI.X R5, R13, c[0x0][0x16c], RZ, 0x2, P3 ; /* 0x00005b000d057a11 */
/* 0x000fe200018f14ff */
/*01a0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff067624 */
/* 0x000fe200078e00ff */
/*01b0*/ IADD3.X R10, R10, -0x1, RZ, P0, P2 ; /* 0xffffffff0a0a7810 */
/* 0x000fe200007e44ff */
/*01c0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff077624 */
/* 0x000fe200078e00ff */
/*01d0*/ IADD3.X R5, R5, -0x1, RZ, P4, !PT ; /* 0xffffffff05057810 */
/* 0x000fca00027fe4ff */
/*01e0*/ LDG.E R12, [R4.64+0x4] ; /* 0x00000404040c7981 */
/* 0x0000a8000c1e1900 */
/*01f0*/ LDG.E R17, [R6.64] ; /* 0x0000000406117981 */
/* 0x000ea8000c1e1900 */
/*0200*/ LDG.E R14, [R4.64] ; /* 0x00000004040e7981 */
/* 0x0000e8000c1e1900 */
/*0210*/ LDG.E R19, [R6.64+0x4] ; /* 0x0000040406137981 */
/* 0x0002e8000c1e1900 */
/*0220*/ LDG.E R16, [R4.64+-0x4] ; /* 0xfffffc0404107981 */
/* 0x000128000c1e1900 */
/*0230*/ LDG.E R21, [R6.64+0x8] ; /* 0x0000080406157981 */
/* 0x000328000c1e1900 */
/*0240*/ LDG.E R18, [R4.64+-0x8] ; /* 0xfffff80404127981 */
/* 0x000168000c1e1900 */
/*0250*/ LDG.E R23, [R6.64+0xc] ; /* 0x00000c0406177981 */
/* 0x000362000c1e1900 */
/*0260*/ IADD3 R0, P0, R0, 0x4, RZ ; /* 0x0000000400007810 */
/* 0x000fc80007f1e0ff */
/*0270*/ IADD3 R20, P2, R0, R15, RZ ; /* 0x0000000f00147210 */
/* 0x000fe20007f5e0ff */
/*0280*/ IMAD.X R11, RZ, RZ, R11, P0 ; /* 0x000000ffff0b7224 */
/* 0x000fe200000e060b */
/*0290*/ IADD3 R4, P3, R4, -0x10, RZ ; /* 0xfffffff004047810 */
/* 0x001fe40007f7e0ff */
/*02a0*/ ISETP.NE.U32.AND P0, PT, R20, RZ, PT ; /* 0x000000ff1400720c */
/* 0x000fe40003f05070 */
/*02b0*/ IADD3.X R5, R5, -0x1, RZ, P3, !PT ; /* 0xffffffff05057810 */
/* 0x000fe20001ffe4ff */
/*02c0*/ IMAD.IADD R12, R12, 0x1, R17 ; /* 0x000000010c0c7824 */
/* 0x004fe400078e0211 */
/*02d0*/ IMAD.X R17, R11, 0x1, R10, P2 ; /* 0x000000010b117824 */
/* 0x000fe200010e060a */
/*02e0*/ IADD3 R6, P2, R6, 0x10, RZ ; /* 0x0000001006067810 */
/* 0x002fc80007f5e0ff */
/*02f0*/ ISETP.NE.AND.EX P0, PT, R17, RZ, PT, P0 ; /* 0x000000ff1100720c */
/* 0x000fe20003f05300 */
/*0300*/ IMAD.IADD R14, R14, 0x1, R19 ; /* 0x000000010e0e7824 */
/* 0x008fe400078e0213 */
/*0310*/ IMAD.X R7, RZ, RZ, R7, P2 ; /* 0x000000ffff077224 */
/* 0x000fc600010e0607 */
/*0320*/ IADD3 R9, R14, R9, R12 ; /* 0x000000090e097210 */
/* 0x000fe20007ffe00c */
/*0330*/ IMAD.IADD R16, R16, 0x1, R21 ; /* 0x0000000110107824 */
/* 0x010fe400078e0215 */
/*0340*/ IMAD.IADD R23, R18, 0x1, R23 ; /* 0x0000000112177824 */
/* 0x020fca00078e0217 */
/*0350*/ IADD3 R9, R23, R9, R16 ; /* 0x0000000917097210 */
/* 0x000fe20007ffe010 */
/*0360*/ @P0 BRA 0x1e0 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0370*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0380*/ BSSY B0, 0x510 ; /* 0x0000018000007945 */
/* 0x000fe20003800000 */
/*0390*/ @!P1 BRA 0x500 ; /* 0x0000016000009947 */
/* 0x000fea0003800000 */
/*03a0*/ IADD3 R5, P0, R13, -R0, RZ ; /* 0x800000000d057210 */
/* 0x000fe40007f1e0ff */
/*03b0*/ IADD3 R8, P3, RZ, -R8, RZ ; /* 0x80000008ff087210 */
/* 0x000fe40007f7e0ff */
/*03c0*/ LEA R6, P2, R0.reuse, c[0x0][0x160], 0x2 ; /* 0x0000580000067a11 */
/* 0x040fe200078410ff */
/*03d0*/ IMAD.X R12, RZ, RZ, ~R11.reuse, P0 ; /* 0x000000ffff0c7224 */
/* 0x100fe200000e0e0b */
/*03e0*/ LEA R4, P1, R5, c[0x0][0x168], 0x2 ; /* 0x00005a0005047a11 */
/* 0x000fe200078210ff */
/*03f0*/ IMAD.X R10, RZ, RZ, -0x1, P3 ; /* 0xffffffffff0a7424 */
/* 0x000fe200018e06ff */
/*0400*/ LEA.HI.X R11, R0, c[0x0][0x164], R11, 0x2, P2 ; /* 0x00005900000b7a11 */
/* 0x000fc400010f140b */
/*0410*/ LEA.HI.X R5, R5, c[0x0][0x16c], R12, 0x2, P1 ; /* 0x00005b0005057a11 */
/* 0x000fc800008f140c */
/*0420*/ IMAD.MOV.U32 R7, RZ, RZ, R11 ; /* 0x000000ffff077224 */
/* 0x000fe200078e000b */
/*0430*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x0000aa000c1e1900 */
/*0440*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */
/* 0x0002a2000c1e1900 */
/*0450*/ IADD3 R8, P0, R8, 0x1, RZ ; /* 0x0000000108087810 */
/* 0x000fe40007f1e0ff */
/*0460*/ IADD3 R4, P1, R4, -0x4, RZ ; /* 0xfffffffc04047810 */
/* 0x001fc60007f3e0ff */
/*0470*/ IMAD.X R10, RZ, RZ, R10, P0 ; /* 0x000000ffff0a7224 */
/* 0x000fe200000e060a */
/*0480*/ ISETP.NE.U32.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f05070 */
/*0490*/ IADD3.X R5, R5, -0x1, RZ, P1, !PT ; /* 0xffffffff05057810 */
/* 0x000fe40000ffe4ff */
/*04a0*/ ISETP.NE.AND.EX P0, PT, R10, RZ, PT, P0 ; /* 0x000000ff0a00720c */
/* 0x000fe40003f05300 */
/*04b0*/ IADD3 R6, P2, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fca0007f5e0ff */
/*04c0*/ IMAD.X R11, RZ, RZ, R11, P2 ; /* 0x000000ffff0b7224 */
/* 0x000fe400010e060b */
/*04d0*/ IMAD.IADD R0, R0, 0x1, R7 ; /* 0x0000000100007824 */
/* 0x004fc800078e0207 */
/*04e0*/ IMAD.IADD R9, R0, 0x1, R9 ; /* 0x0000000100097824 */
/* 0x000fe200078e0209 */
/*04f0*/ @P0 BRA 0x420 ; /* 0xffffff2000000947 */
/* 0x000fea000383ffff */
/*0500*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0510*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x000fe2000c101904 */
/*0520*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0530*/ BRA 0x530; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected vecConv
.globl vecConv
.p2align 8
.type vecConv,@function
vecConv:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b64 v[5:6], 2, v[1:2]
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v4, 0
s_mov_b32 s3, 0
.p2align 6
.LBB0_2:
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v6, vcc_lo
s_load_b32 s2, s[4:5], 0x0
v_add_co_u32 v5, vcc_lo, v5, -4
global_load_b32 v0, v[7:8], off
v_add_co_ci_u32_e32 v6, vcc_lo, -1, v6, vcc_lo
s_add_u32 s4, s4, 4
s_addc_u32 s5, s5, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, -4, v[5:6]
s_or_b32 s3, vcc_lo, s3
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_nc_u32_e32 v0, s2, v0
v_add_co_u32 v3, s2, v3, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s2, 0, v4, s2
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s3
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel vecConv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size vecConv, .Lfunc_end0-vecConv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 8
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: vecConv
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: vecConv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00018e93_00000000-6_vecConv.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m
.type _Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m, @function
_Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq vecConv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m, .-_Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m
.globl vecConv
.type vecConv, @function
vecConv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z7vecConvPiS_S_mPiS_S_m
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size vecConv, .-vecConv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "vecConv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq vecConv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "vecConv.hip"
.globl __device_stub__vecConv # -- Begin function __device_stub__vecConv
.p2align 4, 0x90
.type __device_stub__vecConv,@function
__device_stub__vecConv: # @__device_stub__vecConv
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $vecConv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__vecConv, .Lfunc_end0-__device_stub__vecConv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $vecConv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type vecConv,@object # @vecConv
.section .rodata,"a",@progbits
.globl vecConv
.p2align 3, 0x0
vecConv:
.quad __device_stub__vecConv
.size vecConv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "vecConv"
.size .L__unnamed_1, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__vecConv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym vecConv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void kernel(int* a_d, int* b_d, int* c_d){
*c_d = *a_d + *b_d;
return;
}
int main(){
int a = 1, b = 2;
int *a_d, *b_d, *c_d;
cudaMalloc((void**) &a_d, sizeof(int));
cudaMalloc((void**) &b_d, sizeof(int));
cudaMalloc((void**) &c_d, sizeof(int));
cudaMemcpy(a_d, &a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, &b, sizeof(int), cudaMemcpyHostToDevice);
int c;
kernel<<<1, 1>>>(a_d, b_d, c_d);
cudaMemcpy(&c, c_d, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree((void**) a_d);
cudaFree((void**) b_d);
cudaFree((void**) c_d);
printf("%d\n", c);
} | code for sm_80
Function : _Z6kernelPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */
/* 0x000fe20000000f00 */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0060*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff067624 */
/* 0x000fe200078e00ff */
/*0090*/ MOV R7, c[0x0][0x174] ; /* 0x00005d0000077a02 */
/* 0x000fe40000000f00 */
/*00a0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void kernel(int* a_d, int* b_d, int* c_d){
*c_d = *a_d + *b_d;
return;
}
int main(){
int a = 1, b = 2;
int *a_d, *b_d, *c_d;
cudaMalloc((void**) &a_d, sizeof(int));
cudaMalloc((void**) &b_d, sizeof(int));
cudaMalloc((void**) &c_d, sizeof(int));
cudaMemcpy(a_d, &a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, &b, sizeof(int), cudaMemcpyHostToDevice);
int c;
kernel<<<1, 1>>>(a_d, b_d, c_d);
cudaMemcpy(&c, c_d, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree((void**) a_d);
cudaFree((void**) b_d);
cudaFree((void**) c_d);
printf("%d\n", c);
} | .file "tmpxft_0008c412_00000000-6_cudamemcp.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z6kernelPiS_S_PiS_S_
.type _Z29__device_stub__Z6kernelPiS_S_PiS_S_, @function
_Z29__device_stub__Z6kernelPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z29__device_stub__Z6kernelPiS_S_PiS_S_, .-_Z29__device_stub__Z6kernelPiS_S_PiS_S_
.globl _Z6kernelPiS_S_
.type _Z6kernelPiS_S_, @function
_Z6kernelPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6kernelPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPiS_S_, .-_Z6kernelPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $1, (%rsp)
movl $2, 4(%rsp)
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movq %rsp, %rsi
movl $1, %ecx
movl $4, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 4(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 44(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 24(%rsp), %rsi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl 44(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z29__device_stub__Z6kernelPiS_S_PiS_S_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6kernelPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
__global__ void kernel(int* a_d, int* b_d, int* c_d){
*c_d = *a_d + *b_d;
return;
}
int main(){
int a = 1, b = 2;
int *a_d, *b_d, *c_d;
cudaMalloc((void**) &a_d, sizeof(int));
cudaMalloc((void**) &b_d, sizeof(int));
cudaMalloc((void**) &c_d, sizeof(int));
cudaMemcpy(a_d, &a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, &b, sizeof(int), cudaMemcpyHostToDevice);
int c;
kernel<<<1, 1>>>(a_d, b_d, c_d);
cudaMemcpy(&c, c_d, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree((void**) a_d);
cudaFree((void**) b_d);
cudaFree((void**) c_d);
printf("%d\n", c);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void kernel(int* a_d, int* b_d, int* c_d){
*c_d = *a_d + *b_d;
return;
}
int main(){
int a = 1, b = 2;
int *a_d, *b_d, *c_d;
hipMalloc((void**) &a_d, sizeof(int));
hipMalloc((void**) &b_d, sizeof(int));
hipMalloc((void**) &c_d, sizeof(int));
hipMemcpy(a_d, &a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(b_d, &b, sizeof(int), hipMemcpyHostToDevice);
int c;
kernel<<<1, 1>>>(a_d, b_d, c_d);
hipMemcpy(&c, c_d, sizeof(int), hipMemcpyDeviceToHost);
hipFree((void**) a_d);
hipFree((void**) b_d);
hipFree((void**) c_d);
printf("%d\n", c);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void kernel(int* a_d, int* b_d, int* c_d){
*c_d = *a_d + *b_d;
return;
}
int main(){
int a = 1, b = 2;
int *a_d, *b_d, *c_d;
hipMalloc((void**) &a_d, sizeof(int));
hipMalloc((void**) &b_d, sizeof(int));
hipMalloc((void**) &c_d, sizeof(int));
hipMemcpy(a_d, &a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(b_d, &b, sizeof(int), hipMemcpyHostToDevice);
int c;
kernel<<<1, 1>>>(a_d, b_d, c_d);
hipMemcpy(&c, c_d, sizeof(int), hipMemcpyDeviceToHost);
hipFree((void**) a_d);
hipFree((void**) b_d);
hipFree((void**) c_d);
printf("%d\n", c);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPiS_S_
.globl _Z6kernelPiS_S_
.p2align 8
.type _Z6kernelPiS_S_,@function
_Z6kernelPiS_S_:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[4:5], 0x0
s_load_b32 s3, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPiS_S_, .Lfunc_end0-_Z6kernelPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z6kernelPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void kernel(int* a_d, int* b_d, int* c_d){
*c_d = *a_d + *b_d;
return;
}
int main(){
int a = 1, b = 2;
int *a_d, *b_d, *c_d;
hipMalloc((void**) &a_d, sizeof(int));
hipMalloc((void**) &b_d, sizeof(int));
hipMalloc((void**) &c_d, sizeof(int));
hipMemcpy(a_d, &a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(b_d, &b, sizeof(int), hipMemcpyHostToDevice);
int c;
kernel<<<1, 1>>>(a_d, b_d, c_d);
hipMemcpy(&c, c_d, sizeof(int), hipMemcpyDeviceToHost);
hipFree((void**) a_d);
hipFree((void**) b_d);
hipFree((void**) c_d);
printf("%d\n", c);
} | .text
.file "cudamemcp.hip"
.globl _Z21__device_stub__kernelPiS_S_ # -- Begin function _Z21__device_stub__kernelPiS_S_
.p2align 4, 0x90
.type _Z21__device_stub__kernelPiS_S_,@function
_Z21__device_stub__kernelPiS_S_: # @_Z21__device_stub__kernelPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPiS_S_, .Lfunc_end0-_Z21__device_stub__kernelPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl $1, 28(%rsp)
movl $2, 24(%rsp)
leaq 16(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq %rsp, %rdi
movl $4, %esi
callq hipMalloc
movq 16(%rsp), %rdi
leaq 28(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 24(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movq %rdx, 112(%rsp)
leaq 128(%rsp), %rax
movq %rax, 32(%rsp)
leaq 120(%rsp), %rax
movq %rax, 40(%rsp)
leaq 112(%rsp), %rax
movq %rax, 48(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z6kernelPiS_S_, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
leaq 32(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movl 32(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPiS_S_,@object # @_Z6kernelPiS_S_
.section .rodata,"a",@progbits
.globl _Z6kernelPiS_S_
.p2align 3, 0x0
_Z6kernelPiS_S_:
.quad _Z21__device_stub__kernelPiS_S_
.size _Z6kernelPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d\n"
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPiS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */
/* 0x000fe20000000f00 */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0060*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff067624 */
/* 0x000fe200078e00ff */
/*0090*/ MOV R7, c[0x0][0x174] ; /* 0x00005d0000077a02 */
/* 0x000fe40000000f00 */
/*00a0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPiS_S_
.globl _Z6kernelPiS_S_
.p2align 8
.type _Z6kernelPiS_S_,@function
_Z6kernelPiS_S_:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[4:5], 0x0
s_load_b32 s3, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPiS_S_, .Lfunc_end0-_Z6kernelPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z6kernelPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0008c412_00000000-6_cudamemcp.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z6kernelPiS_S_PiS_S_
.type _Z29__device_stub__Z6kernelPiS_S_PiS_S_, @function
_Z29__device_stub__Z6kernelPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z29__device_stub__Z6kernelPiS_S_PiS_S_, .-_Z29__device_stub__Z6kernelPiS_S_PiS_S_
.globl _Z6kernelPiS_S_
.type _Z6kernelPiS_S_, @function
_Z6kernelPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6kernelPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPiS_S_, .-_Z6kernelPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $1, (%rsp)
movl $2, 4(%rsp)
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movq %rsp, %rsi
movl $1, %ecx
movl $4, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 4(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 44(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 24(%rsp), %rsi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl 44(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z29__device_stub__Z6kernelPiS_S_PiS_S_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6kernelPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudamemcp.hip"
.globl _Z21__device_stub__kernelPiS_S_ # -- Begin function _Z21__device_stub__kernelPiS_S_
.p2align 4, 0x90
.type _Z21__device_stub__kernelPiS_S_,@function
_Z21__device_stub__kernelPiS_S_: # @_Z21__device_stub__kernelPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPiS_S_, .Lfunc_end0-_Z21__device_stub__kernelPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl $1, 28(%rsp)
movl $2, 24(%rsp)
leaq 16(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq %rsp, %rdi
movl $4, %esi
callq hipMalloc
movq 16(%rsp), %rdi
leaq 28(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 24(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movq %rdx, 112(%rsp)
leaq 128(%rsp), %rax
movq %rax, 32(%rsp)
leaq 120(%rsp), %rax
movq %rax, 40(%rsp)
leaq 112(%rsp), %rax
movq %rax, 48(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z6kernelPiS_S_, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
leaq 32(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movl 32(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPiS_S_,@object # @_Z6kernelPiS_S_
.section .rodata,"a",@progbits
.globl _Z6kernelPiS_S_
.p2align 3, 0x0
_Z6kernelPiS_S_:
.quad _Z21__device_stub__kernelPiS_S_
.size _Z6kernelPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d\n"
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPiS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* file name: TilingMatrix.cu
* NOTE:
* squareMatrixMult is much more efficient than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper bound triangle matrix
parameters:
&a GPU device pointer to a m X n matrix (A)
Note:
return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
    // Zero the strictly lower triangle (row > col) of the m x n matrix `a`,
    // leaving the upper triangle untouched.
    //
    // Fixes in this revision:
    //  - The original ran the full m*n double loop in EVERY thread, so all
    //    launched threads redundantly rewrote the whole matrix; now each
    //    element is visited by exactly one logical thread of the 2D launch.
    //  - Removed the no-op self-assignment `a[i*n + j] = a[i*n + j]`.
    //
    // Expected launch: 2D grid/block covering at least m rows x n columns
    // (the grid-stride loops keep it correct for smaller launches too).
    for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < m;
         row += gridDim.y * blockDim.y) {
        for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < n;
             col += gridDim.x * blockDim.x) {
            if (row > col)
                a[row * n + col] = 0.0f;
        }
    }
}
/**********************************************************************
function name: matrixMult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
**********************************************************************/
__global__ void matrixMult(float *a, float *b, float *c, int m, int n, int k)
{
    // General matrix product C = A * B, where A is m x n, B is n x k and the
    // result C is m x k.  One thread computes one element of C; threads that
    // fall outside the output bounds (the grid rarely divides the size
    // evenly) simply return.
    const int r   = blockDim.y * blockIdx.y + threadIdx.y;  // output row
    const int col = blockDim.x * blockIdx.x + threadIdx.x;  // output column

    if (r >= m || col >= k)
        return;  // guard: out-of-range thread does nothing

    float acc = 0;
    for (int t = 0; t < n; ++t)
        acc += a[r * n + t] * b[t * k + col];
    c[r * k + col] = acc;
}
/**********************************************************************
function name: squareMatrixMult
description: dot product of two matrix (not only square) in GPU
parameters:
&a GPU device pointer to a n X n matrix (A)
&b GPU device pointer to a n X n matrix (B)
&c GPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
SQUARE IS MUCH MORE EFFICIENT THAN REGULAR
return: none
**********************************************************************/
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
    // Tiled n x n matrix multiply: d_result = d_a * d_b, using BLOCK_SIZE x
    // BLOCK_SIZE shared-memory tiles so each global element is loaded once
    // per tile instead of once per product term.
    //
    // Launch config (as documented at the call site):
    //   dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
    //   dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
    //
    // Fixes in this revision:
    //  - The inner product loop started at `threadIdx.x / n` instead of 0,
    //    which silently skipped leading terms for threads with
    //    threadIdx.x >= n (e.g. n = 15 with 32-wide blocks).
    //  - The out-of-range guards used `idx >= n*n` on a flattened index,
    //    which does not catch reads that run past the end of one row into
    //    the next; row and column are now guarded separately and the tile
    //    is zero-padded, which leaves the accumulated sum unchanged.
    __shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];

    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    float tmp = 0;

    for (int sub = 0; sub < gridDim.x; ++sub) {
        int aCol = sub * BLOCK_SIZE + threadIdx.x;  // column of A staged by this thread
        int bRow = sub * BLOCK_SIZE + threadIdx.y;  // row of B staged by this thread

        // Zero-pad the tiles when n is not a multiple of BLOCK_SIZE.
        tile_a[threadIdx.y][threadIdx.x] = (row < n && aCol < n) ? d_a[row * n + aCol] : 0;
        tile_b[threadIdx.y][threadIdx.x] = (bRow < n && col < n) ? d_b[bRow * n + col] : 0;

        __syncthreads();  // tiles fully staged before anyone reads them

        for (int kk = 0; kk < BLOCK_SIZE; ++kk) {
            tmp += tile_a[threadIdx.y][kk] * tile_b[kk][threadIdx.x];
        }

        __syncthreads();  // finish reading before the next iteration restages
    }

    if (row < n && col < n) {
        d_result[row * n + col] = tmp;
    }
}
/**********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
**********************************************************************/
int main(int argc, char** argv) {
    // Host driver: builds two random 15x15 matrices, zeroes their lower
    // triangles on the device, multiplies them (tiled kernel for square
    // inputs, general kernel otherwise), then prints operands and result.
    //
    // Fixes in this revision:
    //  - Restored `&copyA` / `&copyB` / `&copyC` in the cudaMallocHost calls;
    //    the file had been corrupted by an HTML-entity pass that turned
    //    "&copy" into the '(c)' symbol, which does not compile.
    //  - The matA print loop indexed with `i*k + j`; A is m x n, so the row
    //    stride is n (harmless here only because n == k).
    // NOTE(review): no CUDA API call is error-checked; wrapping these in a
    // CUDA_CHECK macro is recommended, but omitted to keep behavior identical.
    int printAllMat = 1; // debug flag: also print the two input matrices

    // Matrix sizes: A is m x n, B is n x k, C is m x k.
    int m=15;
    int n=15;
    int k=15;

    /* Fixed seed for illustration */
    srand(3333);

    // Pinned host buffers (cudaMallocHost) for faster host<->device copies.
    float *copyA, *copyB, *copyC;
    cudaMallocHost((void **) &copyA, sizeof(float)*m*n); // host copy of A (m x n)
    cudaMallocHost((void **) &copyB, sizeof(float)*n*k); // host copy of B (n x k)
    cudaMallocHost((void **) &copyC, sizeof(float)*m*k); // host copy of C (m x k)

    // Random-initialize matrix A with values in [0, 1024].
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            copyA[i * n + j] =((float)rand()/(float)(RAND_MAX)) * 1024;
        }
    }
    // Random-initialize matrix B with values in [0, 1024].
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            copyB[i * k + j] = ((float)rand()/(float)(RAND_MAX)) * 1024;
        }
    }

    // Device buffers.
    float *matA, *matB, *matC;
    cudaMalloc((void **) &matA, sizeof(float)*m*n); // device A (m x n)
    cudaMalloc((void **) &matB, sizeof(float)*n*k); // device B (n x k)
    cudaMalloc((void **) &matC, sizeof(float)*m*k); // device C (m x k)

    // Upload A and B to the device.
    cudaMemcpy(matA, copyA, sizeof(float)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(matB, copyB, sizeof(float)*n*k, cudaMemcpyHostToDevice);

    printf("size of matA %dX%d: %zu bytes\n", m,n,(sizeof(float)*m*n));
    printf("size of matB %dX%d: %zu bytes\n", n,k,(sizeof(float)*n*k));
    printf("size of matC %dX%d: %zu bytes\n", m,k,(sizeof(float)*m*k));
    printf("total bytes allocated to mem: %zu bytes ", ((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)));
    printf("(~%zu MBytes)\n\n", (((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)) / 1000000)); // megabytes of the allocated arrays

    // One thread per output element; ceil-divide so partial tiles are covered.
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    printf("Calculating...\n\n");

    // Launch kernels: square inputs take the tiled path, otherwise the
    // general kernel with all three size parameters.
    if(m == n && n == k) {
        matrixTriUpper<<<dimGrid, dimBlock>>>(matA, m, n);
        matrixTriUpper<<<dimGrid, dimBlock>>>(matB, n, k);
        squareMatrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, n); // square: one size param suffices
    }
    else { // not square: needs all three sizes
        matrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, m, n, k);
    }

    // Transfer result from device to host; cudaMemcpy already blocks until
    // the kernels finish, the extra sync is belt-and-braces.
    cudaMemcpy(copyC, matC, sizeof(float)*m*k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    int i,j;
    if(printAllMat == 1) {
        // print matrix A
        printf("matA matrix: \n");
        for (i = 0; i < m; i++) {
            for (j = 0; j < n; j++) {
                printf(" %f ", copyA[i*n + j]); // fixed: row stride of A is n, not k
            }
            printf("\n");
        }
        // print matrix B
        printf("\nmatB matrix: \n");
        for (i = 0; i < n; i++) {
            for (j = 0; j < k; j++) {
                printf(" %f ", copyB[i*k + j]);
            }
            printf("\n");
        }
    }
    // print result matrix
    printf("\nResult matrix: \n");
    for (i = 0; i < m; i++) {
        for (j = 0; j < k; j++) {
            printf(" %f ", copyC[i*k + j]);
        }
        printf("\n");
    }

    // free device and pinned host memory
    cudaFree(matA);
    cudaFree(matB);
    cudaFree(matC);
    cudaFreeHost(copyA);
    cudaFreeHost(copyB);
    cudaFreeHost(copyC);
    return 0;
} | .file "tmpxft_00020a05_00000000-6_TilingProgram.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z14matrixTriUpperPfiiPfii
.type _Z36__device_stub__Z14matrixTriUpperPfiiPfii, @function
_Z36__device_stub__Z14matrixTriUpperPfiiPfii:
.LFB2095:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z14matrixTriUpperPfii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z36__device_stub__Z14matrixTriUpperPfiiPfii, .-_Z36__device_stub__Z14matrixTriUpperPfiiPfii
.globl _Z14matrixTriUpperPfii
.type _Z14matrixTriUpperPfii, @function
_Z14matrixTriUpperPfii:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z14matrixTriUpperPfiiPfii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _Z14matrixTriUpperPfii, .-_Z14matrixTriUpperPfii
.globl _Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii
.type _Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii, @function
_Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii:
.LFB2097:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z10matrixMultPfS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii, .-_Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii
.globl _Z10matrixMultPfS_S_iii
.type _Z10matrixMultPfS_S_iii, @function
_Z10matrixMultPfS_S_iii:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z10matrixMultPfS_S_iii, .-_Z10matrixMultPfS_S_iii
.globl _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i
.type _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i, @function
_Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i:
.LFB2099:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16squareMatrixMultPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2099:
.size _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i, .-_Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i
.globl _Z16squareMatrixMultPfS_S_i
.type _Z16squareMatrixMultPfS_S_i, @function
_Z16squareMatrixMultPfS_S_i:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _Z16squareMatrixMultPfS_S_i, .-_Z16squareMatrixMultPfS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "size of matA %dX%d: %zu bytes\n"
.align 8
.LC3:
.string "size of matB %dX%d: %zu bytes\n"
.align 8
.LC4:
.string "size of matC %dX%d: %zu bytes\n"
.align 8
.LC5:
.string "total bytes allocated to mem: %zu bytes "
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "(~%zu MBytes)\n\n"
.LC7:
.string "Calculating...\n\n"
.LC8:
.string "matA matrix: \n"
.LC9:
.string " %f "
.LC10:
.string "\n"
.LC11:
.string "\nmatB matrix: \n"
.LC12:
.string "\nResult matrix: \n"
.text
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $88, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $3333, %edi
call srand@PLT
movq %rsp, %rdi
movl $900, %esi
call cudaMallocHost@PLT
leaq 8(%rsp), %rdi
movl $900, %esi
call cudaMallocHost@PLT
leaq 16(%rsp), %rdi
movl $900, %esi
call cudaMallocHost@PLT
movl $60, %ebp
.L28:
leaq -60(%rbp), %rbx
.L29:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
mulss .LC1(%rip), %xmm0
movq (%rsp), %rax
movss %xmm0, (%rax,%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L29
addq $60, %rbp
cmpq $960, %rbp
jne .L28
movl $60, %ebp
.L30:
leaq -60(%rbp), %rbx
.L31:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
mulss .LC1(%rip), %xmm0
movq 8(%rsp), %rax
movss %xmm0, (%rax,%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L31
addq $60, %rbp
cmpq $960, %rbp
jne .L30
leaq 24(%rsp), %rdi
movl $900, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $900, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $900, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $900, %edx
movq (%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $900, %edx
movq 8(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $900, %r8d
movl $15, %ecx
movl $15, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $900, %r8d
movl $15, %ecx
movl $15, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $900, %r8d
movl $15, %ecx
movl $15, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2700, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $32, 60(%rsp)
movl $32, 64(%rsp)
movl $1, 68(%rsp)
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 68(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movq 48(%rsp), %rdi
movl 56(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L33:
movl 68(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movq 48(%rsp), %rdi
movl 56(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L34:
movl 68(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movq 48(%rsp), %rdi
movl 56(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L55
.L35:
movl $2, %ecx
movl $900, %edx
movq 40(%rsp), %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
call cudaDeviceSynchronize@PLT
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $60, %ebp
leaq .LC9(%rip), %r12
leaq .LC10(%rip), %r13
.L36:
leaq -60(%rbp), %rbx
.L37:
movq (%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L37
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $60, %rbp
cmpq $960, %rbp
jne .L36
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $60, %ebp
leaq .LC9(%rip), %r12
leaq .LC10(%rip), %r13
.L39:
leaq -60(%rbp), %rbx
.L40:
movq 8(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L40
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $60, %rbp
cmpq $960, %rbp
jne .L39
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $60, %ebp
leaq .LC9(%rip), %r12
leaq .LC10(%rip), %r13
.L42:
leaq -60(%rbp), %rbx
.L43:
movq 16(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L43
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $60, %rbp
cmpq $960, %rbp
jne .L42
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq (%rsp), %rdi
call cudaFreeHost@PLT
movq 8(%rsp), %rdi
call cudaFreeHost@PLT
movq 16(%rsp), %rdi
call cudaFreeHost@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L56
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movl $15, %edx
movl $15, %esi
movq 24(%rsp), %rdi
call _Z36__device_stub__Z14matrixTriUpperPfiiPfii
jmp .L33
.L54:
movl $15, %edx
movl $15, %esi
movq 32(%rsp), %rdi
call _Z36__device_stub__Z14matrixTriUpperPfiiPfii
jmp .L34
.L55:
movl $15, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i
jmp .L35
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.section .rodata.str1.1
.LC13:
.string "_Z16squareMatrixMultPfS_S_i"
.LC14:
.string "_Z10matrixMultPfS_S_iii"
.LC15:
.string "_Z14matrixTriUpperPfii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2102:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z16squareMatrixMultPfS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z10matrixMultPfS_S_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z14matrixTriUpperPfii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2102:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.align 4
.LC1:
.long 1149239296
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* file name: TilingMatrix.cu
* NOTE:
* squareMatrixMult is much more efficient than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper bound triangle matrix
parameters:
&a GPU device pointer to a m X n matrix (A)
Note:
return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
    // Zero the strictly-lower-triangular part (row > col) of the m x n
    // matrix `a`, leaving the upper triangle and diagonal untouched.
    // NOTE(review): every launched thread executes the full serial loop
    // nest, so all threads redundantly store the same zeros; results are
    // still correct, but only one thread's worth of work is needed.
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            if (i > j)
                a[i*n + j] = 0;
            // BUGFIX: removed the original no-op self-assignment
            // `a[i*n + j] = a[i*n + j];` -- a dead read/write that only
            // added a benign cross-thread race on unmodified elements.
        }
    }
}
/**********************************************************************
function name: matrixMult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
**********************************************************************/
__global__ void matrixMult(float *a, float *b, float *c, int m, int n, int k)
{
    // One thread per output element: c[row][col] = dot(row of a, col of b).
    // a is m x n, b is n x k, c is m x k, all row-major.
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (col >= k || row >= m)
        return;                       // thread lies outside the output matrix
    float acc = 0;
    for (int t = 0; t < n; t++)
        acc += a[row * n + t] * b[t * k + col];
    c[row * k + col] = acc;
}
/**********************************************************************
function name: squareMatrixMult
description: dot product of two matrix (not only square) in GPU
parameters:
&a GPU device pointer to a n X n matrix (A)
&b GPU device pointer to a n X n matrix (B)
&c GPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
SQUARE IS MUCH MORE EFFICENT THAN REGULAR
return: none
**********************************************************************/
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
    // Shared-memory tiled n x n multiply: d_result = d_a * d_b.
    // Each block computes one BLOCK_SIZE x BLOCK_SIZE output tile; the two
    // shared tiles stage one sub-block of A and one of B per iteration.
    __shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;   // global output row
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;   // global output column
    float tmp = 0;
    int idx;
    for (int sub = 0; sub < gridDim.x; ++sub) {
        // Stage one tile of A. The idx >= n*n test only guards the flat
        // allocation bound (n may not be divisible by BLOCK_SIZE); it is not
        // a full 2D bounds check. NOTE(review): edge threads can stage stale
        // neighbor values, but such threads fail the row/col guard below and
        // never write output, so results are unaffected.
        idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
        if(idx >= n*n) {
            tile_a[threadIdx.y][threadIdx.x] = 0;
        }
        else {
            tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
        }
        // Stage one tile of B.
        idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
        if(idx >= n*n) {
            tile_b[threadIdx.y][threadIdx.x] = 0;
        }
        else {
            tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
        }
        __syncthreads();   // tiles fully populated before any thread reads them
        // BUGFIX: the loop previously started at `threadIdx.x / n` -- an
        // unintended integer division that skipped leading tile entries for
        // threads with threadIdx.x >= n. Those threads never pass the final
        // row/col guard, so written results happened to be correct, but the
        // dot product must conceptually run over the whole tile from 0.
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
        }
        __syncthreads();   // finish reading tiles before the next iteration overwrites
    }
    // Only threads inside the n x n output matrix write back.
    if(row < n && col < n) {
        d_result[row * n + col] = tmp;
    }
}
/**********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
**********************************************************************/
/* Prints a rows X cols row-major matrix, one row per line,
 * using the same " %f " element format as the original output. */
static void printMatrix(const float *mat, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            printf(" %f ", mat[i * cols + j]);
        }
        printf("\n");
    }
}

int main(int argc, char** argv) {
    int printAllMat = 1; // debug flag for printing all of the matricies
    // Matrix dimensions: A is m x n, B is n x k, C is m x k.
    int m=15;
    int n=15;
    int k=15;
    /* Fixed seed for illustration */
    srand(3333);
    // Allocate pinned host memory (faster H2D/D2H transfers than pageable).
    float *copyA, *copyB, *copyC;
    cudaMallocHost((void **) &copyA, sizeof(float)*m*n); // copied matrix is m x n
    cudaMallocHost((void **) &copyB, sizeof(float)*n*k); // copied matrix is n x k
    cudaMallocHost((void **) &copyC, sizeof(float)*m*k); // copied matrix is m x k
    // random initialize matrix A with values in [0, 1024]
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            copyA[i * n + j] =((float)rand()/(float)(RAND_MAX)) * 1024;
        }
    }
    // random initialize matrix B with values in [0, 1024]
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            copyB[i * k + j] = ((float)rand()/(float)(RAND_MAX)) * 1024;
        }
    }
    // Allocate memory space on the device
    float *matA, *matB, *matC;
    cudaMalloc((void **) &matA, sizeof(float)*m*n); // matrix is m x n
    cudaMalloc((void **) &matB, sizeof(float)*n*k); // matrix is n x k
    cudaMalloc((void **) &matC, sizeof(float)*m*k); // matrix is m x k
    // copy matrix A and B from host to device memory
    cudaMemcpy(matA, copyA, sizeof(float)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(matB, copyB, sizeof(float)*n*k, cudaMemcpyHostToDevice);
    printf("size of matA %dX%d: %zu bytes\n", m,n,(sizeof(float)*m*n));
    printf("size of matB %dX%d: %zu bytes\n", n,k,(sizeof(float)*n*k));
    printf("size of matC %dX%d: %zu bytes\n", m,k,(sizeof(float)*m*k));
    printf("total bytes allocated to mem: %zu bytes ", ((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)));
    printf("(~%zu MBytes)\n\n", (((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)) / 1000000)); // get megabytes of the allocated arrays
    // Launch geometry: one thread per output element, 32x32 thread blocks.
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    printf("Calculating...\n\n");
    // Launch kernel; the shared-memory tiled kernel requires square matrices.
    if(m == n && n == k) {
        matrixTriUpper<<<dimGrid, dimBlock>>>(matA, m, n);
        matrixTriUpper<<<dimGrid, dimBlock>>>(matB, n, k);
        squareMatrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, n); // square, thus only need 1 param to define size
    }
    else { // not a square, thus it needs param to define all sizes
        matrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, m, n, k);
    }
    // Transfer results from device to host. The blocking cudaMemcpy waits
    // for the preceding default-stream kernels; the extra sync is belt-and-braces.
    cudaMemcpy(copyC, matC, sizeof(float)*m*k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // If the debug flag is on, print the two input matrices as well.
    if(printAllMat == 1) {
        // BUGFIX: matrix A was previously printed with index i*k + j even
        // though A is m x n (harmless only because m == n == k here);
        // printMatrix indexes each matrix by its own column count.
        printf("matA matrix: \n");
        printMatrix(copyA, m, n);
        printf("\nmatB matrix: \n");
        printMatrix(copyB, n, k);
    }
    // print result matrix
    printf("\nResult matrix: \n");
    printMatrix(copyC, m, k);
    // free device and pinned host memory
    cudaFree(matA);
    cudaFree(matB);
    cudaFree(matC);
    cudaFreeHost(copyA);
    cudaFreeHost(copyB);
    cudaFreeHost(copyC);
    return 0;
}
* file name: TilingMatrix.cu
* NOTE:
* squareMatrixMult is much more efficent than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper bound triangle matrix
parameters:
&a GPU device pointer to a m X n matrix (A)
Note:
return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
    // Zero the strictly-lower-triangular part (row > col) of the m x n
    // matrix `a`, leaving the upper triangle and diagonal untouched.
    // NOTE(review): every launched thread executes the full serial loop
    // nest, so all threads redundantly store the same zeros; results are
    // still correct, but only one thread's worth of work is needed.
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            if (i > j)
                a[i*n + j] = 0;
            // BUGFIX: removed the original no-op self-assignment
            // `a[i*n + j] = a[i*n + j];` -- a dead read/write that only
            // added a benign cross-thread race on unmodified elements.
        }
    }
}
/**********************************************************************
function name: matrixMult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
**********************************************************************/
__global__ void matrixMult(float *a, float *b, float *c, int m, int n, int k)
{
    // One thread per output element: c[row][col] = dot(row of a, col of b).
    // a is m x n, b is n x k, c is m x k, all row-major.
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (col >= k || row >= m)
        return;                       // thread lies outside the output matrix
    float acc = 0;
    for (int t = 0; t < n; t++)
        acc += a[row * n + t] * b[t * k + col];
    c[row * k + col] = acc;
}
/**********************************************************************
function name: squareMatrixMult
description: dot product of two matrix (not only square) in GPU
parameters:
&a GPU device pointer to a n X n matrix (A)
&b GPU device pointer to a n X n matrix (B)
&c GPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
SQUARE IS MUCH MORE EFFICENT THAN REGULAR
return: none
**********************************************************************/
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
    // Shared-memory tiled n x n multiply: d_result = d_a * d_b.
    // Each block computes one BLOCK_SIZE x BLOCK_SIZE output tile; the two
    // shared tiles stage one sub-block of A and one of B per iteration.
    __shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;   // global output row
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;   // global output column
    float tmp = 0;
    int idx;
    for (int sub = 0; sub < gridDim.x; ++sub) {
        // Stage one tile of A. The idx >= n*n test only guards the flat
        // allocation bound (n may not be divisible by BLOCK_SIZE); it is not
        // a full 2D bounds check. NOTE(review): edge threads can stage stale
        // neighbor values, but such threads fail the row/col guard below and
        // never write output, so results are unaffected.
        idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
        if(idx >= n*n) {
            tile_a[threadIdx.y][threadIdx.x] = 0;
        }
        else {
            tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
        }
        // Stage one tile of B.
        idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
        if(idx >= n*n) {
            tile_b[threadIdx.y][threadIdx.x] = 0;
        }
        else {
            tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
        }
        __syncthreads();   // tiles fully populated before any thread reads them
        // BUGFIX: the loop previously started at `threadIdx.x / n` -- an
        // unintended integer division that skipped leading tile entries for
        // threads with threadIdx.x >= n. Those threads never pass the final
        // row/col guard, so written results happened to be correct, but the
        // dot product must conceptually run over the whole tile from 0.
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
        }
        __syncthreads();   // finish reading tiles before the next iteration overwrites
    }
    // Only threads inside the n x n output matrix write back.
    if(row < n && col < n) {
        d_result[row * n + col] = tmp;
    }
}
/**********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
**********************************************************************/
/* Prints a rows X cols row-major matrix, one row per line,
 * using the same " %f " element format as the original output. */
static void printMatrix(const float *mat, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            printf(" %f ", mat[i * cols + j]);
        }
        printf("\n");
    }
}

int main(int argc, char** argv) {
    int printAllMat = 1; // debug flag for printing all of the matricies
    // Matrix dimensions: A is m x n, B is n x k, C is m x k.
    int m=15;
    int n=15;
    int k=15;
    /* Fixed seed for illustration */
    srand(3333);
    // Allocate pinned host memory (faster H2D/D2H transfers than pageable).
    float *copyA, *copyB, *copyC;
    hipHostMalloc((void **) &copyA, sizeof(float)*m*n, hipHostMallocDefault); // copied matrix is m x n
    hipHostMalloc((void **) &copyB, sizeof(float)*n*k, hipHostMallocDefault); // copied matrix is n x k
    hipHostMalloc((void **) &copyC, sizeof(float)*m*k, hipHostMallocDefault); // copied matrix is m x k
    // random initialize matrix A with values in [0, 1024]
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            copyA[i * n + j] =((float)rand()/(float)(RAND_MAX)) * 1024;
        }
    }
    // random initialize matrix B with values in [0, 1024]
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            copyB[i * k + j] = ((float)rand()/(float)(RAND_MAX)) * 1024;
        }
    }
    // Allocate memory space on the device
    float *matA, *matB, *matC;
    hipMalloc((void **) &matA, sizeof(float)*m*n); // matrix is m x n
    hipMalloc((void **) &matB, sizeof(float)*n*k); // matrix is n x k
    hipMalloc((void **) &matC, sizeof(float)*m*k); // matrix is m x k
    // copy matrix A and B from host to device memory
    hipMemcpy(matA, copyA, sizeof(float)*m*n, hipMemcpyHostToDevice);
    hipMemcpy(matB, copyB, sizeof(float)*n*k, hipMemcpyHostToDevice);
    printf("size of matA %dX%d: %zu bytes\n", m,n,(sizeof(float)*m*n));
    printf("size of matB %dX%d: %zu bytes\n", n,k,(sizeof(float)*n*k));
    printf("size of matC %dX%d: %zu bytes\n", m,k,(sizeof(float)*m*k));
    printf("total bytes allocated to mem: %zu bytes ", ((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)));
    printf("(~%zu MBytes)\n\n", (((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)) / 1000000)); // get megabytes of the allocated arrays
    // Launch geometry: one thread per output element, 32x32 thread blocks.
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    printf("Calculating...\n\n");
    // Launch kernel; the shared-memory tiled kernel requires square matrices.
    if(m == n && n == k) {
        matrixTriUpper<<<dimGrid, dimBlock>>>(matA, m, n);
        matrixTriUpper<<<dimGrid, dimBlock>>>(matB, n, k);
        squareMatrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, n); // square, thus only need 1 param to define size
    }
    else { // not a square, thus it needs param to define all sizes
        matrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, m, n, k);
    }
    // Transfer results from device to host. The blocking hipMemcpy waits
    // for the preceding default-stream kernels; the extra sync is belt-and-braces.
    hipMemcpy(copyC, matC, sizeof(float)*m*k, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // If the debug flag is on, print the two input matrices as well.
    if(printAllMat == 1) {
        // BUGFIX: matrix A was previously printed with index i*k + j even
        // though A is m x n (harmless only because m == n == k here);
        // printMatrix indexes each matrix by its own column count.
        printf("matA matrix: \n");
        printMatrix(copyA, m, n);
        printf("\nmatB matrix: \n");
        printMatrix(copyB, n, k);
    }
    // print result matrix
    printf("\nResult matrix: \n");
    printMatrix(copyC, m, k);
    // free device and pinned host memory
    hipFree(matA);
    hipFree(matB);
    hipFree(matC);
    hipHostFree(copyA);
    hipHostFree(copyB);
    hipHostFree(copyC);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* file name: TilingMatrix.cu
* NOTE:
* squareMatrixMult is much more efficent than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper bound triangle matrix
parameters:
&a GPU device pointer to a m X n matrix (A)
Note:
return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
//setting matricies to their upper bound
for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
if(i>j)
a[i*n + j] = 0;
a[i*n + j] = a[i*n + j];
}
}
}
/**********************************************************************
function name: matrixMult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
**********************************************************************/
__global__ void matrixMult(float *a, float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m) {
for(int i = 0; i < n; i++)
sum += a[row * n + i] * b[i * k + col];
c[row * k + col] = sum;
}
}
/**********************************************************************
function name: squareMatrixMult
description: dot product of two matrix (not only square) in GPU
parameters:
&a GPU device pointer to a n X n matrix (A)
&b GPU device pointer to a n X n matrix (B)
&c GPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
SQUARE IS MUCH MORE EFFICENT THAN REGULAR
return: none
**********************************************************************/
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
__shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
float tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub) {
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n) {
// n may not divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n) {
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = threadIdx.x/n; k < BLOCK_SIZE; ++k) {
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n) {
d_result[row * n + col] = tmp;
}
}
/**********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
**********************************************************************/
int main(int argc, char** argv) {
int printAllMat = 1; // debug flag for printing all of the maticies
// Set sizes of the matrixes
int m=15;
int n=15;
int k=15;
/* Fixed seed for illustration */
srand(3333);
// Allocate memory in host RAM
float *copyA, *copyB, *copyC;
hipHostMalloc((void **) ©A, sizeof(float)*m*n, hipHostMallocDefault); // copied matrix is m x n
hipHostMalloc((void **) ©B, sizeof(float)*n*k, hipHostMallocDefault); // copied matrix is n x k
hipHostMalloc((void **) ©C, sizeof(float)*m*k, hipHostMallocDefault); // copied matrix is m x k
// float x = (float)rand()/(float)(RAND_MAX/a);
// random initialize matrix A
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
copyA[i * n + j] =((float)rand()/(float)(RAND_MAX)) * 1024;
}
}
// random initialize matrix B
for (int i = 0; i < n; ++i) {
for (int j = 0; j < k; ++j) {
copyB[i * k + j] = ((float)rand()/(float)(RAND_MAX)) * 1024;
}
}
// Allocate memory space on the device
float *matA, *matB, *matC;
hipMalloc((void **) &matA, sizeof(float)*m*n); // matrix is m x n
hipMalloc((void **) &matB, sizeof(float)*n*k); // matrix is n x k
hipMalloc((void **) &matC, sizeof(float)*m*k); // matrix is m x k
// copy matrix A and B from host to device memory
hipMemcpy(matA, copyA, sizeof(float)*m*n, hipMemcpyHostToDevice);
hipMemcpy(matB, copyB, sizeof(float)*n*k, hipMemcpyHostToDevice);
printf("size of matA %dX%d: %zu bytes\n", m,n,(sizeof(float)*m*n));
printf("size of matB %dX%d: %zu bytes\n", n,k,(sizeof(float)*n*k));
printf("size of matC %dX%d: %zu bytes\n", m,k,(sizeof(float)*m*k));
printf("total bytes allocated to mem: %zu bytes ", ((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)));
printf("(~%zu MBytes)\n\n", (((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)) / 1000000)); // get megabytes of the allocated arrays
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
printf("Calculating...\n\n");
// Launch kernel, check if it is a square
if(m == n && n == k) {
matrixTriUpper<<<dimGrid, dimBlock>>>(matA, m, n);
matrixTriUpper<<<dimGrid, dimBlock>>>(matB, n, k);
squareMatrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, n); // square, thus only need 1 param to define size
}
else { // not a square, thus it needs param to define all sizes
matrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, m, n, k);
}
// Transefr results from device to host
hipMemcpy(copyC, matC, sizeof(float)*m*k, hipMemcpyDeviceToHost);
hipDeviceSynchronize(); //possibly
//cudaThreadSynchronize();
//prints the matricies
// printf("[%d][%d]:%d, ", i, j, copyC[i*k + j]); //Another possible way to print the matrix
//if the debug flag is on it will print the first two product arrays as well
int i,j;
if(printAllMat == 1) {
// print matrix A
printf("matA matrix: \n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
//printf("[%d][%d]:%d, ", i, j, copyA[i*k + j]);
printf(" %f ", copyA[i*k + j]);
}
printf("\n");
}
// print matrix B
printf("\nmatB matrix: \n");
for (i = 0; i < n; i++) {
for (j = 0; j < k; j++) {
//printf("[%d][%d]:%d, ", i, j, copyB[i*k + j]);
printf(" %f ", copyB[i*k + j]);
}
printf("\n");
}
}
// print result matrix
printf("\nResult matrix: \n");
for (i = 0; i < m; i++) {
for (j = 0; j < k; j++) {
//printf("[%d][%d]:%d, ", i, j, copyC[i*k + j]);
printf(" %f ", copyC[i*k + j]);
}
printf("\n");
}
// free memory
hipFree(matA);
hipFree(matB);
hipFree(matC);
hipHostFree(copyA);
hipHostFree(copyB);
hipHostFree(copyC);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14matrixTriUpperPfii
.globl _Z14matrixTriUpperPfii
.p2align 8
.type _Z14matrixTriUpperPfii,@function
_Z14matrixTriUpperPfii:
s_load_b32 s6, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s6, 1
s_cbranch_scc1 .LBB0_8
s_clause 0x1
s_load_b32 s7, s[0:1], 0xc
s_load_b64 s[2:3], s[0:1], 0x0
s_mov_b32 s5, 0
s_mov_b32 s1, 0
s_mov_b32 s8, 0
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s7, 0
s_cselect_b32 s0, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v0, 0, 1, s0
v_cmp_ne_u32_e64 s0, 1, v0
v_mov_b32_e32 v0, 0
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_add_i32 s8, s8, 1
s_add_i32 s1, s1, s7
s_cmp_eq_u32 s8, s6
s_cbranch_scc1 .LBB0_8
.LBB0_3:
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccnz .LBB0_2
s_mov_b32 s9, 0
s_branch .LBB0_6
.LBB0_5:
s_add_i32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s7, s9
s_cbranch_scc1 .LBB0_2
.LBB0_6:
s_cmp_le_u32 s8, s9
s_cbranch_scc1 .LBB0_5
s_add_i32 s4, s1, s9
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[10:11], s[4:5], 2
s_add_u32 s10, s2, s10
s_addc_u32 s11, s3, s11
global_store_b32 v0, v0, s[10:11]
s_branch .LBB0_5
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14matrixTriUpperPfii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 12
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14matrixTriUpperPfii, .Lfunc_end0-_Z14matrixTriUpperPfii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z10matrixMultPfS_S_iii
.globl _Z10matrixMultPfS_S_iii
.p2align 8
.type _Z10matrixMultPfS_S_iii,@function
_Z10matrixMultPfS_S_iii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x20
s_load_b32 s4, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, s14, s2, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s5, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s3, v0
v_cmp_gt_i32_e64 s2, s4, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB1_6
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB1_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v1, s2
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v4, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
.p2align 6
.LBB1_3:
v_ashrrev_i32_e32 v5, 31, v4
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s2, 0
v_lshlrev_b64 v[7:8], 2, v[4:5]
v_add_nc_u32_e32 v4, s3, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s6, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
global_load_b32 v5, v[2:3], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v2, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v5, v7
s_cbranch_scc0 .LBB1_3
s_branch .LBB1_5
.LBB1_4:
v_mov_b32_e32 v6, 0
.LBB1_5:
s_load_b64 s[0:1], s[0:1], 0x10
v_mad_u64_u32 v[2:3], null, v1, s3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v6, off
.LBB1_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10matrixMultPfS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z10matrixMultPfS_S_iii, .Lfunc_end1-_Z10matrixMultPfS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z16squareMatrixMultPfS_S_i
.globl _Z16squareMatrixMultPfS_S_i
.p2align 8
.type _Z16squareMatrixMultPfS_S_i,@function
_Z16squareMatrixMultPfS_S_i:
s_clause 0x1
s_load_b32 s8, s[0:1], 0x20
s_load_b32 s3, s[0:1], 0x18
v_bfe_u32 v5, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_mov_b32 s9, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v4, s15, 5, v5
v_lshl_add_u32 v0, s14, 5, v3
s_waitcnt lgkmcnt(0)
s_cmp_eq_u32 s8, 0
s_cbranch_scc1 .LBB2_15
v_cvt_f32_u32_e32 v1, s3
s_sub_i32 s2, 0, s3
s_load_b128 s[4:7], s[0:1], 0x0
s_mul_i32 s10, s3, s3
v_mov_b32_e32 v8, 0
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v1, v1
v_mul_lo_u32 v2, s2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v2, v1, v2
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v1, v3, v1
v_mul_lo_u32 v2, v1, s3
v_add_nc_u32_e32 v6, 1, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v2, v3, v2
v_subrev_nc_u32_e32 v7, s3, v2
v_cmp_le_u32_e32 vcc_lo, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_dual_cndmask_b32 v2, v2, v7 :: v_dual_cndmask_b32 v1, v1, v6
v_lshlrev_b32_e32 v6, 2, v3
v_cmp_le_u32_e32 vcc_lo, s3, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v7, 1, v1
v_dual_cndmask_b32 v12, v1, v7 :: v_dual_lshlrev_b32 v11, 7, v5
v_mad_u64_u32 v[1:2], null, v4, s3, v[3:4]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v7, v11, v6
v_lshlrev_b32_e32 v2, 7, v12
v_cmp_gt_u32_e32 vcc_lo, 32, v12
v_add_nc_u32_e32 v10, -1, v12
s_delay_alu instid0(VALU_DEP_4)
v_add_nc_u32_e32 v9, 0x1000, v7
v_lshl_add_u32 v11, v12, 2, v11
v_add3_u32 v12, v2, v6, 0x1000
v_mov_b32_e32 v6, 0
s_branch .LBB2_3
.LBB2_2:
s_or_b32 exec_lo, exec_lo, s11
s_add_i32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s9, s8
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB2_16
.LBB2_3:
s_lshl_b32 s11, s9, 5
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, s11, v1
v_cmp_le_i32_e64 s2, s10, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s12, s2
s_xor_b32 s2, exec_lo, s12
s_cbranch_execz .LBB2_5
ds_store_b32 v7, v8
.LBB2_5:
s_and_not1_saveexec_b32 s12, s2
s_cbranch_execz .LBB2_7
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, s2, s4, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s2, s5, v3, s2
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
ds_store_b32 v7, v2
.LBB2_7:
s_or_b32 exec_lo, exec_lo, s12
v_add_nc_u32_e32 v13, s11, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v13, s3, v[0:1]
v_cmp_le_i32_e64 s2, s10, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s11, s2
s_xor_b32 s2, exec_lo, s11
s_cbranch_execz .LBB2_9
ds_store_b32 v9, v8
.LBB2_9:
s_and_not1_saveexec_b32 s11, s2
s_cbranch_execz .LBB2_11
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, s2, s6, v2
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s2, s7, v3, s2
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
ds_store_b32 v9, v2
.LBB2_11:
s_or_b32 exec_lo, exec_lo, s11
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s11, vcc_lo
s_cbranch_execz .LBB2_2
v_dual_mov_b32 v2, v12 :: v_dual_mov_b32 v3, v11
v_mov_b32_e32 v13, v10
s_mov_b32 s12, 0
.LBB2_13:
ds_load_b32 v14, v3
ds_load_b32 v15, v2
v_add_nc_u32_e32 v13, 1, v13
v_add_nc_u32_e32 v3, 4, v3
v_add_nc_u32_e32 v2, 0x80, v2
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v6, v14, v15
v_cmp_lt_u32_e64 s2, 30, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s12, s2, s12
s_and_not1_b32 exec_lo, exec_lo, s12
s_cbranch_execnz .LBB2_13
s_or_b32 exec_lo, exec_lo, s12
s_branch .LBB2_2
.LBB2_15:
v_mov_b32_e32 v6, 0
.LBB2_16:
v_max_i32_e32 v1, v4, v0
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB2_18
s_load_b64 s[0:1], s[0:1], 0x10
v_mad_u64_u32 v[1:2], null, v4, s3, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v6, off
.LBB2_18:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16squareMatrixMultPfS_S_i
.amdhsa_group_segment_fixed_size 8192
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z16squareMatrixMultPfS_S_i, .Lfunc_end2-_Z16squareMatrixMultPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14matrixTriUpperPfii
.private_segment_fixed_size: 0
.sgpr_count: 14
.sgpr_spill_count: 0
.symbol: _Z14matrixTriUpperPfii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 1
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10matrixMultPfS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10matrixMultPfS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 8192
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16squareMatrixMultPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16squareMatrixMultPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
* file name: TilingMatrix.cu
* NOTE:
* squareMatrixMult is much more efficent than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper bound triangle matrix
parameters:
&a GPU device pointer to a m X n matrix (A)
Note:
return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
//setting matricies to their upper bound
for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
if(i>j)
a[i*n + j] = 0;
a[i*n + j] = a[i*n + j];
}
}
}
/**********************************************************************
function name: matrixMult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
**********************************************************************/
__global__ void matrixMult(float *a, float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m) {
for(int i = 0; i < n; i++)
sum += a[row * n + i] * b[i * k + col];
c[row * k + col] = sum;
}
}
/**********************************************************************
function name: squareMatrixMult
description: dot product of two matrix (not only square) in GPU
parameters:
&a GPU device pointer to a n X n matrix (A)
&b GPU device pointer to a n X n matrix (B)
&c GPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
SQUARE IS MUCH MORE EFFICENT THAN REGULAR
return: none
**********************************************************************/
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
__shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
float tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub) {
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n) {
// n may not divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n) {
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else {
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = threadIdx.x/n; k < BLOCK_SIZE; ++k) {
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n) {
d_result[row * n + col] = tmp;
}
}
/**********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
**********************************************************************/
int main(int argc, char** argv) {
int printAllMat = 1; // debug flag for printing all of the maticies
// Set sizes of the matrixes
int m=15;
int n=15;
int k=15;
/* Fixed seed for illustration */
srand(3333);
// Allocate memory in host RAM
float *copyA, *copyB, *copyC;
hipHostMalloc((void **) ©A, sizeof(float)*m*n, hipHostMallocDefault); // copied matrix is m x n
hipHostMalloc((void **) ©B, sizeof(float)*n*k, hipHostMallocDefault); // copied matrix is n x k
hipHostMalloc((void **) ©C, sizeof(float)*m*k, hipHostMallocDefault); // copied matrix is m x k
// float x = (float)rand()/(float)(RAND_MAX/a);
// random initialize matrix A
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
copyA[i * n + j] =((float)rand()/(float)(RAND_MAX)) * 1024;
}
}
// random initialize matrix B
for (int i = 0; i < n; ++i) {
for (int j = 0; j < k; ++j) {
copyB[i * k + j] = ((float)rand()/(float)(RAND_MAX)) * 1024;
}
}
// Allocate memory space on the device
float *matA, *matB, *matC;
hipMalloc((void **) &matA, sizeof(float)*m*n); // matrix is m x n
hipMalloc((void **) &matB, sizeof(float)*n*k); // matrix is n x k
hipMalloc((void **) &matC, sizeof(float)*m*k); // matrix is m x k
// copy matrix A and B from host to device memory
hipMemcpy(matA, copyA, sizeof(float)*m*n, hipMemcpyHostToDevice);
hipMemcpy(matB, copyB, sizeof(float)*n*k, hipMemcpyHostToDevice);
printf("size of matA %dX%d: %zu bytes\n", m,n,(sizeof(float)*m*n));
printf("size of matB %dX%d: %zu bytes\n", n,k,(sizeof(float)*n*k));
printf("size of matC %dX%d: %zu bytes\n", m,k,(sizeof(float)*m*k));
printf("total bytes allocated to mem: %zu bytes ", ((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)));
printf("(~%zu MBytes)\n\n", (((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)) / 1000000)); // get megabytes of the allocated arrays
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
printf("Calculating...\n\n");
// Launch kernel, check if it is a square
if(m == n && n == k) {
matrixTriUpper<<<dimGrid, dimBlock>>>(matA, m, n);
matrixTriUpper<<<dimGrid, dimBlock>>>(matB, n, k);
squareMatrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, n); // square, thus only need 1 param to define size
}
else { // not a square, thus it needs param to define all sizes
matrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, m, n, k);
}
// Transefr results from device to host
hipMemcpy(copyC, matC, sizeof(float)*m*k, hipMemcpyDeviceToHost);
hipDeviceSynchronize(); //possibly
//cudaThreadSynchronize();
//prints the matricies
// printf("[%d][%d]:%d, ", i, j, copyC[i*k + j]); //Another possible way to print the matrix
//if the debug flag is on it will print the first two product arrays as well
int i,j;
if(printAllMat == 1) {
// print matrix A
printf("matA matrix: \n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
//printf("[%d][%d]:%d, ", i, j, copyA[i*k + j]);
printf(" %f ", copyA[i*k + j]);
}
printf("\n");
}
// print matrix B
printf("\nmatB matrix: \n");
for (i = 0; i < n; i++) {
for (j = 0; j < k; j++) {
//printf("[%d][%d]:%d, ", i, j, copyB[i*k + j]);
printf(" %f ", copyB[i*k + j]);
}
printf("\n");
}
}
// print result matrix
printf("\nResult matrix: \n");
for (i = 0; i < m; i++) {
for (j = 0; j < k; j++) {
//printf("[%d][%d]:%d, ", i, j, copyC[i*k + j]);
printf(" %f ", copyC[i*k + j]);
}
printf("\n");
}
// free memory
hipFree(matA);
hipFree(matB);
hipFree(matC);
hipHostFree(copyA);
hipHostFree(copyB);
hipHostFree(copyC);
return 0;
} | .text
.file "TilingProgram.hip"
.globl _Z29__device_stub__matrixTriUpperPfii # -- Begin function _Z29__device_stub__matrixTriUpperPfii
.p2align 4, 0x90
.type _Z29__device_stub__matrixTriUpperPfii,@function
_Z29__device_stub__matrixTriUpperPfii: # @_Z29__device_stub__matrixTriUpperPfii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z14matrixTriUpperPfii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z29__device_stub__matrixTriUpperPfii, .Lfunc_end0-_Z29__device_stub__matrixTriUpperPfii
.cfi_endproc
# -- End function
.globl _Z25__device_stub__matrixMultPfS_S_iii # -- Begin function _Z25__device_stub__matrixMultPfS_S_iii
.p2align 4, 0x90
.type _Z25__device_stub__matrixMultPfS_S_iii,@function
_Z25__device_stub__matrixMultPfS_S_iii: # @_Z25__device_stub__matrixMultPfS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10matrixMultPfS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z25__device_stub__matrixMultPfS_S_iii, .Lfunc_end1-_Z25__device_stub__matrixMultPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__squareMatrixMultPfS_S_i # -- Begin function _Z31__device_stub__squareMatrixMultPfS_S_i
.p2align 4, 0x90
.type _Z31__device_stub__squareMatrixMultPfS_S_i,@function
_Z31__device_stub__squareMatrixMultPfS_S_i: # @_Z31__device_stub__squareMatrixMultPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16squareMatrixMultPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z31__device_stub__squareMatrixMultPfS_S_i, .Lfunc_end2-_Z31__device_stub__squareMatrixMultPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI3_1:
.long 0x44800000 # float 1024
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $3333, %edi # imm = 0xD05
callq srand
leaq 96(%rsp), %rdi
xorl %ebx, %ebx
movl $900, %esi # imm = 0x384
xorl %edx, %edx
callq hipHostMalloc
leaq 88(%rsp), %rdi
movl $900, %esi # imm = 0x384
xorl %edx, %edx
callq hipHostMalloc
leaq 112(%rsp), %rdi
movl $900, %esi # imm = 0x384
xorl %edx, %edx
callq hipHostMalloc
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_1: # %.preheader175
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI3_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss .LCPI3_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
movq 96(%rsp), %rax
addq %rbx, %rax
movss %xmm0, (%rax,%r15,4)
incq %r15
cmpq $15, %r15
jne .LBB3_2
# %bb.3: # in Loop: Header=BB3_1 Depth=1
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_1
# %bb.4: # %.preheader173.preheader
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_5: # %.preheader173
# =>This Loop Header: Depth=1
# Child Loop BB3_6 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_6: # Parent Loop BB3_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI3_0(%rip), %xmm0
mulss .LCPI3_1(%rip), %xmm0
movq 88(%rsp), %rax
addq %rbx, %rax
movss %xmm0, (%rax,%r15,4)
incq %r15
cmpq $15, %r15
jne .LBB3_6
# %bb.7: # in Loop: Header=BB3_5 Depth=1
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_5
# %bb.8:
movabsq $137438953504, %rbx # imm = 0x2000000020
movabsq $4294967297, %r14 # imm = 0x100000001
leaq 80(%rsp), %rdi
movl $900, %esi # imm = 0x384
callq hipMalloc
leaq 72(%rsp), %rdi
movl $900, %esi # imm = 0x384
callq hipMalloc
leaq 104(%rsp), %rdi
movl $900, %esi # imm = 0x384
callq hipMalloc
movq 80(%rsp), %rdi
movq 96(%rsp), %rsi
movl $900, %edx # imm = 0x384
movl $1, %ecx
callq hipMemcpy
movq 72(%rsp), %rdi
movq 88(%rsp), %rsi
movl $900, %edx # imm = 0x384
movl $1, %ecx
callq hipMemcpy
movl $.L.str, %edi
movl $900, %ecx # imm = 0x384
movl $15, %esi
movl $15, %edx
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $900, %ecx # imm = 0x384
movl $15, %esi
movl $15, %edx
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movl $900, %ecx # imm = 0x384
movl $15, %esi
movl $15, %edx
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl $2700, %esi # imm = 0xA8C
xorl %eax, %eax
callq printf
movl $.L.str.4, %edi
xorl %esi, %esi
xorl %eax, %eax
callq printf
movl $.Lstr, %edi
callq puts@PLT
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_10
# %bb.9:
movq 80(%rsp), %rax
movq %rax, 64(%rsp)
movl $15, 8(%rsp)
movl $15, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
movq %rsp, %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z14matrixTriUpperPfii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_10:
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_12
# %bb.11:
movq 72(%rsp), %rax
movq %rax, 64(%rsp)
movl $15, 8(%rsp)
movl $15, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
movq %rsp, %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z14matrixTriUpperPfii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_12:
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_14
# %bb.13:
movq 80(%rsp), %rax
movq 72(%rsp), %rcx
movq 104(%rsp), %rdx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl $15, 124(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 124(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z16squareMatrixMultPfS_S_i, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_14:
movq 112(%rsp), %rdi
movq 104(%rsp), %rsi
movl $900, %edx # imm = 0x384
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_15: # %.preheader172
# =>This Loop Header: Depth=1
# Child Loop BB3_16 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_16: # Parent Loop BB3_15 Depth=1
# => This Inner Loop Header: Depth=2
movq 96(%rsp), %rax
addq %rbx, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
incq %r15
cmpq $15, %r15
jne .LBB3_16
# %bb.17: # in Loop: Header=BB3_15 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_15
# %bb.18:
movl $.Lstr.2, %edi
callq puts@PLT
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_19: # %.preheader171
# =>This Loop Header: Depth=1
# Child Loop BB3_20 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_20: # Parent Loop BB3_19 Depth=1
# => This Inner Loop Header: Depth=2
movq 88(%rsp), %rax
addq %rbx, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
incq %r15
cmpq $15, %r15
jne .LBB3_20
# %bb.21: # in Loop: Header=BB3_19 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_19
# %bb.22:
movl $.Lstr.3, %edi
callq puts@PLT
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_23: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_24 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_24: # Parent Loop BB3_23 Depth=1
# => This Inner Loop Header: Depth=2
movq 112(%rsp), %rax
addq %rbx, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
incq %r15
cmpq $15, %r15
jne .LBB3_24
# %bb.25: # in Loop: Header=BB3_23 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_23
# %bb.26:
movq 80(%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi
callq hipFree
movq 104(%rsp), %rdi
callq hipFree
movq 96(%rsp), %rdi
callq hipHostFree
movq 88(%rsp), %rdi
callq hipHostFree
movq 112(%rsp), %rdi
callq hipHostFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14matrixTriUpperPfii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10matrixMultPfS_S_iii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16squareMatrixMultPfS_S_i, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14matrixTriUpperPfii,@object # @_Z14matrixTriUpperPfii
.section .rodata,"a",@progbits
.globl _Z14matrixTriUpperPfii
.p2align 3, 0x0
_Z14matrixTriUpperPfii:
.quad _Z29__device_stub__matrixTriUpperPfii
.size _Z14matrixTriUpperPfii, 8
.type _Z10matrixMultPfS_S_iii,@object # @_Z10matrixMultPfS_S_iii
.globl _Z10matrixMultPfS_S_iii
.p2align 3, 0x0
_Z10matrixMultPfS_S_iii:
.quad _Z25__device_stub__matrixMultPfS_S_iii
.size _Z10matrixMultPfS_S_iii, 8
.type _Z16squareMatrixMultPfS_S_i,@object # @_Z16squareMatrixMultPfS_S_i
.globl _Z16squareMatrixMultPfS_S_i
.p2align 3, 0x0
_Z16squareMatrixMultPfS_S_i:
.quad _Z31__device_stub__squareMatrixMultPfS_S_i
.size _Z16squareMatrixMultPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "size of matA %dX%d: %zu bytes\n"
.size .L.str, 31
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "size of matB %dX%d: %zu bytes\n"
.size .L.str.1, 31
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "size of matC %dX%d: %zu bytes\n"
.size .L.str.2, 31
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "total bytes allocated to mem: %zu bytes "
.size .L.str.3, 41
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "(~%zu MBytes)\n\n"
.size .L.str.4, 16
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz " %f "
.size .L.str.7, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14matrixTriUpperPfii"
.size .L__unnamed_1, 23
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10matrixMultPfS_S_iii"
.size .L__unnamed_2, 24
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z16squareMatrixMultPfS_S_i"
.size .L__unnamed_3, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Calculating...\n"
.size .Lstr, 16
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "matA matrix: "
.size .Lstr.1, 14
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\nmatB matrix: "
.size .Lstr.2, 15
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "\nResult matrix: "
.size .Lstr.3, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__matrixTriUpperPfii
.addrsig_sym _Z25__device_stub__matrixMultPfS_S_iii
.addrsig_sym _Z31__device_stub__squareMatrixMultPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14matrixTriUpperPfii
.addrsig_sym _Z10matrixMultPfS_S_iii
.addrsig_sym _Z16squareMatrixMultPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00020a05_00000000-6_TilingProgram.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z36__device_stub__Z14matrixTriUpperPfiiPfii
.type _Z36__device_stub__Z14matrixTriUpperPfiiPfii, @function
_Z36__device_stub__Z14matrixTriUpperPfiiPfii:
.LFB2095:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z14matrixTriUpperPfii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z36__device_stub__Z14matrixTriUpperPfiiPfii, .-_Z36__device_stub__Z14matrixTriUpperPfiiPfii
.globl _Z14matrixTriUpperPfii
.type _Z14matrixTriUpperPfii, @function
_Z14matrixTriUpperPfii:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z14matrixTriUpperPfiiPfii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _Z14matrixTriUpperPfii, .-_Z14matrixTriUpperPfii
.globl _Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii
.type _Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii, @function
_Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii:
.LFB2097:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z10matrixMultPfS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii, .-_Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii
.globl _Z10matrixMultPfS_S_iii
.type _Z10matrixMultPfS_S_iii, @function
_Z10matrixMultPfS_S_iii:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z10matrixMultPfS_S_iiiPfS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z10matrixMultPfS_S_iii, .-_Z10matrixMultPfS_S_iii
.globl _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i
.type _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i, @function
_Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i:
.LFB2099:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16squareMatrixMultPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2099:
.size _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i, .-_Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i
.globl _Z16squareMatrixMultPfS_S_i
.type _Z16squareMatrixMultPfS_S_i, @function
_Z16squareMatrixMultPfS_S_i:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _Z16squareMatrixMultPfS_S_i, .-_Z16squareMatrixMultPfS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "size of matA %dX%d: %zu bytes\n"
.align 8
.LC3:
.string "size of matB %dX%d: %zu bytes\n"
.align 8
.LC4:
.string "size of matC %dX%d: %zu bytes\n"
.align 8
.LC5:
.string "total bytes allocated to mem: %zu bytes "
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "(~%zu MBytes)\n\n"
.LC7:
.string "Calculating...\n\n"
.LC8:
.string "matA matrix: \n"
.LC9:
.string " %f "
.LC10:
.string "\n"
.LC11:
.string "\nmatB matrix: \n"
.LC12:
.string "\nResult matrix: \n"
.text
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $88, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $3333, %edi
call srand@PLT
movq %rsp, %rdi
movl $900, %esi
call cudaMallocHost@PLT
leaq 8(%rsp), %rdi
movl $900, %esi
call cudaMallocHost@PLT
leaq 16(%rsp), %rdi
movl $900, %esi
call cudaMallocHost@PLT
movl $60, %ebp
.L28:
leaq -60(%rbp), %rbx
.L29:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
mulss .LC1(%rip), %xmm0
movq (%rsp), %rax
movss %xmm0, (%rax,%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L29
addq $60, %rbp
cmpq $960, %rbp
jne .L28
movl $60, %ebp
.L30:
leaq -60(%rbp), %rbx
.L31:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
mulss .LC1(%rip), %xmm0
movq 8(%rsp), %rax
movss %xmm0, (%rax,%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L31
addq $60, %rbp
cmpq $960, %rbp
jne .L30
leaq 24(%rsp), %rdi
movl $900, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $900, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $900, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $900, %edx
movq (%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $900, %edx
movq 8(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $900, %r8d
movl $15, %ecx
movl $15, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $900, %r8d
movl $15, %ecx
movl $15, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $900, %r8d
movl $15, %ecx
movl $15, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2700, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $32, 60(%rsp)
movl $32, 64(%rsp)
movl $1, 68(%rsp)
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 68(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movq 48(%rsp), %rdi
movl 56(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L33:
movl 68(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movq 48(%rsp), %rdi
movl 56(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L34:
movl 68(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movq 48(%rsp), %rdi
movl 56(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L55
.L35:
movl $2, %ecx
movl $900, %edx
movq 40(%rsp), %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
call cudaDeviceSynchronize@PLT
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $60, %ebp
leaq .LC9(%rip), %r12
leaq .LC10(%rip), %r13
.L36:
leaq -60(%rbp), %rbx
.L37:
movq (%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L37
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $60, %rbp
cmpq $960, %rbp
jne .L36
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $60, %ebp
leaq .LC9(%rip), %r12
leaq .LC10(%rip), %r13
.L39:
leaq -60(%rbp), %rbx
.L40:
movq 8(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L40
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $60, %rbp
cmpq $960, %rbp
jne .L39
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $60, %ebp
leaq .LC9(%rip), %r12
leaq .LC10(%rip), %r13
.L42:
leaq -60(%rbp), %rbx
.L43:
movq 16(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L43
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $60, %rbp
cmpq $960, %rbp
jne .L42
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq (%rsp), %rdi
call cudaFreeHost@PLT
movq 8(%rsp), %rdi
call cudaFreeHost@PLT
movq 16(%rsp), %rdi
call cudaFreeHost@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L56
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movl $15, %edx
movl $15, %esi
movq 24(%rsp), %rdi
call _Z36__device_stub__Z14matrixTriUpperPfiiPfii
jmp .L33
.L54:
movl $15, %edx
movl $15, %esi
movq 32(%rsp), %rdi
call _Z36__device_stub__Z14matrixTriUpperPfiiPfii
jmp .L34
.L55:
movl $15, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z41__device_stub__Z16squareMatrixMultPfS_S_iPfS_S_i
jmp .L35
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.section .rodata.str1.1
.LC13:
.string "_Z16squareMatrixMultPfS_S_i"
.LC14:
.string "_Z10matrixMultPfS_S_iii"
.LC15:
.string "_Z14matrixTriUpperPfii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2102:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z16squareMatrixMultPfS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z10matrixMultPfS_S_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z14matrixTriUpperPfii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2102:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.align 4
.LC1:
.long 1149239296
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "TilingProgram.hip"
.globl _Z29__device_stub__matrixTriUpperPfii # -- Begin function _Z29__device_stub__matrixTriUpperPfii
.p2align 4, 0x90
.type _Z29__device_stub__matrixTriUpperPfii,@function
_Z29__device_stub__matrixTriUpperPfii: # @_Z29__device_stub__matrixTriUpperPfii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z14matrixTriUpperPfii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z29__device_stub__matrixTriUpperPfii, .Lfunc_end0-_Z29__device_stub__matrixTriUpperPfii
.cfi_endproc
# -- End function
.globl _Z25__device_stub__matrixMultPfS_S_iii # -- Begin function _Z25__device_stub__matrixMultPfS_S_iii
.p2align 4, 0x90
.type _Z25__device_stub__matrixMultPfS_S_iii,@function
_Z25__device_stub__matrixMultPfS_S_iii: # @_Z25__device_stub__matrixMultPfS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10matrixMultPfS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z25__device_stub__matrixMultPfS_S_iii, .Lfunc_end1-_Z25__device_stub__matrixMultPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__squareMatrixMultPfS_S_i # -- Begin function _Z31__device_stub__squareMatrixMultPfS_S_i
.p2align 4, 0x90
.type _Z31__device_stub__squareMatrixMultPfS_S_i,@function
_Z31__device_stub__squareMatrixMultPfS_S_i: # @_Z31__device_stub__squareMatrixMultPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16squareMatrixMultPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z31__device_stub__squareMatrixMultPfS_S_i, .Lfunc_end2-_Z31__device_stub__squareMatrixMultPfS_S_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI3_1:
.long 0x44800000 # float 1024
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $3333, %edi # imm = 0xD05
callq srand
leaq 96(%rsp), %rdi
xorl %ebx, %ebx
movl $900, %esi # imm = 0x384
xorl %edx, %edx
callq hipHostMalloc
leaq 88(%rsp), %rdi
movl $900, %esi # imm = 0x384
xorl %edx, %edx
callq hipHostMalloc
leaq 112(%rsp), %rdi
movl $900, %esi # imm = 0x384
xorl %edx, %edx
callq hipHostMalloc
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_1: # %.preheader175
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI3_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss .LCPI3_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
movq 96(%rsp), %rax
addq %rbx, %rax
movss %xmm0, (%rax,%r15,4)
incq %r15
cmpq $15, %r15
jne .LBB3_2
# %bb.3: # in Loop: Header=BB3_1 Depth=1
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_1
# %bb.4: # %.preheader173.preheader
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_5: # %.preheader173
# =>This Loop Header: Depth=1
# Child Loop BB3_6 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_6: # Parent Loop BB3_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI3_0(%rip), %xmm0
mulss .LCPI3_1(%rip), %xmm0
movq 88(%rsp), %rax
addq %rbx, %rax
movss %xmm0, (%rax,%r15,4)
incq %r15
cmpq $15, %r15
jne .LBB3_6
# %bb.7: # in Loop: Header=BB3_5 Depth=1
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_5
# %bb.8:
movabsq $137438953504, %rbx # imm = 0x2000000020
movabsq $4294967297, %r14 # imm = 0x100000001
leaq 80(%rsp), %rdi
movl $900, %esi # imm = 0x384
callq hipMalloc
leaq 72(%rsp), %rdi
movl $900, %esi # imm = 0x384
callq hipMalloc
leaq 104(%rsp), %rdi
movl $900, %esi # imm = 0x384
callq hipMalloc
movq 80(%rsp), %rdi
movq 96(%rsp), %rsi
movl $900, %edx # imm = 0x384
movl $1, %ecx
callq hipMemcpy
movq 72(%rsp), %rdi
movq 88(%rsp), %rsi
movl $900, %edx # imm = 0x384
movl $1, %ecx
callq hipMemcpy
movl $.L.str, %edi
movl $900, %ecx # imm = 0x384
movl $15, %esi
movl $15, %edx
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $900, %ecx # imm = 0x384
movl $15, %esi
movl $15, %edx
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movl $900, %ecx # imm = 0x384
movl $15, %esi
movl $15, %edx
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl $2700, %esi # imm = 0xA8C
xorl %eax, %eax
callq printf
movl $.L.str.4, %edi
xorl %esi, %esi
xorl %eax, %eax
callq printf
movl $.Lstr, %edi
callq puts@PLT
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_10
# %bb.9:
movq 80(%rsp), %rax
movq %rax, 64(%rsp)
movl $15, 8(%rsp)
movl $15, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
movq %rsp, %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z14matrixTriUpperPfii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_10:
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_12
# %bb.11:
movq 72(%rsp), %rax
movq %rax, 64(%rsp)
movl $15, 8(%rsp)
movl $15, (%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
movq %rsp, %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z14matrixTriUpperPfii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_12:
movq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_14
# %bb.13:
movq 80(%rsp), %rax
movq 72(%rsp), %rcx
movq 104(%rsp), %rdx
movq %rax, 64(%rsp)
movq %rcx, 56(%rsp)
movq %rdx, 48(%rsp)
movl $15, 124(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 124(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z16squareMatrixMultPfS_S_i, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_14:
movq 112(%rsp), %rdi
movq 104(%rsp), %rsi
movl $900, %edx # imm = 0x384
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_15: # %.preheader172
# =>This Loop Header: Depth=1
# Child Loop BB3_16 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_16: # Parent Loop BB3_15 Depth=1
# => This Inner Loop Header: Depth=2
movq 96(%rsp), %rax
addq %rbx, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
incq %r15
cmpq $15, %r15
jne .LBB3_16
# %bb.17: # in Loop: Header=BB3_15 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_15
# %bb.18:
movl $.Lstr.2, %edi
callq puts@PLT
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_19: # %.preheader171
# =>This Loop Header: Depth=1
# Child Loop BB3_20 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_20: # Parent Loop BB3_19 Depth=1
# => This Inner Loop Header: Depth=2
movq 88(%rsp), %rax
addq %rbx, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
incq %r15
cmpq $15, %r15
jne .LBB3_20
# %bb.21: # in Loop: Header=BB3_19 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_19
# %bb.22:
movl $.Lstr.3, %edi
callq puts@PLT
xorl %ebx, %ebx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_23: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_24 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_24: # Parent Loop BB3_23 Depth=1
# => This Inner Loop Header: Depth=2
movq 112(%rsp), %rax
addq %rbx, %rax
movss (%rax,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.7, %edi
movb $1, %al
callq printf
incq %r15
cmpq $15, %r15
jne .LBB3_24
# %bb.25: # in Loop: Header=BB3_23 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $60, %rbx
cmpq $15, %r14
jne .LBB3_23
# %bb.26:
movq 80(%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi
callq hipFree
movq 104(%rsp), %rdi
callq hipFree
movq 96(%rsp), %rdi
callq hipHostFree
movq 88(%rsp), %rdi
callq hipHostFree
movq 112(%rsp), %rdi
callq hipHostFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14matrixTriUpperPfii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10matrixMultPfS_S_iii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16squareMatrixMultPfS_S_i, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14matrixTriUpperPfii,@object # @_Z14matrixTriUpperPfii
.section .rodata,"a",@progbits
.globl _Z14matrixTriUpperPfii
.p2align 3, 0x0
_Z14matrixTriUpperPfii:
.quad _Z29__device_stub__matrixTriUpperPfii
.size _Z14matrixTriUpperPfii, 8
.type _Z10matrixMultPfS_S_iii,@object # @_Z10matrixMultPfS_S_iii
.globl _Z10matrixMultPfS_S_iii
.p2align 3, 0x0
_Z10matrixMultPfS_S_iii:
.quad _Z25__device_stub__matrixMultPfS_S_iii
.size _Z10matrixMultPfS_S_iii, 8
.type _Z16squareMatrixMultPfS_S_i,@object # @_Z16squareMatrixMultPfS_S_i
.globl _Z16squareMatrixMultPfS_S_i
.p2align 3, 0x0
_Z16squareMatrixMultPfS_S_i:
.quad _Z31__device_stub__squareMatrixMultPfS_S_i
.size _Z16squareMatrixMultPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "size of matA %dX%d: %zu bytes\n"
.size .L.str, 31
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "size of matB %dX%d: %zu bytes\n"
.size .L.str.1, 31
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "size of matC %dX%d: %zu bytes\n"
.size .L.str.2, 31
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "total bytes allocated to mem: %zu bytes "
.size .L.str.3, 41
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "(~%zu MBytes)\n\n"
.size .L.str.4, 16
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz " %f "
.size .L.str.7, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14matrixTriUpperPfii"
.size .L__unnamed_1, 23
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10matrixMultPfS_S_iii"
.size .L__unnamed_2, 24
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z16squareMatrixMultPfS_S_i"
.size .L__unnamed_3, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Calculating...\n"
.size .Lstr, 16
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "matA matrix: "
.size .Lstr.1, 14
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\nmatB matrix: "
.size .Lstr.2, 15
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "\nResult matrix: "
.size .Lstr.3, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__matrixTriUpperPfii
.addrsig_sym _Z25__device_stub__matrixMultPfS_S_iii
.addrsig_sym _Z31__device_stub__squareMatrixMultPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14matrixTriUpperPfii
.addrsig_sym _Z10matrixMultPfS_S_iii
.addrsig_sym _Z16squareMatrixMultPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <iostream>
#include <sys/time.h>
// Checks the result of a CUDA runtime call; on failure prints the file/line
// and the runtime's error string. Reports but does not abort (best-effort).
// Bug fixed: the original macro's closing brace carried a trailing backslash,
// which line-continues the macro definition into the NEXT source line and
// silently swallows it (here, the `double cpuSecond() {` header).
#define CHECK(call) \
{ \
    const cudaError_t error = call; \
    if(error!=cudaSuccess) { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
    } \
}
// Returns the current wall-clock time in seconds with microsecond
// resolution, obtained from gettimeofday(). Used for coarse host-side
// timing of CPU and GPU phases.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + 1.e-6 * (double)now.tv_usec;
}
// Element-wise comparison of a CPU reference against results copied back
// from the device.
//   hostRef : N floats computed on the host
//   gpuRef  : N floats computed on the device
//   N       : element count
// Prints the first element whose absolute difference exceeds epsilon and
// stops; otherwise prints a success message.
// Bug fixed: the original called unqualified abs() on a float difference.
// Without <cmath>'s float overloads reliably in the global namespace, that
// binds to the integer abs(int), truncating the difference toward zero and
// hiding every mismatch smaller than 1.0. fabs() compares correctly.
void checkResult(float *hostRef, float *gpuRef, const int N) {
    double epsilon = 1.0E-8;
    bool match = true;
    for(int i=0; i<N; i++){
        if (fabs((double)hostRef[i]-(double)gpuRef[i])>epsilon){
            match = false;
            std::cout << "Arrays do not match!" << std::endl;
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i],i);
            break;
        }
    }
    if (match) std::cout << "Arrays match. " << std::endl;
}
// Fills ip[0..size) with pseudo-random floats in [0.0f, 25.5f]:
// (rand() & 0xFF) yields 0..255, divided by 10. The generator is seeded
// from the current time so successive runs differ.
void initialData(float *ip, int size) {
    // generate different seed for random number
    time_t seed;
    srand((unsigned int) time(&seed));

    for(int idx = 0; idx < size; ++idx) {
        ip[idx] = (float)(rand() & 0xFF) / 10.f;
    }
}
// CPU reference implementation: C = A + B element-wise for an nx-by-ny
// matrix stored in row-major order. Processes one row per outer iteration.
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) {
    for (int iy = 0; iy < ny; iy++) {
        float *rowA = A + (size_t)iy * (size_t)nx;
        float *rowB = B + (size_t)iy * (size_t)nx;
        float *rowC = C + (size_t)iy * (size_t)nx;
        for (int ix = 0; ix < nx; ix++) {
            rowC[ix] = rowA[ix] + rowB[ix];
        }
    }
}
// Kernel: MatC = MatA + MatB element-wise for an nx-by-ny row-major matrix.
// Expects a 2D launch where thread (x,y) maps to matrix column ix / row iy;
// the bounds check handles grids that overshoot the matrix edges.
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny) {
    // Global 2D coordinates of this thread.
    unsigned int ix = threadIdx.x+blockIdx.x*blockDim.x;
    unsigned int iy = threadIdx.y+blockIdx.y*blockDim.y;
    // Flatten (iy, ix) into a row-major linear index.
    unsigned int idx = iy*nx+ix;
    // Guard: partial edge blocks must not write out of bounds.
    // (nx/ny are converted to unsigned for the comparison; assumed positive.)
    if (ix<nx && iy<ny) {
        MatC[idx] = MatA[idx] + MatB[idx];
    }
}
// Driver: allocates two 4096x4096 float matrices, sums them on the CPU and
// on the GPU (2D grid of 32x32 blocks), verifies the GPU result against the
// CPU reference, and reports the GPU elapsed time.
// Fixes: every CUDA API call is now checked with CHECK (the original
// ignored all return codes except cudaGetDeviceProperties/cudaSetDevice),
// and cudaGetLastError() is queried after the launch so bad launch
// configurations are no longer silent.
int main (int argc, char **argv) {
    std::cout << argv[0] << " Starting..." << std::endl;

    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl;
    CHECK(cudaSetDevice(dev));

    // set up data size of matrix: 4096 x 4096
    int nx = 1<<12;
    int ny = 1<<12;
    int nxy = nx*ny;
    size_t nBytes = (size_t)nxy * sizeof(float);   // 64 MiB per matrix
    std::cout << "Matrix size: " << nx << ", " << ny << std::endl;

    // malloc host memory
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A    = (float *)malloc(nBytes);
    h_B    = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef  = (float *)malloc(nBytes);

    double iStart, iElaps;

    // initialize dataset at host side
    iStart = cpuSecond();
    initialData(h_A, nxy);
    initialData(h_B, nxy);
    iElaps = cpuSecond() - iStart;

    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    // add matrices at host side for result checks
    iStart = cpuSecond();
    sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
    iElaps = cpuSecond() - iStart;

    // malloc device global memory
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((void **)&d_A, nBytes));
    CHECK(cudaMalloc((void **)&d_B, nBytes));
    CHECK(cudaMalloc((void **)&d_C, nBytes));

    // transfer data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));

    // invoke kernel at host side: 32x32 threads per block, ceil-div grid
    int dimx = 32;
    int dimy = 32;
    dim3 block(dimx, dimy);
    dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
    iStart = cpuSecond();
    sumMatrixOnGPU2D <<<grid, block>>> (d_A, d_B, d_C, nx, ny);
    CHECK(cudaGetLastError());        // catch launch-configuration errors
    CHECK(cudaDeviceSynchronize());   // catch asynchronous execution errors
    iElaps = cpuSecond() - iStart;
    std::cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), ("
        << block.x << ", " << block.y << ") >>>" <<
        "Time elapsed " << iElaps << std::endl;

    // copy kernel result back to host side
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    // check device results against the CPU reference
    checkResult(hostRef, gpuRef, nxy);

    // free device global memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));

    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);

    CHECK(cudaDeviceReset());
    return 0;
}
Function : _Z16sumMatrixOnGPU2DPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e680000002100 */
/*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e620000002500 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06070 */
/*0070*/ IMAD R0, R5, c[0x0][0x0], R0 ; /* 0x0000000005007a24 */
/* 0x002fca00078e0200 */
/*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706470 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE.U32 R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0007 */
/*00e0*/ IMAD.WIDE.U32 R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0007 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE.U32 R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0007 */
/*0120*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <iostream>
#include <sys/time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error!=cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
} \
} \
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = true;
for(int i=0; i<N; i++){
if (abs(hostRef[i]-gpuRef[i])>epsilon){
match = false;
std::cout << "Arrays do not match!" << std::endl;
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i],i);
break;
}
}
if (match) std::cout << "Arrays match. " << std::endl;
}
void initialData(float *ip, int size) {
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for(int i=0; i<size; i++) {
ip[i] = (float)(rand()&0xFF)/10.f;
}
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) {
float *ia = A;
float *ib = B;
float *ic = C;
for(int iy=0; iy<ny; iy++) {
for(int ix=0; ix<nx; ix++) {
ic[ix]=ia[ix]+ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
}
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny) {
unsigned int ix = threadIdx.x+blockIdx.x*blockDim.x;
unsigned int iy = threadIdx.y+blockIdx.y*blockDim.y;
unsigned int idx = iy*nx+ix;
if (ix<nx && iy<ny) {
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main (int argc, char **argv) {
std::cout << argv[0] << " Starting..." << std::endl;
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl;
CHECK(cudaSetDevice(dev));
// set up data size of matrix
int nx = 1<<12;
int ny = 1<<12;
int nxy=nx*ny;
int nBytes=nxy*sizeof(float);
std::cout << "Matrix size: " << nx << ", " << ny << std::endl;
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A =(float *)malloc(nBytes);
h_B =(float *)malloc(nBytes);
hostRef=(float *)malloc(nBytes);
gpuRef =(float *)malloc(nBytes);
// cpuSecond()
double iStart, iElaps;
// initial dataset at host site
iStart=cpuSecond();
initialData(h_A, nxy);
initialData(h_B, nxy);
iElaps=cpuSecond()-iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart=cpuSecond();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps=cpuSecond()-iStart;
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, nBytes);
cudaMalloc((void **)&d_B, nBytes);
cudaMalloc((void **)&d_C, nBytes);
// transfer data from host to device
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// invoke kernel at host side
int dimx=32;
int dimy=32;
dim3 block(dimx, dimy);
dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
iStart=cpuSecond();
sumMatrixOnGPU2D <<<grid, block>>> (d_A, d_B, d_C, nx, ny);
cudaDeviceSynchronize();
iElaps=cpuSecond()-iStart;
std::cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), ("
<< block.x << ", " << block.y << ") >>>" <<
"Time elapsed " << iElaps << std::endl;
// copy kernel result back to host side
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
cudaDeviceReset();
return 0;
} | .file "tmpxft_0018f34e_00000000-6_sumMatrixOnGPU-2D-grid-2D-block.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3676:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3676:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9cpuSecondv
.type _Z9cpuSecondv, @function
_Z9cpuSecondv:
.LFB3669:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z9cpuSecondv, .-_Z9cpuSecondv
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "Arrays do not match!"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "host %5.2f gpu %5.2f at current %d\n"
.section .rodata.str1.1
.LC5:
.string "Arrays match. "
.text
.globl _Z11checkResultPfS_i
.type _Z11checkResultPfS_i, @function
_Z11checkResultPfS_i:
.LFB3670:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
testl %edx, %edx
jle .L8
movl $0, %ebx
movss .LC1(%rip), %xmm2
movsd .LC2(%rip), %xmm1
.L15:
movq %rdi, %r12
movq %rsi, %rbp
movss (%rdi), %xmm0
subss (%rsi), %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm1, %xmm0
ja .L23
addl $1, %ebx
addq $4, %rdi
addq $4, %rsi
cmpl %ebx, %edx
jne .L15
.L8:
movl $14, %edx
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L24
cmpb $0, 56(%rbx)
je .L17
movzbl 67(%rbx), %esi
.L18:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.L7:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movl $20, %edx
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%r13,%rax), %r13
testq %r13, %r13
je .L25
cmpb $0, 56(%r13)
je .L12
movzbl 67(%r13), %esi
.L13:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
pxor %xmm0, %xmm0
cvtss2sd (%r12), %xmm0
movl %ebx, %edx
pxor %xmm1, %xmm1
cvtss2sd 0(%rbp), %xmm1
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
jmp .L7
.L25:
call _ZSt16__throw_bad_castv@PLT
.L12:
movq %r13, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%r13), %rax
movl $10, %esi
movq %r13, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L13
.L24:
call _ZSt16__throw_bad_castv@PLT
.L17:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L18
.cfi_endproc
.LFE3670:
.size _Z11checkResultPfS_i, .-_Z11checkResultPfS_i
.globl _Z11initialDataPfi
.type _Z11initialDataPfi, @function
_Z11initialDataPfi:
.LFB3671:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r12
movl %esi, %ebp
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
call time@PLT
movl %eax, %edi
call srand@PLT
testl %ebp, %ebp
jle .L26
movq %r12, %rbx
movslq %ebp, %rbp
leaq (%r12,%rbp,4), %rbp
.L28:
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC6(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L28
.L26:
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size _Z11initialDataPfi, .-_Z11initialDataPfi
.globl _Z15sumMatrixOnHostPfS_S_ii
.type _Z15sumMatrixOnHostPfS_S_ii, @function
_Z15sumMatrixOnHostPfS_S_ii:
.LFB3672:
.cfi_startproc
endbr64
movq %rsi, %r9
testl %r8d, %r8d
jle .L33
movslq %ecx, %rsi
salq $2, %rsi
movl $0, %r10d
jmp .L35
.L36:
movss (%rdi,%rax), %xmm0
addss (%r9,%rax), %xmm0
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq %rax, %rsi
jne .L36
.L37:
addq %rsi, %rdi
addq %rsi, %r9
addq %rsi, %rdx
addl $1, %r10d
cmpl %r10d, %r8d
je .L33
.L35:
movl $0, %eax
testl %ecx, %ecx
jg .L36
jmp .L37
.L33:
ret
.cfi_endproc
.LFE3672:
.size _Z15sumMatrixOnHostPfS_S_ii, .-_Z15sumMatrixOnHostPfS_S_ii
.globl _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
.type _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii, @function
_Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii:
.LFB3698:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L46
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16sumMatrixOnGPU2DPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii, .-_Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.type _Z16sumMatrixOnGPU2DPfS_S_ii, @function
_Z16sumMatrixOnGPU2DPfS_S_ii:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z16sumMatrixOnGPU2DPfS_S_ii, .-_Z16sumMatrixOnGPU2DPfS_S_ii
.section .rodata.str1.1
.LC7:
.string " Starting..."
.section .rodata.str1.8
.align 8
.LC8:
.string "/home/ubuntu/Datasets/stackv2/train-structured/dhayakaw/cuda/master/PCCP/Chapter2/sumMatrixOnGPU-2D-grid-2D-block.cu"
.section .rodata.str1.1
.LC9:
.string "Error: %s:%d, "
.LC10:
.string "code: %d, reason: %s\n"
.LC11:
.string "Using Device "
.LC12:
.string ": "
.LC13:
.string "Matrix size: "
.LC14:
.string ", "
.LC15:
.string "sumMatrixOnGPU2D <<< ("
.LC16:
.string "), ("
.LC17:
.string ") >>>"
.LC18:
.string "Time elapsed "
.text
.globl main
.type main, @function
main:
.LFB3673:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $1104, %rsp
.cfi_def_cfa_offset 1152
movq %fs:40, %rax
movq %rax, 1096(%rsp)
xorl %eax, %eax
movq (%rsi), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC7(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq 64(%rsp), %rdi
movl $0, %esi
call cudaGetDeviceProperties_v2@PLT
testl %eax, %eax
jne .L55
.L50:
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $0, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq 64(%rsp), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %edi
call cudaSetDevice@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L56
.L51:
leaq .LC13(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $4096, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC14(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $4096, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $67108864, %edi
call malloc@PLT
movq %rax, %r13
movl $67108864, %edi
call malloc@PLT
movq %rax, %r12
movl $67108864, %edi
call malloc@PLT
movq %rax, %rbp
movl $67108864, %edi
call malloc@PLT
movq %rax, %rbx
call _Z9cpuSecondv
movl $16777216, %esi
movq %r13, %rdi
call _Z11initialDataPfi
movl $16777216, %esi
movq %r12, %rdi
call _Z11initialDataPfi
call _Z9cpuSecondv
movl $67108864, %edx
movl $0, %esi
movq %rbp, %rdi
call memset@PLT
movl $67108864, %edx
movl $0, %esi
movq %rbx, %rdi
call memset@PLT
call _Z9cpuSecondv
movl $4096, %r8d
movl $4096, %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z15sumMatrixOnHostPfS_S_ii
call _Z9cpuSecondv
leaq 16(%rsp), %rdi
movl $67108864, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $67108864, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $67108864, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $67108864, %edx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $67108864, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 48(%rsp)
movl $1, 60(%rsp)
call _Z9cpuSecondv
movsd %xmm0, 8(%rsp)
movl $128, 52(%rsp)
movl $128, 56(%rsp)
movl $32, 40(%rsp)
movl $32, 44(%rsp)
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L57
.L52:
call cudaDeviceSynchronize@PLT
call _Z9cpuSecondv
subsd 8(%rsp), %xmm0
movsd %xmm0, 8(%rsp)
leaq .LC15(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $128, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC14(%rip), %r14
movq %r14, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $128, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC16(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $32, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
movq %r14, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $32, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC17(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC18(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $2, %ecx
movl $67108864, %edx
movq 32(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl $16777216, %edx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z11checkResultPfS_i
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
call cudaDeviceReset@PLT
movq 1096(%rsp), %rax
subq %fs:40, %rax
jne .L58
movl $0, %eax
addq $1104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
movl %eax, %ebx
movl $75, %ecx
leaq .LC8(%rip), %rdx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %ebx, %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L50
.L56:
movl $77, %ecx
leaq .LC8(%rip), %rdx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %ebx, %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L51
.L57:
movl $4096, %r8d
movl $4096, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
jmp .L52
.L58:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3673:
.size main, .-main
.section .rodata.str1.1
.LC19:
.string "_Z16sumMatrixOnGPU2DPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3701:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z16sumMatrixOnGPU2DPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC1:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC2:
.long -500134854
.long 1044740494
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC6:
.long 1092616192
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <iostream>
#include <sys/time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error!=cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
} \
} \
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = true;
for(int i=0; i<N; i++){
if (abs(hostRef[i]-gpuRef[i])>epsilon){
match = false;
std::cout << "Arrays do not match!" << std::endl;
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i],i);
break;
}
}
if (match) std::cout << "Arrays match. " << std::endl;
}
void initialData(float *ip, int size) {
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for(int i=0; i<size; i++) {
ip[i] = (float)(rand()&0xFF)/10.f;
}
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) {
float *ia = A;
float *ib = B;
float *ic = C;
for(int iy=0; iy<ny; iy++) {
for(int ix=0; ix<nx; ix++) {
ic[ix]=ia[ix]+ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
}
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny) {
unsigned int ix = threadIdx.x+blockIdx.x*blockDim.x;
unsigned int iy = threadIdx.y+blockIdx.y*blockDim.y;
unsigned int idx = iy*nx+ix;
if (ix<nx && iy<ny) {
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main (int argc, char **argv) {
std::cout << argv[0] << " Starting..." << std::endl;
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl;
CHECK(cudaSetDevice(dev));
// set up data size of matrix
int nx = 1<<12;
int ny = 1<<12;
int nxy=nx*ny;
int nBytes=nxy*sizeof(float);
std::cout << "Matrix size: " << nx << ", " << ny << std::endl;
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A =(float *)malloc(nBytes);
h_B =(float *)malloc(nBytes);
hostRef=(float *)malloc(nBytes);
gpuRef =(float *)malloc(nBytes);
// cpuSecond()
double iStart, iElaps;
// initial dataset at host site
iStart=cpuSecond();
initialData(h_A, nxy);
initialData(h_B, nxy);
iElaps=cpuSecond()-iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart=cpuSecond();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps=cpuSecond()-iStart;
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, nBytes);
cudaMalloc((void **)&d_B, nBytes);
cudaMalloc((void **)&d_C, nBytes);
// transfer data from host to device
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// invoke kernel at host side
int dimx=32;
int dimy=32;
dim3 block(dimx, dimy);
dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
iStart=cpuSecond();
sumMatrixOnGPU2D <<<grid, block>>> (d_A, d_B, d_C, nx, ny);
cudaDeviceSynchronize();
iElaps=cpuSecond()-iStart;
std::cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), ("
<< block.x << ", " << block.y << ") >>>" <<
"Time elapsed " << iElaps << std::endl;
// copy kernel result back to host side
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
cudaDeviceReset();
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <sys/time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error!=hipSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
} \
} \
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = true;
for(int i=0; i<N; i++){
if (abs(hostRef[i]-gpuRef[i])>epsilon){
match = false;
std::cout << "Arrays do not match!" << std::endl;
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i],i);
break;
}
}
if (match) std::cout << "Arrays match. " << std::endl;
}
void initialData(float *ip, int size) {
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for(int i=0; i<size; i++) {
ip[i] = (float)(rand()&0xFF)/10.f;
}
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) {
float *ia = A;
float *ib = B;
float *ic = C;
for(int iy=0; iy<ny; iy++) {
for(int ix=0; ix<nx; ix++) {
ic[ix]=ia[ix]+ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
}
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny) {
unsigned int ix = threadIdx.x+blockIdx.x*blockDim.x;
unsigned int iy = threadIdx.y+blockIdx.y*blockDim.y;
unsigned int idx = iy*nx+ix;
if (ix<nx && iy<ny) {
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main (int argc, char **argv) {
std::cout << argv[0] << " Starting..." << std::endl;
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl;
CHECK(hipSetDevice(dev));
// set up data size of matrix
int nx = 1<<12;
int ny = 1<<12;
int nxy=nx*ny;
int nBytes=nxy*sizeof(float);
std::cout << "Matrix size: " << nx << ", " << ny << std::endl;
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A =(float *)malloc(nBytes);
h_B =(float *)malloc(nBytes);
hostRef=(float *)malloc(nBytes);
gpuRef =(float *)malloc(nBytes);
// cpuSecond()
double iStart, iElaps;
// initial dataset at host site
iStart=cpuSecond();
initialData(h_A, nxy);
initialData(h_B, nxy);
iElaps=cpuSecond()-iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart=cpuSecond();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps=cpuSecond()-iStart;
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, nBytes);
hipMalloc((void **)&d_B, nBytes);
hipMalloc((void **)&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
int dimx=32;
int dimy=32;
dim3 block(dimx, dimy);
dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
iStart=cpuSecond();
sumMatrixOnGPU2D <<<grid, block>>> (d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
iElaps=cpuSecond()-iStart;
std::cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), ("
<< block.x << ", " << block.y << ") >>>" <<
"Time elapsed " << iElaps << std::endl;
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
hipDeviceReset();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <sys/time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error!=hipSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
} \
} \
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = true;
for(int i=0; i<N; i++){
if (abs(hostRef[i]-gpuRef[i])>epsilon){
match = false;
std::cout << "Arrays do not match!" << std::endl;
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i],i);
break;
}
}
if (match) std::cout << "Arrays match. " << std::endl;
}
void initialData(float *ip, int size) {
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for(int i=0; i<size; i++) {
ip[i] = (float)(rand()&0xFF)/10.f;
}
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) {
float *ia = A;
float *ib = B;
float *ic = C;
for(int iy=0; iy<ny; iy++) {
for(int ix=0; ix<nx; ix++) {
ic[ix]=ia[ix]+ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
}
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny) {
unsigned int ix = threadIdx.x+blockIdx.x*blockDim.x;
unsigned int iy = threadIdx.y+blockIdx.y*blockDim.y;
unsigned int idx = iy*nx+ix;
if (ix<nx && iy<ny) {
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main (int argc, char **argv) {
std::cout << argv[0] << " Starting..." << std::endl;
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl;
CHECK(hipSetDevice(dev));
// set up data size of matrix
int nx = 1<<12;
int ny = 1<<12;
int nxy=nx*ny;
int nBytes=nxy*sizeof(float);
std::cout << "Matrix size: " << nx << ", " << ny << std::endl;
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A =(float *)malloc(nBytes);
h_B =(float *)malloc(nBytes);
hostRef=(float *)malloc(nBytes);
gpuRef =(float *)malloc(nBytes);
// cpuSecond()
double iStart, iElaps;
// initial dataset at host site
iStart=cpuSecond();
initialData(h_A, nxy);
initialData(h_B, nxy);
iElaps=cpuSecond()-iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart=cpuSecond();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps=cpuSecond()-iStart;
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, nBytes);
hipMalloc((void **)&d_B, nBytes);
hipMalloc((void **)&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
int dimx=32;
int dimy=32;
dim3 block(dimx, dimy);
dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
iStart=cpuSecond();
sumMatrixOnGPU2D <<<grid, block>>> (d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
iElaps=cpuSecond()-iStart;
std::cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), ("
<< block.x << ", " << block.y << ") >>>" <<
"Time elapsed " << iElaps << std::endl;
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
hipDeviceReset();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16sumMatrixOnGPU2DPfS_S_ii
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.p2align 8
.type _Z16sumMatrixOnGPU2DPfS_S_ii,@function
_Z16sumMatrixOnGPU2DPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[8:11], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1]
v_mov_b32_e32 v3, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s8, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16sumMatrixOnGPU2DPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16sumMatrixOnGPU2DPfS_S_ii, .Lfunc_end0-_Z16sumMatrixOnGPU2DPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16sumMatrixOnGPU2DPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16sumMatrixOnGPU2DPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <sys/time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error!=hipSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
} \
} \
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = true;
for(int i=0; i<N; i++){
if (abs(hostRef[i]-gpuRef[i])>epsilon){
match = false;
std::cout << "Arrays do not match!" << std::endl;
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i],i);
break;
}
}
if (match) std::cout << "Arrays match. " << std::endl;
}
void initialData(float *ip, int size) {
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for(int i=0; i<size; i++) {
ip[i] = (float)(rand()&0xFF)/10.f;
}
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) {
float *ia = A;
float *ib = B;
float *ic = C;
for(int iy=0; iy<ny; iy++) {
for(int ix=0; ix<nx; ix++) {
ic[ix]=ia[ix]+ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
}
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny) {
unsigned int ix = threadIdx.x+blockIdx.x*blockDim.x;
unsigned int iy = threadIdx.y+blockIdx.y*blockDim.y;
unsigned int idx = iy*nx+ix;
if (ix<nx && iy<ny) {
MatC[idx] = MatA[idx] + MatB[idx];
}
}
int main (int argc, char **argv) {
std::cout << argv[0] << " Starting..." << std::endl;
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl;
CHECK(hipSetDevice(dev));
// set up data size of matrix
int nx = 1<<12;
int ny = 1<<12;
int nxy=nx*ny;
int nBytes=nxy*sizeof(float);
std::cout << "Matrix size: " << nx << ", " << ny << std::endl;
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A =(float *)malloc(nBytes);
h_B =(float *)malloc(nBytes);
hostRef=(float *)malloc(nBytes);
gpuRef =(float *)malloc(nBytes);
// cpuSecond()
double iStart, iElaps;
// initial dataset at host site
iStart=cpuSecond();
initialData(h_A, nxy);
initialData(h_B, nxy);
iElaps=cpuSecond()-iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart=cpuSecond();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps=cpuSecond()-iStart;
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, nBytes);
hipMalloc((void **)&d_B, nBytes);
hipMalloc((void **)&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
int dimx=32;
int dimy=32;
dim3 block(dimx, dimy);
dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
iStart=cpuSecond();
sumMatrixOnGPU2D <<<grid, block>>> (d_A, d_B, d_C, nx, ny);
hipDeviceSynchronize();
iElaps=cpuSecond()-iStart;
std::cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), ("
<< block.x << ", " << block.y << ") >>>" <<
"Time elapsed " << iElaps << std::endl;
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
hipDeviceReset();
return 0;
} | .text
.file "sumMatrixOnGPU-2D-grid-2D-block.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9cpuSecondv
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z9cpuSecondv
.p2align 4, 0x90
.type _Z9cpuSecondv,@function
_Z9cpuSecondv: # @_Z9cpuSecondv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z9cpuSecondv, .Lfunc_end0-_Z9cpuSecondv
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z11checkResultPfS_i
.LCPI1_0:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI1_1:
.quad 0x3e45798ee2308c3a # double 1.0E-8
.text
.globl _Z11checkResultPfS_i
.p2align 4, 0x90
.type _Z11checkResultPfS_i,@function
_Z11checkResultPfS_i: # @_Z11checkResultPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
testl %edx, %edx
jle .LBB1_9
# %bb.1: # %.lr.ph.preheader
movl %edx, %eax
xorl %ebx, %ebx
movaps .LCPI1_0(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rbx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss (%rsi,%rbx,4), %xmm2
andps %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm1, %xmm2
ja .LBB1_3
# %bb.8: # in Loop: Header=BB1_2 Depth=1
incq %rbx
cmpq %rbx, %rax
jne .LBB1_2
.LBB1_9: # %.critedge
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB1_14
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i15
cmpb $0, 56(%rbx)
je .LBB1_12
# %bb.11:
movzbl 67(%rbx), %eax
jmp .LBB1_13
.LBB1_3:
movq %rdi, %r12
movq %rsi, %r15
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $20, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB1_14
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB1_6
# %bb.5:
movzbl 67(%r14), %eax
jmp .LBB1_7
.LBB1_12:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit18
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp _ZNSo5flushEv # TAILCALL
.LBB1_6:
.cfi_def_cfa_offset 48
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movss (%r12,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss (%r15,%rbx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movl $.L.str.1, %edi
movl %ebx, %esi
movb $2, %al
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.LBB1_14:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size _Z11checkResultPfS_i, .Lfunc_end1-_Z11checkResultPfS_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z11initialDataPfi
.LCPI2_0:
.long 0x41200000 # float 10
.text
.globl _Z11initialDataPfi
.p2align 4, 0x90
.type _Z11initialDataPfi,@function
_Z11initialDataPfi: # @_Z11initialDataPfi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movq %rdi, %rbx
movq %rsp, %rdi
callq time
movl %eax, %edi
callq srand
testl %ebp, %ebp
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI2_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB2_2
.LBB2_3: # %._crit_edge
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z11initialDataPfi, .Lfunc_end2-_Z11initialDataPfi
.cfi_endproc
# -- End function
.globl _Z15sumMatrixOnHostPfS_S_ii # -- Begin function _Z15sumMatrixOnHostPfS_S_ii
.p2align 4, 0x90
.type _Z15sumMatrixOnHostPfS_S_ii,@function
_Z15sumMatrixOnHostPfS_S_ii: # @_Z15sumMatrixOnHostPfS_S_ii
.cfi_startproc
# %bb.0:
testl %r8d, %r8d
jle .LBB3_6
# %bb.1: # %.preheader.lr.ph
movslq %ecx, %rax
movl %eax, %r9d
shlq $2, %rax
xorl %r10d, %r10d
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_5: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
incl %r10d
addq %rax, %rdx
addq %rax, %rsi
addq %rax, %rdi
cmpl %r8d, %r10d
je .LBB3_6
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
testl %ecx, %ecx
jle .LBB3_5
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB3_2 Depth=1
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB3_4: # %.lr.ph
# Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdi,%r11,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%r11,4), %xmm0
movss %xmm0, (%rdx,%r11,4)
incq %r11
cmpq %r11, %r9
jne .LBB3_4
jmp .LBB3_5
.LBB3_6: # %._crit_edge30
retq
.Lfunc_end3:
.size _Z15sumMatrixOnHostPfS_S_ii, .Lfunc_end3-_Z15sumMatrixOnHostPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii # -- Begin function _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.p2align 4, 0x90
.type _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii,@function
_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii: # @_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii, .Lfunc_end4-_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI5_0:
.long 0x41200000 # float 10
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI5_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1640, %rsp # imm = 0x668
.cfi_def_cfa_offset 1696
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq (%rsi), %rbx
testq %rbx, %rbx
je .LBB5_1
# %bb.2:
movq %rbx, %rdi
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB5_3
.LBB5_1:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB5_3: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB5_34
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB5_6
# %bb.5:
movzbl 67(%rbx), %eax
jmp .LBB5_7
.LBB5_6:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
leaq 168(%rsp), %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
testl %eax, %eax
je .LBB5_9
# %bb.8:
movl %eax, %ebx
movl $.L.str.4, %edi
movl $.L.str.5, %esi
movl $77, %edx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movl %ebx, %esi
movq %rax, %rdx
xorl %eax, %eax
callq printf
.LBB5_9:
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
xorl %esi, %esi
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.8, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
leaq 168(%rsp), %r14
movq %r14, %rdi
callq strlen
movq %rbx, %rdi
movq %r14, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB5_34
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i77
cmpb $0, 56(%r14)
je .LBB5_12
# %bb.11:
movzbl 67(%r14), %eax
jmp .LBB5_13
.LBB5_12:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit80
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
je .LBB5_15
# %bb.14:
movl %eax, %ebx
movl $.L.str.4, %edi
movl $.L.str.5, %esi
movl $79, %edx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movl %ebx, %esi
movq %rax, %rdx
xorl %eax, %eax
callq printf
.LBB5_15:
movl $_ZSt4cout, %edi
movl $.L.str.9, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $4096, %esi # imm = 0x1000
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, %rdi
movl $4096, %esi # imm = 0x1000
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_34
# %bb.16: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i82
cmpb $0, 56(%rbx)
je .LBB5_18
# %bb.17:
movzbl 67(%rbx), %ecx
jmp .LBB5_19
.LBB5_18:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_19: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit85
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $67108864, %edi # imm = 0x4000000
callq malloc
movq %rax, %rbx
movl $67108864, %edi # imm = 0x4000000
callq malloc
movq %rax, %r14
movl $67108864, %edi # imm = 0x4000000
callq malloc
movq %rax, %r15
movl $67108864, %edi # imm = 0x4000000
callq malloc
movq %rax, %r12
xorl %r13d, %r13d
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
leaq 48(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
.p2align 4, 0x90
.LBB5_20: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI5_0(%rip), %xmm0
movss %xmm0, (%rbx,%r13,4)
incq %r13
cmpq $16777216, %r13 # imm = 0x1000000
jne .LBB5_20
# %bb.21: # %_Z11initialDataPfi.exit
leaq 48(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB5_22: # %.lr.ph.i67
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI5_0(%rip), %xmm0
movss %xmm0, (%r14,%r13,4)
incq %r13
cmpq $16777216, %r13 # imm = 0x1000000
jne .LBB5_22
# %bb.23: # %_Z11initialDataPfi.exit71
xorl %r13d, %r13d
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movl $67108864, %edx # imm = 0x4000000
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movl $67108864, %edx # imm = 0x4000000
movq %r12, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq %r15, %rax
movq %r14, %rcx
movq %rbx, %rdx
.p2align 4, 0x90
.LBB5_24: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB5_25 Depth 2
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_25: # %.lr.ph.i72
# Parent Loop BB5_24 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdx,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rcx,%rsi,4), %xmm0
movss %xmm0, (%rax,%rsi,4)
incq %rsi
cmpq $4096, %rsi # imm = 0x1000
jne .LBB5_25
# %bb.26: # %._crit_edge.i
# in Loop: Header=BB5_24 Depth=1
addq $16384, %rdx # imm = 0x4000
addq $16384, %rcx # imm = 0x4000
addq $16384, %rax # imm = 0x4000
incl %r13d
cmpl $4096, %r13d # imm = 0x1000
jne .LBB5_24
# %bb.27: # %_Z15sumMatrixOnHostPfS_S_ii.exit
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
leaq 32(%rsp), %rdi
movl $67108864, %esi # imm = 0x4000000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $67108864, %esi # imm = 0x4000000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $67108864, %esi # imm = 0x4000000
callq hipMalloc
movq 32(%rsp), %rdi
movl $67108864, %edx # imm = 0x4000000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $67108864, %edx # imm = 0x4000000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
cvtsi2sdq 56(%rsp), %xmm1
mulsd .LCPI5_1(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp) # 8-byte Spill
movabsq $549755814016, %rdi # imm = 0x8000000080
movabsq $137438953504, %rdx # imm = 0x2000000020
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_29
# %bb.28:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 160(%rsp)
movq %rcx, 152(%rsp)
movq %rdx, 144(%rsp)
movl $4096, 44(%rsp) # imm = 0x1000
movl $4096, 40(%rsp) # imm = 0x1000
leaq 160(%rsp), %rax
movq %rax, 48(%rsp)
leaq 152(%rsp), %rax
movq %rax, 56(%rsp)
leaq 144(%rsp), %rax
movq %rax, 64(%rsp)
leaq 44(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rax
movq %rax, 80(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_29:
callq hipDeviceSynchronize
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 56(%rsp), %xmm1
mulsd .LCPI5_1(%rip), %xmm1
addsd %xmm0, %xmm1
subsd 8(%rsp), %xmm1 # 8-byte Folded Reload
movsd %xmm1, 8(%rsp) # 8-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.11, %esi
movl $22, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $128, %esi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $128, %esi
movq %r13, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.12, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $32, %esi
movq %r13, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $32, %esi
movq %r13, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.13, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.14, %esi
movl $13, %edx
movq %r13, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r13, %rdi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r13
testq %r13, %r13
je .LBB5_34
# %bb.30: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i87
cmpb $0, 56(%r13)
je .LBB5_32
# %bb.31:
movzbl 67(%r13), %ecx
jmp .LBB5_33
.LBB5_32:
movq %r13, %rdi
movq %rax, %rbp
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbp, %rax
.LBB5_33: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit90
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%rsp), %rsi
movl $67108864, %edx # imm = 0x4000000
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
movq %r15, %rdi
movq %r12, %rsi
movl $16777216, %edx # imm = 0x1000000
callq _Z11checkResultPfS_i
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
callq hipDeviceReset
xorl %eax, %eax
addq $1640, %rsp # imm = 0x668
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB5_34:
.cfi_def_cfa_offset 1696
callq _ZSt16__throw_bad_castv
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Arrays do not match!"
.size .L.str, 21
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "host %5.2f gpu %5.2f at current %d\n"
.size .L.str.1, 36
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Arrays match. "
.size .L.str.2, 15
.type _Z16sumMatrixOnGPU2DPfS_S_ii,@object # @_Z16sumMatrixOnGPU2DPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.p2align 3, 0x0
_Z16sumMatrixOnGPU2DPfS_S_ii:
.quad _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.size _Z16sumMatrixOnGPU2DPfS_S_ii, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz " Starting..."
.size .L.str.3, 13
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Error: %s:%d, "
.size .L.str.4, 15
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/dhayakaw/cuda/master/PCCP/Chapter2/sumMatrixOnGPU-2D-grid-2D-block.hip"
.size .L.str.5, 128
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "code: %d, reason: %s\n"
.size .L.str.6, 22
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Using Device "
.size .L.str.7, 14
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz ": "
.size .L.str.8, 3
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Matrix size: "
.size .L.str.9, 14
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz ", "
.size .L.str.10, 3
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "sumMatrixOnGPU2D <<< ("
.size .L.str.11, 23
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "), ("
.size .L.str.12, 5
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz ") >>>"
.size .L.str.13, 6
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "Time elapsed "
.size .L.str.14, 14
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16sumMatrixOnGPU2DPfS_S_ii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z16sumMatrixOnGPU2DPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16sumMatrixOnGPU2DPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e680000002100 */
/*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e620000002500 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06070 */
/*0070*/ IMAD R0, R5, c[0x0][0x0], R0 ; /* 0x0000000005007a24 */
/* 0x002fca00078e0200 */
/*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706470 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE.U32 R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0007 */
/*00e0*/ IMAD.WIDE.U32 R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0007 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE.U32 R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e0007 */
/*0120*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16sumMatrixOnGPU2DPfS_S_ii
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.p2align 8
.type _Z16sumMatrixOnGPU2DPfS_S_ii,@function
_Z16sumMatrixOnGPU2DPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[8:11], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1]
v_mov_b32_e32 v3, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s8, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16sumMatrixOnGPU2DPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16sumMatrixOnGPU2DPfS_S_ii, .Lfunc_end0-_Z16sumMatrixOnGPU2DPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16sumMatrixOnGPU2DPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16sumMatrixOnGPU2DPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0018f34e_00000000-6_sumMatrixOnGPU-2D-grid-2D-block.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3676:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3676:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9cpuSecondv
.type _Z9cpuSecondv, @function
_Z9cpuSecondv:
.LFB3669:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z9cpuSecondv, .-_Z9cpuSecondv
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "Arrays do not match!"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "host %5.2f gpu %5.2f at current %d\n"
.section .rodata.str1.1
.LC5:
.string "Arrays match. "
.text
.globl _Z11checkResultPfS_i
.type _Z11checkResultPfS_i, @function
_Z11checkResultPfS_i:
.LFB3670:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
testl %edx, %edx
jle .L8
movl $0, %ebx
movss .LC1(%rip), %xmm2
movsd .LC2(%rip), %xmm1
.L15:
movq %rdi, %r12
movq %rsi, %rbp
movss (%rdi), %xmm0
subss (%rsi), %xmm0
andps %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
comisd %xmm1, %xmm0
ja .L23
addl $1, %ebx
addq $4, %rdi
addq $4, %rsi
cmpl %ebx, %edx
jne .L15
.L8:
movl $14, %edx
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L24
cmpb $0, 56(%rbx)
je .L17
movzbl 67(%rbx), %esi
.L18:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.L7:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movl $20, %edx
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%r13,%rax), %r13
testq %r13, %r13
je .L25
cmpb $0, 56(%r13)
je .L12
movzbl 67(%r13), %esi
.L13:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
pxor %xmm0, %xmm0
cvtss2sd (%r12), %xmm0
movl %ebx, %edx
pxor %xmm1, %xmm1
cvtss2sd 0(%rbp), %xmm1
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
jmp .L7
.L25:
call _ZSt16__throw_bad_castv@PLT
.L12:
movq %r13, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%r13), %rax
movl $10, %esi
movq %r13, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L13
.L24:
call _ZSt16__throw_bad_castv@PLT
.L17:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L18
.cfi_endproc
.LFE3670:
.size _Z11checkResultPfS_i, .-_Z11checkResultPfS_i
.globl _Z11initialDataPfi
.type _Z11initialDataPfi, @function
_Z11initialDataPfi:
.LFB3671:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r12
movl %esi, %ebp
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
call time@PLT
movl %eax, %edi
call srand@PLT
testl %ebp, %ebp
jle .L26
movq %r12, %rbx
movslq %ebp, %rbp
leaq (%r12,%rbp,4), %rbp
.L28:
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC6(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L28
.L26:
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size _Z11initialDataPfi, .-_Z11initialDataPfi
.globl _Z15sumMatrixOnHostPfS_S_ii
.type _Z15sumMatrixOnHostPfS_S_ii, @function
_Z15sumMatrixOnHostPfS_S_ii:
.LFB3672:
.cfi_startproc
endbr64
movq %rsi, %r9
testl %r8d, %r8d
jle .L33
movslq %ecx, %rsi
salq $2, %rsi
movl $0, %r10d
jmp .L35
.L36:
movss (%rdi,%rax), %xmm0
addss (%r9,%rax), %xmm0
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq %rax, %rsi
jne .L36
.L37:
addq %rsi, %rdi
addq %rsi, %r9
addq %rsi, %rdx
addl $1, %r10d
cmpl %r10d, %r8d
je .L33
.L35:
movl $0, %eax
testl %ecx, %ecx
jg .L36
jmp .L37
.L33:
ret
.cfi_endproc
.LFE3672:
.size _Z15sumMatrixOnHostPfS_S_ii, .-_Z15sumMatrixOnHostPfS_S_ii
.globl _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
.type _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii, @function
_Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii:
.LFB3698:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L46
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16sumMatrixOnGPU2DPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii, .-_Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.type _Z16sumMatrixOnGPU2DPfS_S_ii, @function
_Z16sumMatrixOnGPU2DPfS_S_ii:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z16sumMatrixOnGPU2DPfS_S_ii, .-_Z16sumMatrixOnGPU2DPfS_S_ii
.section .rodata.str1.1
.LC7:
.string " Starting..."
.section .rodata.str1.8
.align 8
.LC8:
.string "/home/ubuntu/Datasets/stackv2/train-structured/dhayakaw/cuda/master/PCCP/Chapter2/sumMatrixOnGPU-2D-grid-2D-block.cu"
.section .rodata.str1.1
.LC9:
.string "Error: %s:%d, "
.LC10:
.string "code: %d, reason: %s\n"
.LC11:
.string "Using Device "
.LC12:
.string ": "
.LC13:
.string "Matrix size: "
.LC14:
.string ", "
.LC15:
.string "sumMatrixOnGPU2D <<< ("
.LC16:
.string "), ("
.LC17:
.string ") >>>"
.LC18:
.string "Time elapsed "
.text
.globl main
.type main, @function
main:
.LFB3673:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $1104, %rsp
.cfi_def_cfa_offset 1152
movq %fs:40, %rax
movq %rax, 1096(%rsp)
xorl %eax, %eax
movq (%rsi), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC7(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq 64(%rsp), %rdi
movl $0, %esi
call cudaGetDeviceProperties_v2@PLT
testl %eax, %eax
jne .L55
.L50:
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $0, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC12(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq 64(%rsp), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %edi
call cudaSetDevice@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L56
.L51:
leaq .LC13(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $4096, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC14(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $4096, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $67108864, %edi
call malloc@PLT
movq %rax, %r13
movl $67108864, %edi
call malloc@PLT
movq %rax, %r12
movl $67108864, %edi
call malloc@PLT
movq %rax, %rbp
movl $67108864, %edi
call malloc@PLT
movq %rax, %rbx
call _Z9cpuSecondv
movl $16777216, %esi
movq %r13, %rdi
call _Z11initialDataPfi
movl $16777216, %esi
movq %r12, %rdi
call _Z11initialDataPfi
call _Z9cpuSecondv
movl $67108864, %edx
movl $0, %esi
movq %rbp, %rdi
call memset@PLT
movl $67108864, %edx
movl $0, %esi
movq %rbx, %rdi
call memset@PLT
call _Z9cpuSecondv
movl $4096, %r8d
movl $4096, %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq %r13, %rdi
call _Z15sumMatrixOnHostPfS_S_ii
call _Z9cpuSecondv
leaq 16(%rsp), %rdi
movl $67108864, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $67108864, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $67108864, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $67108864, %edx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $67108864, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 48(%rsp)
movl $1, 60(%rsp)
call _Z9cpuSecondv
movsd %xmm0, 8(%rsp)
movl $128, 52(%rsp)
movl $128, 56(%rsp)
movl $32, 40(%rsp)
movl $32, 44(%rsp)
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L57
.L52:
call cudaDeviceSynchronize@PLT
call _Z9cpuSecondv
subsd 8(%rsp), %xmm0
movsd %xmm0, 8(%rsp)
leaq .LC15(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $128, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC14(%rip), %r14
movq %r14, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $128, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC16(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $32, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
movq %r14, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $32, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC17(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC18(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $2, %ecx
movl $67108864, %edx
movq 32(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl $16777216, %edx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z11checkResultPfS_i
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
call cudaDeviceReset@PLT
movq 1096(%rsp), %rax
subq %fs:40, %rax
jne .L58
movl $0, %eax
addq $1104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
movl %eax, %ebx
movl $75, %ecx
leaq .LC8(%rip), %rdx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %ebx, %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L50
.L56:
movl $77, %ecx
leaq .LC8(%rip), %rdx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %ebx, %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L51
.L57:
movl $4096, %r8d
movl $4096, %ecx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z42__device_stub__Z16sumMatrixOnGPU2DPfS_S_iiPfS_S_ii
jmp .L52
.L58:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3673:
.size main, .-main
.section .rodata.str1.1
.LC19:
.string "_Z16sumMatrixOnGPU2DPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3701:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z16sumMatrixOnGPU2DPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC1:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC2:
.long -500134854
.long 1044740494
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC6:
.long 1092616192
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sumMatrixOnGPU-2D-grid-2D-block.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9cpuSecondv
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z9cpuSecondv
.p2align 4, 0x90
.type _Z9cpuSecondv,@function
_Z9cpuSecondv: # @_Z9cpuSecondv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z9cpuSecondv, .Lfunc_end0-_Z9cpuSecondv
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z11checkResultPfS_i
.LCPI1_0:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI1_1:
.quad 0x3e45798ee2308c3a # double 1.0E-8
.text
.globl _Z11checkResultPfS_i
.p2align 4, 0x90
.type _Z11checkResultPfS_i,@function
_Z11checkResultPfS_i: # @_Z11checkResultPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
testl %edx, %edx
jle .LBB1_9
# %bb.1: # %.lr.ph.preheader
movl %edx, %eax
xorl %ebx, %ebx
movaps .LCPI1_0(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rbx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss (%rsi,%rbx,4), %xmm2
andps %xmm0, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm1, %xmm2
ja .LBB1_3
# %bb.8: # in Loop: Header=BB1_2 Depth=1
incq %rbx
cmpq %rbx, %rax
jne .LBB1_2
.LBB1_9: # %.critedge
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB1_14
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i15
cmpb $0, 56(%rbx)
je .LBB1_12
# %bb.11:
movzbl 67(%rbx), %eax
jmp .LBB1_13
.LBB1_3:
movq %rdi, %r12
movq %rsi, %r15
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $20, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB1_14
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB1_6
# %bb.5:
movzbl 67(%r14), %eax
jmp .LBB1_7
.LBB1_12:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit18
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp _ZNSo5flushEv # TAILCALL
.LBB1_6:
.cfi_def_cfa_offset 48
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movss (%r12,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss (%r15,%rbx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movl $.L.str.1, %edi
movl %ebx, %esi
movb $2, %al
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.LBB1_14:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size _Z11checkResultPfS_i, .Lfunc_end1-_Z11checkResultPfS_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z11initialDataPfi
.LCPI2_0:
.long 0x41200000 # float 10
.text
.globl _Z11initialDataPfi
.p2align 4, 0x90
.type _Z11initialDataPfi,@function
_Z11initialDataPfi: # @_Z11initialDataPfi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movq %rdi, %rbx
movq %rsp, %rdi
callq time
movl %eax, %edi
callq srand
testl %ebp, %ebp
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI2_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB2_2
.LBB2_3: # %._crit_edge
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z11initialDataPfi, .Lfunc_end2-_Z11initialDataPfi
.cfi_endproc
# -- End function
.globl _Z15sumMatrixOnHostPfS_S_ii # -- Begin function _Z15sumMatrixOnHostPfS_S_ii
.p2align 4, 0x90
.type _Z15sumMatrixOnHostPfS_S_ii,@function
_Z15sumMatrixOnHostPfS_S_ii: # @_Z15sumMatrixOnHostPfS_S_ii
.cfi_startproc
# %bb.0:
testl %r8d, %r8d
jle .LBB3_6
# %bb.1: # %.preheader.lr.ph
movslq %ecx, %rax
movl %eax, %r9d
shlq $2, %rax
xorl %r10d, %r10d
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_5: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
incl %r10d
addq %rax, %rdx
addq %rax, %rsi
addq %rax, %rdi
cmpl %r8d, %r10d
je .LBB3_6
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
testl %ecx, %ecx
jle .LBB3_5
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB3_2 Depth=1
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB3_4: # %.lr.ph
# Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdi,%r11,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%r11,4), %xmm0
movss %xmm0, (%rdx,%r11,4)
incq %r11
cmpq %r11, %r9
jne .LBB3_4
jmp .LBB3_5
.LBB3_6: # %._crit_edge30
retq
.Lfunc_end3:
.size _Z15sumMatrixOnHostPfS_S_ii, .Lfunc_end3-_Z15sumMatrixOnHostPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii # -- Begin function _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.p2align 4, 0x90
.type _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii,@function
_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii: # @_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii, .Lfunc_end4-_Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI5_0:
.long 0x41200000 # float 10
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI5_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1640, %rsp # imm = 0x668
.cfi_def_cfa_offset 1696
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq (%rsi), %rbx
testq %rbx, %rbx
je .LBB5_1
# %bb.2:
movq %rbx, %rdi
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB5_3
.LBB5_1:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB5_3: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB5_34
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB5_6
# %bb.5:
movzbl 67(%rbx), %eax
jmp .LBB5_7
.LBB5_6:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
leaq 168(%rsp), %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
testl %eax, %eax
je .LBB5_9
# %bb.8:
movl %eax, %ebx
movl $.L.str.4, %edi
movl $.L.str.5, %esi
movl $77, %edx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movl %ebx, %esi
movq %rax, %rdx
xorl %eax, %eax
callq printf
.LBB5_9:
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
xorl %esi, %esi
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.8, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
leaq 168(%rsp), %r14
movq %r14, %rdi
callq strlen
movq %rbx, %rdi
movq %r14, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB5_34
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i77
cmpb $0, 56(%r14)
je .LBB5_12
# %bb.11:
movzbl 67(%r14), %eax
jmp .LBB5_13
.LBB5_12:
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit80
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
je .LBB5_15
# %bb.14:
movl %eax, %ebx
movl $.L.str.4, %edi
movl $.L.str.5, %esi
movl $79, %edx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movl %ebx, %esi
movq %rax, %rdx
xorl %eax, %eax
callq printf
.LBB5_15:
movl $_ZSt4cout, %edi
movl $.L.str.9, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $4096, %esi # imm = 0x1000
callq _ZNSolsEi
movq %rax, %rbx
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, %rdi
movl $4096, %esi # imm = 0x1000
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_34
# %bb.16: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i82
cmpb $0, 56(%rbx)
je .LBB5_18
# %bb.17:
movzbl 67(%rbx), %ecx
jmp .LBB5_19
.LBB5_18:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_19: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit85
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $67108864, %edi # imm = 0x4000000
callq malloc
movq %rax, %rbx
movl $67108864, %edi # imm = 0x4000000
callq malloc
movq %rax, %r14
movl $67108864, %edi # imm = 0x4000000
callq malloc
movq %rax, %r15
movl $67108864, %edi # imm = 0x4000000
callq malloc
movq %rax, %r12
xorl %r13d, %r13d
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
leaq 48(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
.p2align 4, 0x90
.LBB5_20: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI5_0(%rip), %xmm0
movss %xmm0, (%rbx,%r13,4)
incq %r13
cmpq $16777216, %r13 # imm = 0x1000000
jne .LBB5_20
# %bb.21: # %_Z11initialDataPfi.exit
leaq 48(%rsp), %rdi
callq time
movl %eax, %edi
callq srand
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB5_22: # %.lr.ph.i67
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI5_0(%rip), %xmm0
movss %xmm0, (%r14,%r13,4)
incq %r13
cmpq $16777216, %r13 # imm = 0x1000000
jne .LBB5_22
# %bb.23: # %_Z11initialDataPfi.exit71
xorl %r13d, %r13d
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movl $67108864, %edx # imm = 0x4000000
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movl $67108864, %edx # imm = 0x4000000
movq %r12, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq %r15, %rax
movq %r14, %rcx
movq %rbx, %rdx
.p2align 4, 0x90
.LBB5_24: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB5_25 Depth 2
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_25: # %.lr.ph.i72
# Parent Loop BB5_24 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdx,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rcx,%rsi,4), %xmm0
movss %xmm0, (%rax,%rsi,4)
incq %rsi
cmpq $4096, %rsi # imm = 0x1000
jne .LBB5_25
# %bb.26: # %._crit_edge.i
# in Loop: Header=BB5_24 Depth=1
addq $16384, %rdx # imm = 0x4000
addq $16384, %rcx # imm = 0x4000
addq $16384, %rax # imm = 0x4000
incl %r13d
cmpl $4096, %r13d # imm = 0x1000
jne .LBB5_24
# %bb.27: # %_Z15sumMatrixOnHostPfS_S_ii.exit
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
leaq 32(%rsp), %rdi
movl $67108864, %esi # imm = 0x4000000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $67108864, %esi # imm = 0x4000000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $67108864, %esi # imm = 0x4000000
callq hipMalloc
movq 32(%rsp), %rdi
movl $67108864, %edx # imm = 0x4000000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $67108864, %edx # imm = 0x4000000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
cvtsi2sdq 56(%rsp), %xmm1
mulsd .LCPI5_1(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp) # 8-byte Spill
movabsq $549755814016, %rdi # imm = 0x8000000080
movabsq $137438953504, %rdx # imm = 0x2000000020
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_29
# %bb.28:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 160(%rsp)
movq %rcx, 152(%rsp)
movq %rdx, 144(%rsp)
movl $4096, 44(%rsp) # imm = 0x1000
movl $4096, 40(%rsp) # imm = 0x1000
leaq 160(%rsp), %rax
movq %rax, 48(%rsp)
leaq 152(%rsp), %rax
movq %rax, 56(%rsp)
leaq 144(%rsp), %rax
movq %rax, 64(%rsp)
leaq 44(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rax
movq %rax, 80(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_29:
callq hipDeviceSynchronize
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 56(%rsp), %xmm1
mulsd .LCPI5_1(%rip), %xmm1
addsd %xmm0, %xmm1
subsd 8(%rsp), %xmm1 # 8-byte Folded Reload
movsd %xmm1, 8(%rsp) # 8-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.11, %esi
movl $22, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $128, %esi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $128, %esi
movq %r13, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.12, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $32, %esi
movq %r13, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.10, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $32, %esi
movq %r13, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r13
movl $.L.str.13, %esi
movl $5, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.14, %esi
movl $13, %edx
movq %r13, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r13, %rdi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r13
testq %r13, %r13
je .LBB5_34
# %bb.30: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i87
cmpb $0, 56(%r13)
je .LBB5_32
# %bb.31:
movzbl 67(%r13), %ecx
jmp .LBB5_33
.LBB5_32:
movq %r13, %rdi
movq %rax, %rbp
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbp, %rax
.LBB5_33: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit90
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 16(%rsp), %rsi
movl $67108864, %edx # imm = 0x4000000
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
movq %r15, %rdi
movq %r12, %rsi
movl $16777216, %edx # imm = 0x1000000
callq _Z11checkResultPfS_i
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
callq hipDeviceReset
xorl %eax, %eax
addq $1640, %rsp # imm = 0x668
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB5_34:
.cfi_def_cfa_offset 1696
callq _ZSt16__throw_bad_castv
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16sumMatrixOnGPU2DPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Arrays do not match!"
.size .L.str, 21
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "host %5.2f gpu %5.2f at current %d\n"
.size .L.str.1, 36
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Arrays match. "
.size .L.str.2, 15
.type _Z16sumMatrixOnGPU2DPfS_S_ii,@object # @_Z16sumMatrixOnGPU2DPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z16sumMatrixOnGPU2DPfS_S_ii
.p2align 3, 0x0
_Z16sumMatrixOnGPU2DPfS_S_ii:
.quad _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.size _Z16sumMatrixOnGPU2DPfS_S_ii, 8
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz " Starting..."
.size .L.str.3, 13
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Error: %s:%d, "
.size .L.str.4, 15
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/dhayakaw/cuda/master/PCCP/Chapter2/sumMatrixOnGPU-2D-grid-2D-block.hip"
.size .L.str.5, 128
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "code: %d, reason: %s\n"
.size .L.str.6, 22
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Using Device "
.size .L.str.7, 14
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz ": "
.size .L.str.8, 3
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Matrix size: "
.size .L.str.9, 14
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz ", "
.size .L.str.10, 3
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "sumMatrixOnGPU2D <<< ("
.size .L.str.11, 23
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "), ("
.size .L.str.12, 5
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz ") >>>"
.size .L.str.13, 6
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "Time elapsed "
.size .L.str.14, 14
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16sumMatrixOnGPU2DPfS_S_ii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__sumMatrixOnGPU2DPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z16sumMatrixOnGPU2DPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**
* APPROXIMATE PATTERN MATCHING
*
* INF560
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define APM_DEBUG 0
char *
read_input_file( char * filename, int * size )
{
char * buf ;
off_t fsize;
int fd = 0 ;
int n_bytes = 1 ;
/* Open the text file */
fd = open( filename, O_RDONLY ) ;
if ( fd == -1 )
{
fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
return NULL ;
}
/* Get the number of characters in the textfile */
fsize = lseek(fd, 0, SEEK_END);
if ( fsize == -1 )
{
fprintf( stderr, "Unable to lseek to the end\n" ) ;
return NULL ;
}
#if APM_DEBUG
printf( "File length: %lld\n", fsize ) ;
#endif
/* Go back to the beginning of the input file */
if ( lseek(fd, 0, SEEK_SET) == -1 )
{
fprintf( stderr, "Unable to lseek to start\n" ) ;
return NULL ;
}
/* Allocate data to copy the target text */
buf = (char *)malloc( fsize * sizeof ( char ) ) ;
if ( buf == NULL )
{
fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
fsize ) ;
return NULL ;
}
n_bytes = read( fd, buf, fsize ) ;
if ( n_bytes != fsize )
{
fprintf( stderr,
"Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
fsize, n_bytes) ;
return NULL ;
}
#if APM_DEBUG
printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif
*size = n_bytes ;
close( fd ) ;
return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
__host__ __device__ int levenshtein(char *s1, char *s2, int len, int * column) {
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++)
{
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
return(column[len]);
}
__global__ void matchesKernel(int* d_n_matches, char * d_buf, char * d_pattern, int i, int size_pattern, int offset, int n_bytes, int approx_factor){
/* Traverse the input data up to the end of the file */
int j = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int distance = 0 ;
int size ;
size = size_pattern ;
int* columns = (int *) malloc((size_pattern + 1) * sizeof(int));
while (j < n_bytes) {
if (n_bytes - j < size_pattern ){
size = n_bytes - j ;
}
distance = levenshtein(d_pattern + offset, &d_buf[j], size, columns ) ;
if ( distance <= approx_factor) {
atomicAdd(&d_n_matches[i], 1);
}
j += stride;
}
free(columns);
}
int
main( int argc, char ** argv )
{
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i;
char * buf ;
struct timeval t1, t2;
double duration ;
int n_bytes ;
int * n_matches ;
/* Check number of arguments */
if ( argc < 4 )
{
printf( "Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0] ) ;
return 1 ;
}
/* Get the distance factor */
approx_factor = atoi( argv[1] ) ;
/* Grab the filename containing the target text */
filename = argv[2] ;
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3 ;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if ( pattern == NULL )
{
fprintf( stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns ) ;
return 1 ;
}
/* Grab the patterns */
for ( i = 0 ; i < nb_patterns ; i++ )
{
int l ;
l = strlen(argv[i+3]) ;
if ( l <= 0 )
{
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if ( pattern[i] == NULL )
{
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor ) ;
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL )
{
return 1 ;
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
for (i = 0; i < nb_patterns; i++) {
n_matches[i] = 0;
}
if ( n_matches == NULL )
{
fprintf( stderr, "Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int ) ) ;
return 1 ;
}
/*****
* BEGIN MAIN LOOP
******/
/* Timer start */
gettimeofday(&t1, NULL);
/* Check each pattern one by one */
int* d_n_matches;
char * d_pattern;
char* d_buf;
int* offset = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int* lens = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int sum_lens;
lens[0] = strlen(pattern[0]);
offset[0] = 0;
sum_lens = lens[0];
for (i = 1; i < nb_patterns; i++) {
offset[i] = offset[i-1] + lens[i-1];
lens[i] = strlen(pattern[i]);
sum_lens += lens[i];
}
char* concat_patterns = (char*) malloc( sum_lens * sizeof( char ) ) ;
for (i = 0; i < nb_patterns; i++) {
strcpy (concat_patterns + offset[i], pattern[i]);
}
cudaError_t error;
cudaMalloc((void **)&d_n_matches, nb_patterns*sizeof(int));
cudaMalloc((void **)&d_pattern, sum_lens*sizeof(char));
cudaMalloc((void **)&d_buf, n_bytes);
cudaMemcpy(d_pattern, concat_patterns, sum_lens*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(d_buf, buf, n_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_n_matches, n_matches, nb_patterns*sizeof(int), cudaMemcpyHostToDevice);
int Dg = 4;
int Db = 256;
for (i = 0; i < nb_patterns; i++) {
matchesKernel<<<Dg,Db>>>(d_n_matches, d_buf, d_pattern, i, lens[i], offset[i], n_bytes, approx_factor);
cudaGetLastError();
}
cudaMemcpy(n_matches, d_n_matches, nb_patterns*sizeof(int), cudaMemcpyDeviceToHost);
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* END MAIN LOOP
******/
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
} | code for sm_80
Function : _Z13matchesKernelPiPcS0_iiiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R2, 0x0 ; /* 0x0000000000027802 */
/* 0x000fe20000000f00 */
/*0020*/ S2R R16, SR_CTAID.X ; /* 0x0000000000107919 */
/* 0x000e220000002500 */
/*0030*/ ULDC UR4, c[0x0][0x17c] ; /* 0x00005f0000047ab9 */
/* 0x000fe40000000800 */
/*0040*/ UIADD3 UR4, UR4, 0x1, URZ ; /* 0x0000000104047890 */
/* 0x000fe2000fffe03f */
/*0050*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0060*/ LDC.64 R2, c[0x4][R2] ; /* 0x0100000002027b82 */
/* 0x000e640000000a00 */
/*0070*/ UIMAD.WIDE UR4, UR4, 0x4, URZ ; /* 0x00000004040478a5 */
/* 0x000fcc000f8e023f */
/*0080*/ IMAD.U32 R7, RZ, RZ, UR5 ; /* 0x00000005ff077e24 */
/* 0x000fe4000f8e00ff */
/*0090*/ IMAD.U32 R5, RZ, RZ, UR5 ; /* 0x00000005ff057e24 */
/* 0x000fe4000f8e00ff */
/*00a0*/ IMAD.U32 R4, RZ, RZ, UR4 ; /* 0x00000004ff047e24 */
/* 0x000fe4000f8e00ff */
/*00b0*/ IMAD.U32 R6, RZ, RZ, UR4 ; /* 0x00000004ff067e24 */
/* 0x000fe4000f8e00ff */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, R7 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0007 */
/*00d0*/ IMAD R16, R16, c[0x0][0x0], R9 ; /* 0x0000000010107a24 */
/* 0x001fc400078e0209 */
/*00e0*/ LEPC R6 ; /* 0x000000000006734e */
/* 0x000fe20000000000 */
/*00f0*/ MOV R9, 0x160 ; /* 0x0000016000097802 */
/* 0x000fc40000000f00 */
/*0100*/ MOV R20, 0xe0 ; /* 0x000000e000147802 */
/* 0x000fe40000000f00 */
/*0110*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0120*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0130*/ IADD3 R20, P0, P1, -R20, R9, R6 ; /* 0x0000000914147210 */
/* 0x000fc8000791e106 */
/*0140*/ IADD3.X R21, ~R0, R21, R7, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2507 */
/*0150*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*0160*/ ISETP.GE.AND P0, PT, R16, c[0x0][0x184], PT ; /* 0x0000610010007a0c */
/* 0x000fe20003f06270 */
/*0170*/ BSSY B1, 0x8c0 ; /* 0x0000074000017945 */
/* 0x000fd80003800000 */
/*0180*/ @P0 BRA 0x8b0 ; /* 0x0000072000000947 */
/* 0x000fea0003800000 */
/*0190*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff007624 */
/* 0x000fe400078e00ff */
/*01a0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff027624 */
/* 0x000fe400078e00ff */
/*01b0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*01c0*/ SHF.R.S32.HI R0, RZ, 0x1f, R0 ; /* 0x0000001fff007819 */
/* 0x000fe20000011400 */
/*01d0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff077624 */
/* 0x000fe400078e00ff */
/*01e0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e0203 */
/*01f0*/ IADD3 R6, -R16, c[0x0][0x184], RZ ; /* 0x0000610010067a10 */
/* 0x000fe20007ffe1ff */
/*0200*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0210*/ BSSY B0, 0x790 ; /* 0x0000057000007945 */
/* 0x000fe40003800000 */
/*0220*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x17c], PT ; /* 0x00005f0006007a0c */
/* 0x000fc80003f06270 */
/*0230*/ SEL R7, R6, R7, !P0 ; /* 0x0000000706077207 */
/* 0x000fc80004000000 */
/*0240*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fda0003f05270 */
/*0250*/ @!P0 BRA 0x780 ; /* 0x0000052000008947 */
/* 0x001fea0003800000 */
/*0260*/ ISETP.GT.U32.AND P0, PT, R7, 0x1, PT ; /* 0x000000010700780c */
/* 0x000fe20003f04070 */
/*0270*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fe200078e00ff */
/*0280*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0290*/ BSSY B2, 0x430 ; /* 0x0000019000027945 */
/* 0x000ff40003800000 */
/*02a0*/ @!P0 IMAD.WIDE.U32 R8, R6, 0x4, R4 ; /* 0x0000000406088825 */
/* 0x000fca00078e0004 */
/*02b0*/ @!P0 ST.E [R8.64], R6 ; /* 0x0000000608008985 */
/* 0x0001e4000c101904 */
/*02c0*/ @!P0 IMAD.MOV.U32 R6, RZ, RZ, 0x2 ; /* 0x00000002ff068424 */
/* 0x001fc800078e00ff */
/*02d0*/ IMAD.IADD R10, R7.reuse, 0x1, -R6 ; /* 0x00000001070a7824 */
/* 0x040fe200078e0a06 */
/*02e0*/ ISETP.GT.U32.AND P1, PT, R7, R6, PT ; /* 0x000000060700720c */
/* 0x000fc80003f24070 */
/*02f0*/ ISETP.LE.U32.OR P1, PT, R10, 0x2, !P1 ; /* 0x000000020a00780c */
/* 0x000fda0004f23470 */
/*0300*/ @P1 BRA 0x420 ; /* 0x0000011000001947 */
/* 0x000fea0003800000 */
/*0310*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0320*/ IADD3 R23, R7, -0x2, RZ ; /* 0xfffffffe07177810 */
/* 0x000fe40007ffe0ff */
/*0330*/ IMAD.WIDE.U32 R8, R6.reuse, 0x4, R4.reuse ; /* 0x0000000406087825 */
/* 0x140fe200078e0004 */
/*0340*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0350*/ IADD3 R17, R6.reuse, 0x1, RZ ; /* 0x0000000106117810 */
/* 0x040fe40007ffe0ff */
/*0360*/ IADD3 R19, R6.reuse, 0x2, RZ ; /* 0x0000000206137810 */
/* 0x040fe20007ffe0ff */
/*0370*/ ST.E [R8.64], R6 ; /* 0x0000000608007985 */
/* 0x0001e2000c101904 */
/*0380*/ IADD3 R21, R6, 0x3, RZ ; /* 0x0000000306157810 */
/* 0x000fe20007ffe0ff */
/*0390*/ IMAD.WIDE.U32 R10, R17, 0x4, R4 ; /* 0x00000004110a7825 */
/* 0x000fc800078e0004 */
/*03a0*/ IMAD.WIDE.U32 R12, R19, 0x4, R4.reuse ; /* 0x00000004130c7825 */
/* 0x100fe200078e0004 */
/*03b0*/ ST.E [R10.64], R17 ; /* 0x000000110a007985 */
/* 0x0003e6000c101904 */
/*03c0*/ IMAD.WIDE.U32 R14, R21, 0x4, R4 ; /* 0x00000004150e7825 */
/* 0x000fe200078e0004 */
/*03d0*/ ST.E [R12.64], R19 ; /* 0x000000130c007985 */
/* 0x0003e2000c101904 */
/*03e0*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x001fc60007ffe0ff */
/*03f0*/ ST.E [R14.64], R21 ; /* 0x000000150e007985 */
/* 0x0003e2000c101904 */
/*0400*/ ISETP.GE.U32.AND P1, PT, R6, R23, PT ; /* 0x000000170600720c */
/* 0x000fda0003f26070 */
/*0410*/ @!P1 BRA 0x330 ; /* 0xffffff1000009947 */
/* 0x002fea000383ffff */
/*0420*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0430*/ IMAD.IADD R8, R7.reuse, 0x1, -R6 ; /* 0x0000000107087824 */
/* 0x040fe200078e0a06 */
/*0440*/ ISETP.GT.U32.AND P1, PT, R7, R6, PT ; /* 0x000000060700720c */
/* 0x000fe20003f24070 */
/*0450*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0460*/ SHF.R.S32.HI R14, RZ, 0x1f, R16 ; /* 0x0000001fff0e7819 */
/* 0x000fe20000011410 */
/*0470*/ IMAD.MOV.U32 R15, RZ, RZ, 0x1 ; /* 0x00000001ff0f7424 */
/* 0x000fe200078e00ff */
/*0480*/ ISETP.LE.U32.OR P1, PT, R8, RZ, !P1 ; /* 0x000000ff0800720c */
/* 0x000fda0004f23470 */
/*0490*/ @!P1 PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000981c */
/* 0x000fe20003f0e170 */
/*04a0*/ @!P1 IMAD.WIDE.U32 R12, R6.reuse, 0x4, R4 ; /* 0x00000004060c9825 */
/* 0x040fe200078e0004 */
/*04b0*/ @!P1 IADD3 R17, R6, 0x1, RZ ; /* 0x0000000106119810 */
/* 0x000fc80007ffe0ff */
/*04c0*/ @!P1 ST.E [R12.64], R6 ; /* 0x000000060c009985 */
/* 0x0001e2000c101904 */
/*04d0*/ @!P1 IMAD.WIDE.U32 R10, R17, 0x4, R4 ; /* 0x00000004110a9825 */
/* 0x000fca00078e0004 */
/*04e0*/ @!P1 ST.E [R10.64], R17 ; /* 0x000000110a009985 */
/* 0x0003e2000c101904 */
/*04f0*/ @!P1 IADD3 R6, R6, 0x2, RZ ; /* 0x0000000206069810 */
/* 0x001fc80007ffe0ff */
/*0500*/ ISETP.LE.U32.OR P0, PT, R6, R7, P0 ; /* 0x000000070600720c */
/* 0x000fda0000703470 */
/*0510*/ @P0 IMAD.WIDE.U32 R8, R6, 0x4, R4 ; /* 0x0000000406080825 */
/* 0x000fca00078e0004 */
/*0520*/ @P0 ST.E [R8.64], R6 ; /* 0x0000000608000985 */
/* 0x0003e4000c101904 */
/*0530*/ IADD3 R17, R15, -0x1, RZ ; /* 0xffffffff0f117810 */
/* 0x002fe20007ffe0ff */
/*0540*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0550*/ IMAD.MOV.U32 R19, RZ, RZ, R15 ; /* 0x000000ffff137224 */
/* 0x001fe200078e000f */
/*0560*/ ST.E [R4.64], R15 ; /* 0x0000000f04007985 */
/* 0x0001e2000c101904 */
/*0570*/ IADD3 R8, P1, P2, R17, c[0x0][0x168], R16 ; /* 0x00005a0011087a10 */
/* 0x000fe20007a3e010 */
/*0580*/ BSSY B2, 0x770 ; /* 0x000001e000027945 */
/* 0x000fe20003800000 */
/*0590*/ IMAD.MOV.U32 R6, RZ, RZ, 0x1 ; /* 0x00000001ff067424 */
/* 0x000fe400078e00ff */
/*05a0*/ IADD3.X R9, RZ, c[0x0][0x16c], R14, P1, P2 ; /* 0x00005b00ff097a10 */
/* 0x000fe40000fe440e */
/*05b0*/ IADD3 R15, R15, 0x1, RZ ; /* 0x000000010f0f7810 */
/* 0x001fc80007ffe0ff */
/*05c0*/ ISETP.GT.U32.AND P0, PT, R15, R7, PT ; /* 0x000000070f00720c */
/* 0x000fe40003f04070 */
/*05d0*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff0d7624 */
/* 0x000fe200078e00ff */
/*05e0*/ IADD3 R12, R6.reuse, -0x1, RZ ; /* 0xffffffff060c7810 */
/* 0x040fe20007ffe0ff */
/*05f0*/ IMAD.WIDE.U32 R10, R6, 0x4, R4 ; /* 0x00000004060a7825 */
/* 0x001fe200078e0004 */
/*0600*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0610*/ IADD3 R12, P1, P2, R12, c[0x0][0x170], R13 ; /* 0x00005c000c0c7a10 */
/* 0x000fe20007a3e00d */
/*0620*/ LDG.E.U8 R18, [R8.64] ; /* 0x0000000408127981 */
/* 0x000ea6000c1e1100 */
/*0630*/ IADD3.X R13, RZ, c[0x0][0x174], R0, P1, P2 ; /* 0x00005d00ff0d7a10 */
/* 0x000fe20000fe4400 */
/*0640*/ LD.E R22, [R10.64] ; /* 0x000000040a167980 */
/* 0x000eea000c101900 */
/*0650*/ LDG.E.U8 R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea2000c1e1100 */
/*0660*/ IADD3 R6, R6, 0x1, RZ ; /* 0x0000000106067810 */
/* 0x000fc40007ffe0ff */
/*0670*/ ISETP.GE.AND P1, PT, R22, R19, PT ; /* 0x000000131600720c */
/* 0x008fc80003f26270 */
/*0680*/ ISETP.NE.OR P3, PT, R13.reuse, R18.reuse, !P1 ; /* 0x000000120d00720c */
/* 0x0c4fe40004f65670 */
/*0690*/ ISETP.NE.OR P2, PT, R13, R18, P1 ; /* 0x000000120d00720c */
/* 0x000fe40000f45670 */
/*06a0*/ IADD3 R18, R17, 0x1, RZ ; /* 0x0000000111127810 */
/* 0x000fca0007ffe0ff */
/*06b0*/ @P1 IMAD.MOV.U32 R20, RZ, RZ, R18 ; /* 0x000000ffff141224 */
/* 0x000fe200078e0012 */
/*06c0*/ @P1 IADD3 R19, R19, 0x1, RZ ; /* 0x0000000113131810 */
/* 0x000fc60007ffe0ff */
/*06d0*/ @!P3 IMAD.MOV R20, RZ, RZ, R17.reuse ; /* 0x000000ffff14b224 */
/* 0x100fe200078e0211 */
/*06e0*/ @!P1 IADD3 R12, R22, 0x1, RZ ; /* 0x00000001160c9810 */
/* 0x000fe20007ffe0ff */
/*06f0*/ @!P2 IMAD.MOV R18, RZ, RZ, R17 ; /* 0x000000ffff12a224 */
/* 0x000fc600078e0211 */
/*0700*/ @P1 IMNMX.U32 R19, R19, R20, PT ; /* 0x0000001413131217 */
/* 0x000fe40003800000 */
/*0710*/ @!P1 IMNMX.U32 R19, R12, R18, PT ; /* 0x000000120c139217 */
/* 0x000fe40003800000 */
/*0720*/ ISETP.GT.U32.AND P1, PT, R6, R7, PT ; /* 0x000000070600720c */
/* 0x000fc60003f24070 */
/*0730*/ ST.E [R10.64], R19 ; /* 0x000000130a007985 */
/* 0x0001e2000c101904 */
/*0740*/ IMAD.MOV.U32 R17, RZ, RZ, R22 ; /* 0x000000ffff117224 */
/* 0x000fd200078e0016 */
/*0750*/ @!P1 BRA 0x5d0 ; /* 0xfffffe7000009947 */
/* 0x000fea000383ffff */
/*0760*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0770*/ @!P0 BRA 0x530 ; /* 0xfffffdb000008947 */
/* 0x000fea000383ffff */
/*0780*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0790*/ IMAD.WIDE R8, R7, 0x4, R4 ; /* 0x0000000407087825 */
/* 0x000fe200078e0204 */
/*07a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fca0000000a00 */
/*07b0*/ LD.E R8, [R8.64] ; /* 0x0000000408087980 */
/* 0x000ea2000c101900 */
/*07c0*/ BSSY B0, 0x870 ; /* 0x000000a000007945 */
/* 0x000fe20003800000 */
/*07d0*/ ISETP.GT.AND P0, PT, R8, c[0x0][0x188], PT ; /* 0x0000620008007a0c */
/* 0x004fda0003f04270 */
/*07e0*/ @P0 BRA 0x860 ; /* 0x0000007000000947 */
/* 0x000fea0003800000 */
/*07f0*/ S2R R6, SR_LANEID ; /* 0x0000000000067919 */
/* 0x000e620000000000 */
/*0800*/ VOTEU.ANY UR4, UPT, PT ; /* 0x0000000000047886 */
/* 0x000fe400038e0100 */
/*0810*/ UFLO.U32 UR5, UR4 ; /* 0x00000004000572bd */
/* 0x000fe200080e0000 */
/*0820*/ POPC R9, UR4 ; /* 0x0000000400097d09 */
/* 0x000eaa0008000000 */
/*0830*/ ISETP.EQ.U32.AND P0, PT, R6, UR5, PT ; /* 0x0000000506007c0c */
/* 0x002fe2000bf02070 */
/*0840*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd80000000a00 */
/*0850*/ @P0 RED.E.ADD.STRONG.GPU [R2.64], R9 ; /* 0x000000090200098e */
/* 0x0043e4000c10e184 */
/*0860*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0870*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x002fc800078e00ff */
/*0880*/ IMAD R16, R9, c[0x0][0xc], R16 ; /* 0x0000030009107a24 */
/* 0x000fca00078e0210 */
/*0890*/ ISETP.GE.AND P0, PT, R16, c[0x0][0x184], PT ; /* 0x0000610010007a0c */
/* 0x000fda0003f06270 */
/*08a0*/ @!P0 BRA 0x1f0 ; /* 0xfffff94000008947 */
/* 0x000fea000383ffff */
/*08b0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*08c0*/ MOV R0, 0x8 ; /* 0x0000000800007802 */
/* 0x000fc80000000f00 */
/*08d0*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x0002a60000000a00 */
/*08e0*/ LEPC R6 ; /* 0x000000000006734e */
/* 0x000fe20000000000 */
/*08f0*/ MOV R9, 0x960 ; /* 0x0000096000097802 */
/* 0x000fe40000000f00 */
/*0900*/ MOV R20, 0x8e0 ; /* 0x000008e000147802 */
/* 0x000fe40000000f00 */
/*0910*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0920*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x002fe40000000f00 */
/*0930*/ IADD3 R20, P0, P1, -R20, R9, R6 ; /* 0x0000000914147210 */
/* 0x000fc8000791e106 */
/*0940*/ IADD3.X R21, ~R0, R21, R7, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2507 */
/*0950*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x005fea0003c00000 */
/*0960*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0970*/ BRA 0x970; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0980*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0990*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
* APPROXIMATE PATTERN MATCHING
*
* INF560
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define APM_DEBUG 0
/**
 * Read an entire file into a freshly allocated buffer.
 *
 * filename : path of the file to load
 * size     : out-parameter; receives the number of bytes read on success
 *
 * Returns the malloc'ed buffer (caller owns it and must free() it) or NULL
 * on failure.  On every failure path an explanatory message is printed to
 * stderr and the open descriptor / partially filled buffer are released,
 * so no resources leak (the original leaked fd/buf on several paths).
 *
 * NOTE(review): n_bytes is an int, so files larger than INT_MAX bytes
 * cannot be reported correctly through *size — acceptable for this tool.
 */
char *
read_input_file( char * filename, int * size )
{
    char * buf ;
    off_t fsize;
    int fd = 0 ;
    int n_bytes = 1 ;

    /* Open the text file */
    fd = open( filename, O_RDONLY ) ;
    if ( fd == -1 )
    {
        fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
        return NULL ;
    }

    /* Get the number of characters in the textfile */
    fsize = lseek(fd, 0, SEEK_END);
    if ( fsize == -1 )
    {
        fprintf( stderr, "Unable to lseek to the end\n" ) ;
        close( fd ) ;               /* fix: descriptor leaked here before */
        return NULL ;
    }

#if APM_DEBUG
    /* fix: off_t is not guaranteed to match %lld; cast explicitly */
    printf( "File length: %lld\n", (long long)fsize ) ;
#endif

    /* Go back to the beginning of the input file */
    if ( lseek(fd, 0, SEEK_SET) == -1 )
    {
        fprintf( stderr, "Unable to lseek to start\n" ) ;
        close( fd ) ;               /* fix: descriptor leaked here before */
        return NULL ;
    }

    /* Allocate data to copy the target text */
    buf = (char *)malloc( fsize * sizeof ( char ) ) ;
    if ( buf == NULL )
    {
        fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
                (long long)fsize ) ;
        close( fd ) ;               /* fix: descriptor leaked here before */
        return NULL ;
    }

    n_bytes = read( fd, buf, fsize ) ;
    if ( n_bytes != fsize )
    {
        fprintf( stderr,
                "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
                (long long)fsize, n_bytes) ;
        free( buf ) ;               /* fix: buffer leaked here before */
        close( fd ) ;
        return NULL ;
    }

#if APM_DEBUG
    printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif

    *size = n_bytes ;

    close( fd ) ;

    return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))

/*
 * Levenshtein edit distance between the first `len` characters of s1 and s2.
 * `column` is caller-provided scratch holding at least len+1 ints: a single
 * DP column, updated in place, one sweep per character of s2.  Usable from
 * both host and device code; no allocation is performed here.
 */
__host__ __device__ int levenshtein(char *s1, char *s2, int len, int * column) {
    unsigned int col, row, diag, keep;

    /* Base column: distance of each s1 prefix from the empty string. */
    for (row = 1; row <= len; row++)
    {
        column[row] = row;
    }

    /* One pass per character of s2, reusing the same column buffer. */
    for (col = 1; col <= len; col++) {
        column[0] = col;
        diag = col - 1 ;
        for (row = 1; row <= len; row++) {
            keep = column[row];
            column[row] = MIN3(
                column[row] + 1,
                column[row-1] + 1,
                diag + (s1[row-1] == s2[col-1] ? 0 : 1)
            );
            diag = keep;
        }
    }
    return(column[len]);
}
/**
 * Kernel: count approximate occurrences of one pattern in the text.
 *
 * Each thread walks positions j of d_buf with a grid-stride loop and, via
 * atomicAdd into d_n_matches[i], counts the positions whose prefix is within
 * approx_factor Levenshtein distance of pattern i (stored at
 * d_pattern + offset, length size_pattern).  Near the end of the buffer the
 * comparison window shrinks to the remaining n_bytes - j characters.
 *
 * Requires the device heap to be large enough for one (size_pattern + 1)
 * int scratch column per resident thread.
 */
__global__ void matchesKernel(int* d_n_matches, char * d_buf, char * d_pattern, int i, int size_pattern, int offset, int n_bytes, int approx_factor){
    /* Traverse the input data up to the end of the file */
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int distance = 0 ;
    int size ;

    size = size_pattern ;

    /* Per-thread DP scratch.  Device-side malloc draws from a limited heap
       and returns NULL on exhaustion; dereferencing NULL would fault the
       whole kernel, so bail out instead (fix: was unchecked). */
    int* columns = (int *) malloc((size_pattern + 1) * sizeof(int));
    if (columns == NULL) {
        return;
    }

    while (j < n_bytes) {
        /* Shrink the window once fewer than size_pattern bytes remain
           (j only grows, so size never needs to grow back). */
        if (n_bytes - j < size_pattern ){
            size = n_bytes - j ;
        }
        distance = levenshtein(d_pattern + offset, &d_buf[j], size, columns ) ;
        if ( distance <= approx_factor) {
            atomicAdd(&d_n_matches[i], 1);
        }
        j += stride;
    }
    free(columns);
}
int
main( int argc, char ** argv )
{
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i;
char * buf ;
struct timeval t1, t2;
double duration ;
int n_bytes ;
int * n_matches ;
/* Check number of arguments */
if ( argc < 4 )
{
printf( "Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0] ) ;
return 1 ;
}
/* Get the distance factor */
approx_factor = atoi( argv[1] ) ;
/* Grab the filename containing the target text */
filename = argv[2] ;
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3 ;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if ( pattern == NULL )
{
fprintf( stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns ) ;
return 1 ;
}
/* Grab the patterns */
for ( i = 0 ; i < nb_patterns ; i++ )
{
int l ;
l = strlen(argv[i+3]) ;
if ( l <= 0 )
{
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if ( pattern[i] == NULL )
{
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor ) ;
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL )
{
return 1 ;
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
for (i = 0; i < nb_patterns; i++) {
n_matches[i] = 0;
}
if ( n_matches == NULL )
{
fprintf( stderr, "Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int ) ) ;
return 1 ;
}
/*****
* BEGIN MAIN LOOP
******/
/* Timer start */
gettimeofday(&t1, NULL);
/* Check each pattern one by one */
int* d_n_matches;
char * d_pattern;
char* d_buf;
int* offset = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int* lens = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int sum_lens;
lens[0] = strlen(pattern[0]);
offset[0] = 0;
sum_lens = lens[0];
for (i = 1; i < nb_patterns; i++) {
offset[i] = offset[i-1] + lens[i-1];
lens[i] = strlen(pattern[i]);
sum_lens += lens[i];
}
char* concat_patterns = (char*) malloc( sum_lens * sizeof( char ) ) ;
for (i = 0; i < nb_patterns; i++) {
strcpy (concat_patterns + offset[i], pattern[i]);
}
cudaError_t error;
cudaMalloc((void **)&d_n_matches, nb_patterns*sizeof(int));
cudaMalloc((void **)&d_pattern, sum_lens*sizeof(char));
cudaMalloc((void **)&d_buf, n_bytes);
cudaMemcpy(d_pattern, concat_patterns, sum_lens*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(d_buf, buf, n_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_n_matches, n_matches, nb_patterns*sizeof(int), cudaMemcpyHostToDevice);
int Dg = 4;
int Db = 256;
for (i = 0; i < nb_patterns; i++) {
matchesKernel<<<Dg,Db>>>(d_n_matches, d_buf, d_pattern, i, lens[i], offset[i], n_bytes, approx_factor);
cudaGetLastError();
}
cudaMemcpy(n_matches, d_n_matches, nb_patterns*sizeof(int), cudaMemcpyDeviceToHost);
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* END MAIN LOOP
******/
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
} | .file "tmpxft_0012e5ff_00000000-6_apmGPU.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2079:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2079:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Unable to open the text file <%s>\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Unable to lseek to the end\n"
.LC2:
.string "Unable to lseek to start\n"
.section .rodata.str1.8
.align 8
.LC3:
.string "Unable to allocate %lld byte(s) for main array\n"
.align 8
.LC4:
.string "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n"
.text
.globl _Z15read_input_filePcPi
.type _Z15read_input_filePcPi, @function
_Z15read_input_filePcPi:
.LFB2074:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbp
movq %rsi, %r13
movl $0, %esi
call __open_2@PLT
cmpl $-1, %eax
je .L11
movl %eax, %ebx
movl $2, %edx
movl $0, %esi
movl %eax, %edi
call lseek@PLT
movq %rax, %rbp
cmpq $-1, %rax
je .L12
movl $0, %edx
movl $0, %esi
movl %ebx, %edi
call lseek@PLT
cmpq $-1, %rax
je .L13
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r12
testq %rax, %rax
je .L14
movq %rbp, %rcx
movq %rbp, %rdx
movq %rax, %rsi
movl %ebx, %edi
call __read_chk@PLT
movq %rax, %r8
cltq
cmpq %rbp, %rax
jne .L15
movl %r8d, 0(%r13)
movl %ebx, %edi
call close@PLT
.L3:
movq %r12, %rax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
movq %rbp, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %r12d
jmp .L3
.L12:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %r12d
jmp .L3
.L13:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %r12d
jmp .L3
.L14:
movq %rbp, %rcx
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L3
.L15:
movq %rbp, %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %r12d
jmp .L3
.cfi_endproc
.LFE2074:
.size _Z15read_input_filePcPi, .-_Z15read_input_filePcPi
.globl _Z11levenshteinPcS_iPi
.type _Z11levenshteinPcS_iPi, @function
_Z11levenshteinPcS_iPi:
.LFB2075:
.cfi_startproc
endbr64
movq %rdi, %r9
movl %edx, %r11d
movq %rcx, %rdi
testl %edx, %edx
je .L27
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl $1, %eax
.L18:
movl %eax, %edx
movl %eax, (%rdi,%rdx,4)
addl $1, %eax
cmpl %eax, %r11d
jnb .L18
movq %rsi, %r8
movl $1, %ebx
movl %r11d, %r10d
jmp .L22
.L19:
movzbl (%r8), %ecx
cmpb %cl, (%r9,%rax)
setne %dl
movzbl %dl, %edx
addl %r12d, %edx
addl $1, %ebp
cmpl %ebp, %edx
cmova %ebp, %edx
.L20:
movl %edx, 4(%rdi,%rax,4)
addq $1, %rax
cmpq %rax, %r10
je .L30
.L21:
movl 4(%rdi,%rax,4), %ecx
movl %esi, %r12d
movl %ecx, %esi
movl (%rdi,%rax,4), %ebp
cmpl %ebp, %ecx
jge .L19
movzbl (%r8), %edx
cmpb %dl, (%r9,%rax)
setne %dl
movzbl %dl, %edx
addl %r12d, %edx
addl $1, %ecx
cmpl %ecx, %edx
cmova %ecx, %edx
jmp .L20
.L30:
leal 1(%rbx), %eax
addq $1, %r8
cmpl %ebx, %r11d
je .L31
movl %eax, %ebx
.L22:
movl %ebx, (%rdi)
leal -1(%rbx), %esi
movl $0, %eax
jmp .L21
.L31:
movslq %r11d, %r11
movl (%rdi,%r11,4), %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
movslq %edx, %r11
movl (%rcx,%r11,4), %eax
ret
.cfi_endproc
.LFE2075:
.size _Z11levenshteinPcS_iPi, .-_Z11levenshteinPcS_iPi
.globl _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii
.type _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii, @function
_Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii:
.LFB2101:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L36
.L32:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L37
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z13matchesKernelPiPcS0_iiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L32
.L37:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2101:
.size _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii, .-_Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii
.globl _Z13matchesKernelPiPcS0_iiiii
.type _Z13matchesKernelPiPcS0_iiiii, @function
_Z13matchesKernelPiPcS0_iiiii:
.LFB2102:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2102:
.size _Z13matchesKernelPiPcS0_iiiii, .-_Z13matchesKernelPiPcS0_iiiii
.section .rodata.str1.8
.align 8
.LC5:
.string "Usage: %s approximation_factor dna_database pattern1 pattern2 ...\n"
.align 8
.LC6:
.string "Unable to allocate array of pattern of size %d\n"
.align 8
.LC7:
.string "Error while parsing argument %d\n"
.align 8
.LC8:
.string "Unable to allocate string of size %d\n"
.align 8
.LC9:
.string "Approximate Pattern Mathing: looking for %d pattern(s) in file %s w/ distance of %d\n"
.align 8
.LC10:
.string "Error: unable to allocate memory for %ldB\n"
.section .rodata.str1.1
.LC12:
.string "APM done in %lf s\n"
.section .rodata.str1.8
.align 8
.LC13:
.string "Number of matches for pattern <%s>: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2076:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movl %edi, 16(%rsp)
movq %rsi, %r14
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
cmpl $3, %edi
jle .L64
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, 32(%rsp)
movq 16(%r14), %rax
movq %rax, 24(%rsp)
movl 16(%rsp), %eax
subl $3, %eax
movl %eax, 40(%rsp)
cltq
movq %rax, 48(%rsp)
leaq 0(,%rax,8), %rdi
call malloc@PLT
movq %rax, %r13
testq %rax, %rax
je .L65
movl 16(%rsp), %eax
leal -4(%rax), %r15d
movl $0, %ebp
jmp .L46
.L64:
movq (%rsi), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, %eax
.L40:
movq 152(%rsp), %rdx
subq %fs:40, %rdx
jne .L66
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L65:
.cfi_restore_state
movl 40(%rsp), %ecx
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L40
.L56:
movq %rax, %rbp
.L46:
movq 24(%r14,%rbp,8), %r12
movq %r12, %rdi
call strlen@PLT
movl %eax, 8(%rsp)
testl %eax, %eax
jle .L67
addl $1, %eax
movslq %eax, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rdi
movq %rax, 0(%r13,%rbp,8)
testq %rax, %rax
je .L68
movq %rbx, %rcx
movq %rbx, %rdx
movq %r12, %rsi
call __strncpy_chk@PLT
leaq 1(%rbp), %rax
cmpq %r15, %rbp
jne .L56
movq 32(%rsp), %rax
movl %eax, 44(%rsp)
movl %eax, %r8d
movq 24(%rsp), %rbx
movq %rbx, %rcx
movl 40(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 68(%rsp), %rsi
movq %rbx, %rdi
call _Z15read_input_filePcPi
movq %rax, 56(%rsp)
testq %rax, %rax
je .L57
movq 48(%rsp), %rax
salq $2, %rax
movq %rax, 32(%rsp)
movq %rax, %rdi
call malloc@PLT
movq %rax, %r14
leaq 4(%rax,%r15,4), %rdx
.L47:
movl $0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L47
testq %r14, %r14
je .L69
leaq 112(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movq 32(%rsp), %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
movq %rax, 8(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbx
movq %rax, 48(%rsp)
movq 0(%r13), %rdi
call strlen@PLT
movl %eax, 24(%rsp)
movl %eax, (%rbx)
movl $0, 0(%rbp)
cmpl $1, 40(%rsp)
jle .L49
leaq 8(%r13), %r12
addq $4, %rbp
leaq 4(%rbx), %rbx
movl 16(%rsp), %eax
leal -5(%rax), %eax
leaq 16(%r13,%rax,8), %rax
movq %rax, 16(%rsp)
.L50:
movl -4(%rbx), %eax
addl -4(%rbp), %eax
movl %eax, 0(%rbp)
movq (%r12), %rdi
call strlen@PLT
movl %eax, (%rbx)
addl %eax, 24(%rsp)
addq $8, %r12
addq $4, %rbp
addq $4, %rbx
movq 16(%rsp), %rax
cmpq %rax, %r12
jne .L50
.L49:
movslq 24(%rsp), %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r12
movl $0, %ebx
.L51:
movq 8(%rsp), %rax
movslq (%rax,%rbx,4), %rdi
cmpq %rdi, %rbp
movq %rdi, %rdx
cmovnb %rbp, %rdx
subq %rdi, %rdx
movq 0(%r13,%rbx,8), %rsi
addq %r12, %rdi
call __strcpy_chk@PLT
movq %rbx, %rax
addq $1, %rbx
cmpq %r15, %rax
jne .L51
leaq 72(%rsp), %rdi
movq 32(%rsp), %rsi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl 68(%rsp), %r15d
movslq %r15d, %rbx
leaq 88(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq 32(%rsp), %rdx
movq %r14, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %ebx
jmp .L53
.L67:
leal 3(%rbp), %ecx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L40
.L68:
movl 8(%rsp), %ecx
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L40
.L69:
movq 32(%rsp), %rcx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L40
.L52:
call cudaGetLastError@PLT
addq $1, %rbx
cmpl %ebx, 40(%rsp)
jle .L70
.L53:
movl $256, 128(%rsp)
movl $1, 132(%rsp)
movl $1, 136(%rsp)
movl $4, 100(%rsp)
movl $1, 104(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 128(%rsp), %rdx
movl $1, %ecx
movq 100(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L52
movl 44(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 232
pushq %r15
.cfi_def_cfa_offset 240
movq 24(%rsp), %rax
movl (%rax,%rbx,4), %r9d
movq 64(%rsp), %rax
movl (%rax,%rbx,4), %r8d
movl %ebx, %ecx
movq 96(%rsp), %rdx
movq 104(%rsp), %rsi
movq 88(%rsp), %rdi
call _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L52
.L70:
movl $2, %ecx
movq 32(%rsp), %rdx
movq 72(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
leaq 128(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movq 136(%rsp), %rax
subq 120(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC11(%rip), %xmm0
movq 128(%rsp), %rax
subq 112(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
addsd %xmm1, %xmm0
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0, %ebx
leaq .LC13(%rip), %rbp
.L54:
movl (%r14,%rbx,4), %ecx
movq 0(%r13,%rbx,8), %rdx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 40(%rsp)
jg .L54
movl $0, %eax
jmp .L40
.L57:
movl $1, %eax
jmp .L40
.L66:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2076:
.size main, .-main
.section .rodata.str1.1
.LC14:
.string "_Z13matchesKernelPiPcS0_iiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2104:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z13matchesKernelPiPcS0_iiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2104:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC11:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
* APPROXIMATE PATTERN MATCHING
*
* INF560
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define APM_DEBUG 0
/**
 * Read an entire file into a freshly allocated buffer.
 *
 * filename : path of the file to load
 * size     : out-parameter; receives the number of bytes read on success
 *
 * Returns the malloc'ed buffer (caller owns it and must free() it) or NULL
 * on failure.  On every failure path an explanatory message is printed to
 * stderr and the open descriptor / partially filled buffer are released,
 * so no resources leak (the original leaked fd/buf on several paths).
 *
 * NOTE(review): n_bytes is an int, so files larger than INT_MAX bytes
 * cannot be reported correctly through *size — acceptable for this tool.
 */
char *
read_input_file( char * filename, int * size )
{
    char * buf ;
    off_t fsize;
    int fd = 0 ;
    int n_bytes = 1 ;

    /* Open the text file */
    fd = open( filename, O_RDONLY ) ;
    if ( fd == -1 )
    {
        fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
        return NULL ;
    }

    /* Get the number of characters in the textfile */
    fsize = lseek(fd, 0, SEEK_END);
    if ( fsize == -1 )
    {
        fprintf( stderr, "Unable to lseek to the end\n" ) ;
        close( fd ) ;               /* fix: descriptor leaked here before */
        return NULL ;
    }

#if APM_DEBUG
    /* fix: off_t is not guaranteed to match %lld; cast explicitly */
    printf( "File length: %lld\n", (long long)fsize ) ;
#endif

    /* Go back to the beginning of the input file */
    if ( lseek(fd, 0, SEEK_SET) == -1 )
    {
        fprintf( stderr, "Unable to lseek to start\n" ) ;
        close( fd ) ;               /* fix: descriptor leaked here before */
        return NULL ;
    }

    /* Allocate data to copy the target text */
    buf = (char *)malloc( fsize * sizeof ( char ) ) ;
    if ( buf == NULL )
    {
        fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
                (long long)fsize ) ;
        close( fd ) ;               /* fix: descriptor leaked here before */
        return NULL ;
    }

    n_bytes = read( fd, buf, fsize ) ;
    if ( n_bytes != fsize )
    {
        fprintf( stderr,
                "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
                (long long)fsize, n_bytes) ;
        free( buf ) ;               /* fix: buffer leaked here before */
        close( fd ) ;
        return NULL ;
    }

#if APM_DEBUG
    printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif

    *size = n_bytes ;

    close( fd ) ;

    return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))

/*
 * Levenshtein edit distance between the first `len` characters of s1 and s2.
 * `column` is caller-provided scratch holding at least len+1 ints: a single
 * DP column, updated in place, one sweep per character of s2.  Usable from
 * both host and device code; no allocation is performed here.
 */
__host__ __device__ int levenshtein(char *s1, char *s2, int len, int * column) {
    unsigned int col, row, diag, keep;

    /* Base column: distance of each s1 prefix from the empty string. */
    for (row = 1; row <= len; row++)
    {
        column[row] = row;
    }

    /* One pass per character of s2, reusing the same column buffer. */
    for (col = 1; col <= len; col++) {
        column[0] = col;
        diag = col - 1 ;
        for (row = 1; row <= len; row++) {
            keep = column[row];
            column[row] = MIN3(
                column[row] + 1,
                column[row-1] + 1,
                diag + (s1[row-1] == s2[col-1] ? 0 : 1)
            );
            diag = keep;
        }
    }
    return(column[len]);
}
/**
 * Kernel: count approximate occurrences of one pattern in the text.
 *
 * Each thread walks positions j of d_buf with a grid-stride loop and, via
 * atomicAdd into d_n_matches[i], counts the positions whose prefix is within
 * approx_factor Levenshtein distance of pattern i (stored at
 * d_pattern + offset, length size_pattern).  Near the end of the buffer the
 * comparison window shrinks to the remaining n_bytes - j characters.
 *
 * Requires the device heap to be large enough for one (size_pattern + 1)
 * int scratch column per resident thread.
 */
__global__ void matchesKernel(int* d_n_matches, char * d_buf, char * d_pattern, int i, int size_pattern, int offset, int n_bytes, int approx_factor){
    /* Traverse the input data up to the end of the file */
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int distance = 0 ;
    int size ;

    size = size_pattern ;

    /* Per-thread DP scratch.  Device-side malloc draws from a limited heap
       and returns NULL on exhaustion; dereferencing NULL would fault the
       whole kernel, so bail out instead (fix: was unchecked). */
    int* columns = (int *) malloc((size_pattern + 1) * sizeof(int));
    if (columns == NULL) {
        return;
    }

    while (j < n_bytes) {
        /* Shrink the window once fewer than size_pattern bytes remain
           (j only grows, so size never needs to grow back). */
        if (n_bytes - j < size_pattern ){
            size = n_bytes - j ;
        }
        distance = levenshtein(d_pattern + offset, &d_buf[j], size, columns ) ;
        if ( distance <= approx_factor) {
            atomicAdd(&d_n_matches[i], 1);
        }
        j += stride;
    }
    free(columns);
}
int
main( int argc, char ** argv )
{
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i;
char * buf ;
struct timeval t1, t2;
double duration ;
int n_bytes ;
int * n_matches ;
/* Check number of arguments */
if ( argc < 4 )
{
printf( "Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0] ) ;
return 1 ;
}
/* Get the distance factor */
approx_factor = atoi( argv[1] ) ;
/* Grab the filename containing the target text */
filename = argv[2] ;
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3 ;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if ( pattern == NULL )
{
fprintf( stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns ) ;
return 1 ;
}
/* Grab the patterns */
for ( i = 0 ; i < nb_patterns ; i++ )
{
int l ;
l = strlen(argv[i+3]) ;
if ( l <= 0 )
{
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if ( pattern[i] == NULL )
{
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor ) ;
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL )
{
return 1 ;
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
for (i = 0; i < nb_patterns; i++) {
n_matches[i] = 0;
}
if ( n_matches == NULL )
{
fprintf( stderr, "Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int ) ) ;
return 1 ;
}
/*****
* BEGIN MAIN LOOP
******/
/* Timer start */
gettimeofday(&t1, NULL);
/* Check each pattern one by one */
int* d_n_matches;
char * d_pattern;
char* d_buf;
int* offset = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int* lens = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int sum_lens;
lens[0] = strlen(pattern[0]);
offset[0] = 0;
sum_lens = lens[0];
for (i = 1; i < nb_patterns; i++) {
offset[i] = offset[i-1] + lens[i-1];
lens[i] = strlen(pattern[i]);
sum_lens += lens[i];
}
char* concat_patterns = (char*) malloc( sum_lens * sizeof( char ) ) ;
for (i = 0; i < nb_patterns; i++) {
strcpy (concat_patterns + offset[i], pattern[i]);
}
cudaError_t error;
cudaMalloc((void **)&d_n_matches, nb_patterns*sizeof(int));
cudaMalloc((void **)&d_pattern, sum_lens*sizeof(char));
cudaMalloc((void **)&d_buf, n_bytes);
cudaMemcpy(d_pattern, concat_patterns, sum_lens*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(d_buf, buf, n_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_n_matches, n_matches, nb_patterns*sizeof(int), cudaMemcpyHostToDevice);
int Dg = 4;
int Db = 256;
for (i = 0; i < nb_patterns; i++) {
matchesKernel<<<Dg,Db>>>(d_n_matches, d_buf, d_pattern, i, lens[i], offset[i], n_bytes, approx_factor);
cudaGetLastError();
}
cudaMemcpy(n_matches, d_n_matches, nb_patterns*sizeof(int), cudaMemcpyDeviceToHost);
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* END MAIN LOOP
******/
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
} | /**
* APPROXIMATE PATTERN MATCHING
*
* INF560
*/
#include <hip/hip_runtime.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define APM_DEBUG 0
char *
read_input_file( char * filename, int * size )
{
char * buf ;
off_t fsize;
int fd = 0 ;
int n_bytes = 1 ;
/* Open the text file */
fd = open( filename, O_RDONLY ) ;
if ( fd == -1 )
{
fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
return NULL ;
}
/* Get the number of characters in the textfile */
fsize = lseek(fd, 0, SEEK_END);
if ( fsize == -1 )
{
fprintf( stderr, "Unable to lseek to the end\n" ) ;
return NULL ;
}
#if APM_DEBUG
printf( "File length: %lld\n", fsize ) ;
#endif
/* Go back to the beginning of the input file */
if ( lseek(fd, 0, SEEK_SET) == -1 )
{
fprintf( stderr, "Unable to lseek to start\n" ) ;
return NULL ;
}
/* Allocate data to copy the target text */
buf = (char *)malloc( fsize * sizeof ( char ) ) ;
if ( buf == NULL )
{
fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
fsize ) ;
return NULL ;
}
n_bytes = read( fd, buf, fsize ) ;
if ( n_bytes != fsize )
{
fprintf( stderr,
"Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
fsize, n_bytes) ;
return NULL ;
}
#if APM_DEBUG
printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif
*size = n_bytes ;
close( fd ) ;
return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
__host__ __device__ int levenshtein(char *s1, char *s2, int len, int * column) {
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++)
{
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
return(column[len]);
}
__global__ void matchesKernel(int* d_n_matches, char * d_buf, char * d_pattern, int i, int size_pattern, int offset, int n_bytes, int approx_factor){
/* Traverse the input data up to the end of the file */
int j = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int distance = 0 ;
int size ;
size = size_pattern ;
int* columns = (int *) malloc((size_pattern + 1) * sizeof(int));
while (j < n_bytes) {
if (n_bytes - j < size_pattern ){
size = n_bytes - j ;
}
distance = levenshtein(d_pattern + offset, &d_buf[j], size, columns ) ;
if ( distance <= approx_factor) {
atomicAdd(&d_n_matches[i], 1);
}
j += stride;
}
free(columns);
}
int
main( int argc, char ** argv )
{
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i;
char * buf ;
struct timeval t1, t2;
double duration ;
int n_bytes ;
int * n_matches ;
/* Check number of arguments */
if ( argc < 4 )
{
printf( "Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0] ) ;
return 1 ;
}
/* Get the distance factor */
approx_factor = atoi( argv[1] ) ;
/* Grab the filename containing the target text */
filename = argv[2] ;
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3 ;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if ( pattern == NULL )
{
fprintf( stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns ) ;
return 1 ;
}
/* Grab the patterns */
for ( i = 0 ; i < nb_patterns ; i++ )
{
int l ;
l = strlen(argv[i+3]) ;
if ( l <= 0 )
{
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if ( pattern[i] == NULL )
{
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor ) ;
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL )
{
return 1 ;
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
for (i = 0; i < nb_patterns; i++) {
n_matches[i] = 0;
}
if ( n_matches == NULL )
{
fprintf( stderr, "Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int ) ) ;
return 1 ;
}
/*****
* BEGIN MAIN LOOP
******/
/* Timer start */
gettimeofday(&t1, NULL);
/* Check each pattern one by one */
int* d_n_matches;
char * d_pattern;
char* d_buf;
int* offset = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int* lens = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int sum_lens;
lens[0] = strlen(pattern[0]);
offset[0] = 0;
sum_lens = lens[0];
for (i = 1; i < nb_patterns; i++) {
offset[i] = offset[i-1] + lens[i-1];
lens[i] = strlen(pattern[i]);
sum_lens += lens[i];
}
char* concat_patterns = (char*) malloc( sum_lens * sizeof( char ) ) ;
for (i = 0; i < nb_patterns; i++) {
strcpy (concat_patterns + offset[i], pattern[i]);
}
hipError_t error;
hipMalloc((void **)&d_n_matches, nb_patterns*sizeof(int));
hipMalloc((void **)&d_pattern, sum_lens*sizeof(char));
hipMalloc((void **)&d_buf, n_bytes);
hipMemcpy(d_pattern, concat_patterns, sum_lens*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(d_buf, buf, n_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_n_matches, n_matches, nb_patterns*sizeof(int), hipMemcpyHostToDevice);
int Dg = 4;
int Db = 256;
for (i = 0; i < nb_patterns; i++) {
matchesKernel<<<Dg,Db>>>(d_n_matches, d_buf, d_pattern, i, lens[i], offset[i], n_bytes, approx_factor);
hipGetLastError();
}
hipMemcpy(n_matches, d_n_matches, nb_patterns*sizeof(int), hipMemcpyDeviceToHost);
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* END MAIN LOOP
******/
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**
* APPROXIMATE PATTERN MATCHING
*
* INF560
*/
#include <hip/hip_runtime.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define APM_DEBUG 0
char *
read_input_file( char * filename, int * size )
{
char * buf ;
off_t fsize;
int fd = 0 ;
int n_bytes = 1 ;
/* Open the text file */
fd = open( filename, O_RDONLY ) ;
if ( fd == -1 )
{
fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
return NULL ;
}
/* Get the number of characters in the textfile */
fsize = lseek(fd, 0, SEEK_END);
if ( fsize == -1 )
{
fprintf( stderr, "Unable to lseek to the end\n" ) ;
return NULL ;
}
#if APM_DEBUG
printf( "File length: %lld\n", fsize ) ;
#endif
/* Go back to the beginning of the input file */
if ( lseek(fd, 0, SEEK_SET) == -1 )
{
fprintf( stderr, "Unable to lseek to start\n" ) ;
return NULL ;
}
/* Allocate data to copy the target text */
buf = (char *)malloc( fsize * sizeof ( char ) ) ;
if ( buf == NULL )
{
fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
fsize ) ;
return NULL ;
}
n_bytes = read( fd, buf, fsize ) ;
if ( n_bytes != fsize )
{
fprintf( stderr,
"Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
fsize, n_bytes) ;
return NULL ;
}
#if APM_DEBUG
printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif
*size = n_bytes ;
close( fd ) ;
return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
__host__ __device__ int levenshtein(char *s1, char *s2, int len, int * column) {
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++)
{
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
return(column[len]);
}
__global__ void matchesKernel(int* d_n_matches, char * d_buf, char * d_pattern, int i, int size_pattern, int offset, int n_bytes, int approx_factor){
/* Traverse the input data up to the end of the file */
int j = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int distance = 0 ;
int size ;
size = size_pattern ;
int* columns = (int *) malloc((size_pattern + 1) * sizeof(int));
while (j < n_bytes) {
if (n_bytes - j < size_pattern ){
size = n_bytes - j ;
}
distance = levenshtein(d_pattern + offset, &d_buf[j], size, columns ) ;
if ( distance <= approx_factor) {
atomicAdd(&d_n_matches[i], 1);
}
j += stride;
}
free(columns);
}
int
main( int argc, char ** argv )
{
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i;
char * buf ;
struct timeval t1, t2;
double duration ;
int n_bytes ;
int * n_matches ;
/* Check number of arguments */
if ( argc < 4 )
{
printf( "Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0] ) ;
return 1 ;
}
/* Get the distance factor */
approx_factor = atoi( argv[1] ) ;
/* Grab the filename containing the target text */
filename = argv[2] ;
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3 ;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if ( pattern == NULL )
{
fprintf( stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns ) ;
return 1 ;
}
/* Grab the patterns */
for ( i = 0 ; i < nb_patterns ; i++ )
{
int l ;
l = strlen(argv[i+3]) ;
if ( l <= 0 )
{
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if ( pattern[i] == NULL )
{
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor ) ;
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL )
{
return 1 ;
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
for (i = 0; i < nb_patterns; i++) {
n_matches[i] = 0;
}
if ( n_matches == NULL )
{
fprintf( stderr, "Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int ) ) ;
return 1 ;
}
/*****
* BEGIN MAIN LOOP
******/
/* Timer start */
gettimeofday(&t1, NULL);
/* Check each pattern one by one */
int* d_n_matches;
char * d_pattern;
char* d_buf;
int* offset = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int* lens = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int sum_lens;
lens[0] = strlen(pattern[0]);
offset[0] = 0;
sum_lens = lens[0];
for (i = 1; i < nb_patterns; i++) {
offset[i] = offset[i-1] + lens[i-1];
lens[i] = strlen(pattern[i]);
sum_lens += lens[i];
}
char* concat_patterns = (char*) malloc( sum_lens * sizeof( char ) ) ;
for (i = 0; i < nb_patterns; i++) {
strcpy (concat_patterns + offset[i], pattern[i]);
}
hipError_t error;
hipMalloc((void **)&d_n_matches, nb_patterns*sizeof(int));
hipMalloc((void **)&d_pattern, sum_lens*sizeof(char));
hipMalloc((void **)&d_buf, n_bytes);
hipMemcpy(d_pattern, concat_patterns, sum_lens*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(d_buf, buf, n_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_n_matches, n_matches, nb_patterns*sizeof(int), hipMemcpyHostToDevice);
int Dg = 4;
int Db = 256;
for (i = 0; i < nb_patterns; i++) {
matchesKernel<<<Dg,Db>>>(d_n_matches, d_buf, d_pattern, i, lens[i], offset[i], n_bytes, approx_factor);
hipGetLastError();
}
hipMemcpy(n_matches, d_n_matches, nb_patterns*sizeof(int), hipMemcpyDeviceToHost);
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* END MAIN LOOP
******/
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
} | .text
.file "apmGPU.hip"
.globl _Z15read_input_filePcPi # -- Begin function _Z15read_input_filePcPi
.p2align 4, 0x90
.type _Z15read_input_filePcPi,@function
_Z15read_input_filePcPi: # @_Z15read_input_filePcPi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
movq %rdi, %r15
xorl %esi, %esi
xorl %eax, %eax
callq open
cmpl $-1, %eax
je .LBB0_1
# %bb.3:
movl %eax, %ebp
movl %eax, %edi
xorl %esi, %esi
movl $2, %edx
callq lseek
cmpq $-1, %rax
je .LBB0_4
# %bb.6:
movq %rax, %r15
movl %ebp, %edi
xorl %esi, %esi
xorl %edx, %edx
callq lseek
cmpq $-1, %rax
je .LBB0_7
# %bb.8:
movq %r15, %rdi
callq malloc
testq %rax, %rax
je .LBB0_9
# %bb.10:
movq %rax, %rbx
movl %ebp, %edi
movq %rax, %rsi
movq %r15, %rdx
callq read
movslq %eax, %rcx
cmpq %r15, %rcx
jne .LBB0_11
# %bb.12:
movl %eax, (%r14)
movl %ebp, %edi
callq close
.LBB0_13:
movq %rbx, %rax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_1:
.cfi_def_cfa_offset 48
movq stderr(%rip), %rdi
xorl %ebx, %ebx
movl $.L.str, %esi
jmp .LBB0_2
.LBB0_4:
movq stderr(%rip), %rcx
movl $.L.str.1, %edi
movl $27, %esi
jmp .LBB0_5
.LBB0_7:
movq stderr(%rip), %rcx
movl $.L.str.2, %edi
movl $25, %esi
.LBB0_5:
movl $1, %edx
callq fwrite@PLT
xorl %ebx, %ebx
jmp .LBB0_13
.LBB0_9:
movq stderr(%rip), %rdi
xorl %ebx, %ebx
movl $.L.str.3, %esi
.LBB0_2:
movq %r15, %rdx
xorl %eax, %eax
callq fprintf
jmp .LBB0_13
.LBB0_11:
movq stderr(%rip), %rdi
xorl %ebx, %ebx
movl $.L.str.4, %esi
movq %r15, %rdx
movl %eax, %ecx
xorl %eax, %eax
callq fprintf
jmp .LBB0_13
.Lfunc_end0:
.size _Z15read_input_filePcPi, .Lfunc_end0-_Z15read_input_filePcPi
.cfi_endproc
# -- End function
.globl _Z11levenshteinPcS_iPi # -- Begin function _Z11levenshteinPcS_iPi
.p2align 4, 0x90
.type _Z11levenshteinPcS_iPi,@function
_Z11levenshteinPcS_iPi: # @_Z11levenshteinPcS_iPi
.cfi_startproc
# %bb.0:
# kill: def $edx killed $edx def $rdx
testl %edx, %edx
je .LBB1_8
# %bb.1: # %.lr.ph.preheader
leal 1(%rdx), %r8d
cmpl $3, %r8d
movl $2, %eax
cmovael %r8d, %eax
movl $1, %r9d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %r9d, (%rcx,%r9,4)
incq %r9
cmpq %r9, %rax
jne .LBB1_2
# %bb.3: # %.lr.ph71
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $3, %r8d
movl $2, %eax
cmovael %r8d, %eax
leaq -1(%rax), %r8
movl $1, %r9d
.p2align 4, 0x90
.LBB1_4: # %.lr.ph68
# =>This Loop Header: Depth=1
# Child Loop BB1_5 Depth 2
movl %r9d, (%rcx)
leaq -1(%r9), %r10
xorl %r11d, %r11d
movl %r9d, %ebx
movl %r10d, %ebp
.p2align 4, 0x90
.LBB1_5: # Parent Loop BB1_4 Depth=1
# => This Inner Loop Header: Depth=2
movl 4(%rcx,%r11,4), %r14d
movzbl (%rdi,%r11), %r15d
cmpl %ebx, %r14d
cmovll %r14d, %ebx
incl %ebx
xorl %r12d, %r12d
cmpb (%rsi,%r10), %r15b
setne %r12b
addl %ebp, %r12d
cmpl %r12d, %ebx
cmovael %r12d, %ebx
movl %ebx, 4(%rcx,%r11,4)
incq %r11
movl %r14d, %ebp
cmpq %r11, %r8
jne .LBB1_5
# %bb.6: # %._crit_edge
# in Loop: Header=BB1_4 Depth=1
incq %r9
cmpq %rax, %r9
jne .LBB1_4
# %bb.7:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB1_8: # %._crit_edge72
movslq %edx, %rax
movl (%rcx,%rax,4), %eax
retq
.Lfunc_end1:
.size _Z11levenshteinPcS_iPi, .Lfunc_end1-_Z11levenshteinPcS_iPi
.cfi_endproc
# -- End function
.globl _Z28__device_stub__matchesKernelPiPcS0_iiiii # -- Begin function _Z28__device_stub__matchesKernelPiPcS0_iiiii
.p2align 4, 0x90
.type _Z28__device_stub__matchesKernelPiPcS0_iiiii,@function
_Z28__device_stub__matchesKernelPiPcS0_iiiii: # @_Z28__device_stub__matchesKernelPiPcS0_iiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13matchesKernelPiPcS0_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end2:
.size _Z28__device_stub__matchesKernelPiPcS0_iiiii, .Lfunc_end2-_Z28__device_stub__matchesKernelPiPcS0_iiiii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 336
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r12
cmpl $3, %edi
jg .LBB3_2
# %bb.1:
movq (%r12), %rsi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl $1, %r13d
jmp .LBB3_30
.LBB3_2:
movl %edi, %r14d
movq 8(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 88(%rsp) # 8-byte Spill
movq 16(%r12), %r15
leal -3(%r14), %ebx
leaq (,%rbx,8), %rdi
callq malloc
movq %rax, 40(%rsp) # 8-byte Spill
testq %rax, %rax
je .LBB3_4
# %bb.3: # %.lr.ph.preheader
movq %r15, 8(%rsp) # 8-byte Spill
xorl %eax, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movq %r14, 32(%rsp) # 8-byte Spill
movq %rbx, 24(%rsp) # 8-byte Spill
negq %rbx
movl $3, %r14d
.p2align 4, 0x90
.LBB3_6: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq (%r12,%r14,8), %rbp
movq %rbp, %rdi
callq strlen
movq %rax, %r13
testl %r13d, %r13d
jle .LBB3_7
# %bb.9: # in Loop: Header=BB3_6 Depth=1
movl %r13d, %r15d
incl %r15d
movq %r15, %rdi
callq malloc
movq 40(%rsp), %rcx # 8-byte Reload
movq %rax, -24(%rcx,%r14,8)
testq %rax, %rax
je .LBB3_10
# %bb.11: # in Loop: Header=BB3_6 Depth=1
movq %rax, %rdi
movq %rbp, %rsi
movq %r15, %rdx
callq strncpy
movb $1, %bpl
testb %bpl, %bpl
je .LBB3_13
.LBB3_5: # in Loop: Header=BB3_6 Depth=1
leaq (%rbx,%r14), %rax
incq %rax
incq %r14
cmpq $3, %rax
jne .LBB3_6
jmp .LBB3_14
.LBB3_7: # in Loop: Header=BB3_6 Depth=1
movq stderr(%rip), %rdi
xorl %ebp, %ebp
movl $.L.str.7, %esi
movl %r14d, %edx
jmp .LBB3_8
.LBB3_10: # in Loop: Header=BB3_6 Depth=1
movq stderr(%rip), %rdi
xorl %ebp, %ebp
movl $.L.str.8, %esi
movl %r13d, %edx
.LBB3_8: # in Loop: Header=BB3_6 Depth=1
xorl %eax, %eax
callq fprintf
movl $1, %eax
movq %rax, 16(%rsp) # 8-byte Spill
testb %bpl, %bpl
jne .LBB3_5
.LBB3_13:
movq 16(%rsp), %r13 # 8-byte Reload
jmp .LBB3_30
.LBB3_14: # %._crit_edge
movl $.L.str.9, %edi
movq 24(%rsp), %rbx # 8-byte Reload
movl %ebx, %esi
movq 8(%rsp), %r14 # 8-byte Reload
movq %r14, %rdx
movq 88(%rsp), %rcx # 8-byte Reload
# kill: def $ecx killed $ecx killed $rcx
xorl %eax, %eax
callq printf
leaq 56(%rsp), %rsi
movq %r14, %rdi
callq _Z15read_input_filePcPi
testq %rax, %rax
je .LBB3_15
# %bb.16: # %.lr.ph120.preheader
movq %rax, %r12
leaq (,%rbx,4), %r15
movq %r15, %rdi
callq malloc
movq %rax, %r14
cmpl $2, %ebx
movl $1, %r13d
movl $1, %edx
cmovgel %ebx, %edx
shlq $2, %rdx
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
testq %r14, %r14
je .LBB3_17
# %bb.18:
movq %r12, 112(%rsp) # 8-byte Spill
movq %r14, 80(%rsp) # 8-byte Spill
leaq 264(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq %r15, %rdi
callq malloc
movq %rax, %rbp
movq %r15, 8(%rsp) # 8-byte Spill
movq %r15, %rdi
callq malloc
movq %rax, %r13
movq 40(%rsp), %r12 # 8-byte Reload
movq (%r12), %rdi
callq strlen
movq %rax, %r14
movl %r14d, (%r13)
movl $0, (%rbp)
cmpl $4, 32(%rsp) # 4-byte Folded Reload
je .LBB3_21
# %bb.19: # %.lr.ph125.preheader
movq 24(%rsp), %rax # 8-byte Reload
cmpl $3, %eax
movl $2, %ebx
cmovgel %eax, %ebx
movl $1, %r15d
.p2align 4, 0x90
.LBB3_20: # %.lr.ph125
# =>This Inner Loop Header: Depth=1
movl -4(%r13,%r15,4), %eax
addl -4(%rbp,%r15,4), %eax
movl %eax, (%rbp,%r15,4)
movq (%r12,%r15,8), %rdi
callq strlen
movl %eax, (%r13,%r15,4)
addl %eax, %r14d
incq %r15
cmpq %r15, %rbx
jne .LBB3_20
.LBB3_21: # %._crit_edge126
movslq %r14d, %rdi
movq %rdi, 32(%rsp) # 8-byte Spill
callq malloc
movq %rax, %r14
movq 24(%rsp), %rax # 8-byte Reload
cmpl $2, %eax
movl $1, %r15d
cmovgel %eax, %r15d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_22: # %.lr.ph129
# =>This Inner Loop Header: Depth=1
movslq (%rbp,%rbx,4), %rdi
addq %r14, %rdi
movq (%r12,%rbx,8), %rsi
callq strcpy
incq %rbx
cmpq %rbx, %r15
jne .LBB3_22
# %bb.23: # %._crit_edge130
leaq 48(%rsp), %rdi
movq 8(%rsp), %rsi # 8-byte Reload
callq hipMalloc
leaq 104(%rsp), %rdi
movq 32(%rsp), %r12 # 8-byte Reload
movq %r12, %rsi
callq hipMalloc
movl 56(%rsp), %eax
movl %eax, 16(%rsp) # 4-byte Spill
movslq %eax, %rbx
leaq 96(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 104(%rsp), %rdi
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movq 96(%rsp), %rdi
movq 112(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
movq 80(%rsp), %rsi # 8-byte Reload
movq 8(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
xorl %r12d, %r12d
movabsq $4294967300, %rax # imm = 0x100000004
movq %rax, %rbx
leaq 252(%rax), %r14
jmp .LBB3_24
.p2align 4, 0x90
.LBB3_26: # in Loop: Header=BB3_24 Depth=1
callq hipGetLastError
incq %r12
cmpq %r12, %r15
je .LBB3_27
.LBB3_24: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_26
# %bb.25: # in Loop: Header=BB3_24 Depth=1
movq 48(%rsp), %rax
movq %rax, 184(%rsp)
movq 96(%rsp), %rax
movq %rax, 176(%rsp)
movq 104(%rsp), %rax
movq %rax, 168(%rsp)
movl (%r13,%r12,4), %eax
movl %eax, 72(%rsp)
movl (%rbp,%r12,4), %eax
movl %eax, 68(%rsp)
movl %r12d, 76(%rsp)
movl 16(%rsp), %eax # 4-byte Reload
movl %eax, 64(%rsp)
movq 88(%rsp), %rax # 8-byte Reload
movl %eax, 60(%rsp)
leaq 184(%rsp), %rax
movq %rax, 192(%rsp)
leaq 176(%rsp), %rax
movq %rax, 200(%rsp)
leaq 168(%rsp), %rax
movq %rax, 208(%rsp)
leaq 76(%rsp), %rax
movq %rax, 216(%rsp)
leaq 72(%rsp), %rax
movq %rax, 224(%rsp)
leaq 68(%rsp), %rax
movq %rax, 232(%rsp)
leaq 64(%rsp), %rax
movq %rax, 240(%rsp)
leaq 60(%rsp), %rax
movq %rax, 248(%rsp)
leaq 152(%rsp), %rdi
leaq 136(%rsp), %rsi
leaq 128(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 152(%rsp), %rsi
movl 160(%rsp), %edx
movq 136(%rsp), %rcx
movl 144(%rsp), %r8d
movl $_Z13matchesKernelPiPcS0_iiiii, %edi
leaq 192(%rsp), %r9
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB3_26
.LBB3_27: # %._crit_edge134
movq 48(%rsp), %rsi
movq 80(%rsp), %r12 # 8-byte Reload
movq %r12, %rdi
movq 8(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
leaq 192(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 192(%rsp), %rax
movq 200(%rsp), %rcx
subq 264(%rsp), %rax
cvtsi2sd %rax, %xmm1
subq 272(%rsp), %rcx
cvtsi2sd %rcx, %xmm0
divsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
movl $.L.str.11, %edi
movb $1, %al
callq printf
movq 24(%rsp), %rax # 8-byte Reload
cmpl $2, %eax
movl $1, %ebx
cmovgel %eax, %ebx
xorl %r14d, %r14d
movq 40(%rsp), %r15 # 8-byte Reload
.p2align 4, 0x90
.LBB3_28: # %.lr.ph137
# =>This Inner Loop Header: Depth=1
movq (%r15,%r14,8), %rsi
movl (%r12,%r14,4), %edx
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
incq %r14
cmpq %r14, %rbx
jne .LBB3_28
# %bb.29: # %._crit_edge138
xorl %r13d, %r13d
jmp .LBB3_30
.LBB3_15:
movl $1, %r13d
jmp .LBB3_30
.LBB3_4:
movq stderr(%rip), %rdi
movl $.L.str.6, %esi
movl %ebx, %edx
xorl %eax, %eax
callq fprintf
movl $1, %r13d
jmp .LBB3_30
.LBB3_17:
movq stderr(%rip), %rdi
movl $.L.str.10, %esi
movq %r15, %rdx
xorl %eax, %eax
callq fprintf
.LBB3_30: # %.loopexit
movl %r13d, %eax
addq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13matchesKernelPiPcS0_iiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Unable to open the text file <%s>\n"
.size .L.str, 35
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Unable to lseek to the end\n"
.size .L.str.1, 28
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Unable to lseek to start\n"
.size .L.str.2, 26
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Unable to allocate %lld byte(s) for main array\n"
.size .L.str.3, 48
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n"
.size .L.str.4, 64
.type _Z13matchesKernelPiPcS0_iiiii,@object # @_Z13matchesKernelPiPcS0_iiiii
.section .rodata,"a",@progbits
.globl _Z13matchesKernelPiPcS0_iiiii
.p2align 3, 0x0
_Z13matchesKernelPiPcS0_iiiii:
.quad _Z28__device_stub__matchesKernelPiPcS0_iiiii
.size _Z13matchesKernelPiPcS0_iiiii, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "Usage: %s approximation_factor dna_database pattern1 pattern2 ...\n"
.size .L.str.5, 67
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Unable to allocate array of pattern of size %d\n"
.size .L.str.6, 48
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Error while parsing argument %d\n"
.size .L.str.7, 33
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Unable to allocate string of size %d\n"
.size .L.str.8, 38
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Approximate Pattern Mathing: looking for %d pattern(s) in file %s w/ distance of %d\n"
.size .L.str.9, 85
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Error: unable to allocate memory for %ldB\n"
.size .L.str.10, 43
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "APM done in %lf s\n"
.size .L.str.11, 19
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "Number of matches for pattern <%s>: %d\n"
.size .L.str.12, 40
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13matchesKernelPiPcS0_iiiii"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__matchesKernelPiPcS0_iiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13matchesKernelPiPcS0_iiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012e5ff_00000000-6_apmGPU.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2079:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2079:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Unable to open the text file <%s>\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Unable to lseek to the end\n"
.LC2:
.string "Unable to lseek to start\n"
.section .rodata.str1.8
.align 8
.LC3:
.string "Unable to allocate %lld byte(s) for main array\n"
.align 8
.LC4:
.string "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n"
.text
.globl _Z15read_input_filePcPi
.type _Z15read_input_filePcPi, @function
_Z15read_input_filePcPi:
.LFB2074:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbp
movq %rsi, %r13
movl $0, %esi
call __open_2@PLT
cmpl $-1, %eax
je .L11
movl %eax, %ebx
movl $2, %edx
movl $0, %esi
movl %eax, %edi
call lseek@PLT
movq %rax, %rbp
cmpq $-1, %rax
je .L12
movl $0, %edx
movl $0, %esi
movl %ebx, %edi
call lseek@PLT
cmpq $-1, %rax
je .L13
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r12
testq %rax, %rax
je .L14
movq %rbp, %rcx
movq %rbp, %rdx
movq %rax, %rsi
movl %ebx, %edi
call __read_chk@PLT
movq %rax, %r8
cltq
cmpq %rbp, %rax
jne .L15
movl %r8d, 0(%r13)
movl %ebx, %edi
call close@PLT
.L3:
movq %r12, %rax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
movq %rbp, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %r12d
jmp .L3
.L12:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %r12d
jmp .L3
.L13:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %r12d
jmp .L3
.L14:
movq %rbp, %rcx
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L3
.L15:
movq %rbp, %rcx
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %r12d
jmp .L3
.cfi_endproc
.LFE2074:
.size _Z15read_input_filePcPi, .-_Z15read_input_filePcPi
.globl _Z11levenshteinPcS_iPi
.type _Z11levenshteinPcS_iPi, @function
_Z11levenshteinPcS_iPi:
.LFB2075:
.cfi_startproc
endbr64
movq %rdi, %r9
movl %edx, %r11d
movq %rcx, %rdi
testl %edx, %edx
je .L27
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl $1, %eax
.L18:
movl %eax, %edx
movl %eax, (%rdi,%rdx,4)
addl $1, %eax
cmpl %eax, %r11d
jnb .L18
movq %rsi, %r8
movl $1, %ebx
movl %r11d, %r10d
jmp .L22
.L19:
movzbl (%r8), %ecx
cmpb %cl, (%r9,%rax)
setne %dl
movzbl %dl, %edx
addl %r12d, %edx
addl $1, %ebp
cmpl %ebp, %edx
cmova %ebp, %edx
.L20:
movl %edx, 4(%rdi,%rax,4)
addq $1, %rax
cmpq %rax, %r10
je .L30
.L21:
movl 4(%rdi,%rax,4), %ecx
movl %esi, %r12d
movl %ecx, %esi
movl (%rdi,%rax,4), %ebp
cmpl %ebp, %ecx
jge .L19
movzbl (%r8), %edx
cmpb %dl, (%r9,%rax)
setne %dl
movzbl %dl, %edx
addl %r12d, %edx
addl $1, %ecx
cmpl %ecx, %edx
cmova %ecx, %edx
jmp .L20
.L30:
leal 1(%rbx), %eax
addq $1, %r8
cmpl %ebx, %r11d
je .L31
movl %eax, %ebx
.L22:
movl %ebx, (%rdi)
leal -1(%rbx), %esi
movl $0, %eax
jmp .L21
.L31:
movslq %r11d, %r11
movl (%rdi,%r11,4), %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
movslq %edx, %r11
movl (%rcx,%r11,4), %eax
ret
.cfi_endproc
.LFE2075:
.size _Z11levenshteinPcS_iPi, .-_Z11levenshteinPcS_iPi
.globl _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii
.type _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii, @function
_Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii:
.LFB2101:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L36
.L32:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L37
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z13matchesKernelPiPcS0_iiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L32
.L37:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2101:
.size _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii, .-_Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii
.globl _Z13matchesKernelPiPcS0_iiiii
.type _Z13matchesKernelPiPcS0_iiiii, @function
_Z13matchesKernelPiPcS0_iiiii:
.LFB2102:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2102:
.size _Z13matchesKernelPiPcS0_iiiii, .-_Z13matchesKernelPiPcS0_iiiii
.section .rodata.str1.8
.align 8
.LC5:
.string "Usage: %s approximation_factor dna_database pattern1 pattern2 ...\n"
.align 8
.LC6:
.string "Unable to allocate array of pattern of size %d\n"
.align 8
.LC7:
.string "Error while parsing argument %d\n"
.align 8
.LC8:
.string "Unable to allocate string of size %d\n"
.align 8
.LC9:
.string "Approximate Pattern Mathing: looking for %d pattern(s) in file %s w/ distance of %d\n"
.align 8
.LC10:
.string "Error: unable to allocate memory for %ldB\n"
.section .rodata.str1.1
.LC12:
.string "APM done in %lf s\n"
.section .rodata.str1.8
.align 8
.LC13:
.string "Number of matches for pattern <%s>: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2076:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movl %edi, 16(%rsp)
movq %rsi, %r14
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
cmpl $3, %edi
jle .L64
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, 32(%rsp)
movq 16(%r14), %rax
movq %rax, 24(%rsp)
movl 16(%rsp), %eax
subl $3, %eax
movl %eax, 40(%rsp)
cltq
movq %rax, 48(%rsp)
leaq 0(,%rax,8), %rdi
call malloc@PLT
movq %rax, %r13
testq %rax, %rax
je .L65
movl 16(%rsp), %eax
leal -4(%rax), %r15d
movl $0, %ebp
jmp .L46
.L64:
movq (%rsi), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $1, %eax
.L40:
movq 152(%rsp), %rdx
subq %fs:40, %rdx
jne .L66
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L65:
.cfi_restore_state
movl 40(%rsp), %ecx
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L40
.L56:
movq %rax, %rbp
.L46:
movq 24(%r14,%rbp,8), %r12
movq %r12, %rdi
call strlen@PLT
movl %eax, 8(%rsp)
testl %eax, %eax
jle .L67
addl $1, %eax
movslq %eax, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rdi
movq %rax, 0(%r13,%rbp,8)
testq %rax, %rax
je .L68
movq %rbx, %rcx
movq %rbx, %rdx
movq %r12, %rsi
call __strncpy_chk@PLT
leaq 1(%rbp), %rax
cmpq %r15, %rbp
jne .L56
movq 32(%rsp), %rax
movl %eax, 44(%rsp)
movl %eax, %r8d
movq 24(%rsp), %rbx
movq %rbx, %rcx
movl 40(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 68(%rsp), %rsi
movq %rbx, %rdi
call _Z15read_input_filePcPi
movq %rax, 56(%rsp)
testq %rax, %rax
je .L57
movq 48(%rsp), %rax
salq $2, %rax
movq %rax, 32(%rsp)
movq %rax, %rdi
call malloc@PLT
movq %rax, %r14
leaq 4(%rax,%r15,4), %rdx
.L47:
movl $0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L47
testq %r14, %r14
je .L69
leaq 112(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movq 32(%rsp), %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
movq %rax, 8(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbx
movq %rax, 48(%rsp)
movq 0(%r13), %rdi
call strlen@PLT
movl %eax, 24(%rsp)
movl %eax, (%rbx)
movl $0, 0(%rbp)
cmpl $1, 40(%rsp)
jle .L49
leaq 8(%r13), %r12
addq $4, %rbp
leaq 4(%rbx), %rbx
movl 16(%rsp), %eax
leal -5(%rax), %eax
leaq 16(%r13,%rax,8), %rax
movq %rax, 16(%rsp)
.L50:
movl -4(%rbx), %eax
addl -4(%rbp), %eax
movl %eax, 0(%rbp)
movq (%r12), %rdi
call strlen@PLT
movl %eax, (%rbx)
addl %eax, 24(%rsp)
addq $8, %r12
addq $4, %rbp
addq $4, %rbx
movq 16(%rsp), %rax
cmpq %rax, %r12
jne .L50
.L49:
movslq 24(%rsp), %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r12
movl $0, %ebx
.L51:
movq 8(%rsp), %rax
movslq (%rax,%rbx,4), %rdi
cmpq %rdi, %rbp
movq %rdi, %rdx
cmovnb %rbp, %rdx
subq %rdi, %rdx
movq 0(%r13,%rbx,8), %rsi
addq %r12, %rdi
call __strcpy_chk@PLT
movq %rbx, %rax
addq $1, %rbx
cmpq %r15, %rax
jne .L51
leaq 72(%rsp), %rdi
movq 32(%rsp), %rsi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl 68(%rsp), %r15d
movslq %r15d, %rbx
leaq 88(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq 32(%rsp), %rdx
movq %r14, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %ebx
jmp .L53
.L67:
leal 3(%rbp), %ecx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L40
.L68:
movl 8(%rsp), %ecx
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L40
.L69:
movq 32(%rsp), %rcx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L40
.L52:
call cudaGetLastError@PLT
addq $1, %rbx
cmpl %ebx, 40(%rsp)
jle .L70
.L53:
movl $256, 128(%rsp)
movl $1, 132(%rsp)
movl $1, 136(%rsp)
movl $4, 100(%rsp)
movl $1, 104(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 128(%rsp), %rdx
movl $1, %ecx
movq 100(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L52
movl 44(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 232
pushq %r15
.cfi_def_cfa_offset 240
movq 24(%rsp), %rax
movl (%rax,%rbx,4), %r9d
movq 64(%rsp), %rax
movl (%rax,%rbx,4), %r8d
movl %ebx, %ecx
movq 96(%rsp), %rdx
movq 104(%rsp), %rsi
movq 88(%rsp), %rdi
call _Z43__device_stub__Z13matchesKernelPiPcS0_iiiiiPiPcS0_iiiii
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L52
.L70:
movl $2, %ecx
movq 32(%rsp), %rdx
movq 72(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
leaq 128(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movq 136(%rsp), %rax
subq 120(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC11(%rip), %xmm0
movq 128(%rsp), %rax
subq 112(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
addsd %xmm1, %xmm0
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0, %ebx
leaq .LC13(%rip), %rbp
.L54:
movl (%r14,%rbx,4), %ecx
movq 0(%r13,%rbx,8), %rdx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 40(%rsp)
jg .L54
movl $0, %eax
jmp .L40
.L57:
movl $1, %eax
jmp .L40
.L66:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2076:
.size main, .-main
.section .rodata.str1.1
.LC14:
.string "_Z13matchesKernelPiPcS0_iiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2104:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z13matchesKernelPiPcS0_iiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2104:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC11:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "apmGPU.hip"
.globl _Z15read_input_filePcPi # -- Begin function _Z15read_input_filePcPi
.p2align 4, 0x90
.type _Z15read_input_filePcPi,@function
_Z15read_input_filePcPi: # @_Z15read_input_filePcPi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
movq %rdi, %r15
xorl %esi, %esi
xorl %eax, %eax
callq open
cmpl $-1, %eax
je .LBB0_1
# %bb.3:
movl %eax, %ebp
movl %eax, %edi
xorl %esi, %esi
movl $2, %edx
callq lseek
cmpq $-1, %rax
je .LBB0_4
# %bb.6:
movq %rax, %r15
movl %ebp, %edi
xorl %esi, %esi
xorl %edx, %edx
callq lseek
cmpq $-1, %rax
je .LBB0_7
# %bb.8:
movq %r15, %rdi
callq malloc
testq %rax, %rax
je .LBB0_9
# %bb.10:
movq %rax, %rbx
movl %ebp, %edi
movq %rax, %rsi
movq %r15, %rdx
callq read
movslq %eax, %rcx
cmpq %r15, %rcx
jne .LBB0_11
# %bb.12:
movl %eax, (%r14)
movl %ebp, %edi
callq close
.LBB0_13:
movq %rbx, %rax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_1:
.cfi_def_cfa_offset 48
movq stderr(%rip), %rdi
xorl %ebx, %ebx
movl $.L.str, %esi
jmp .LBB0_2
.LBB0_4:
movq stderr(%rip), %rcx
movl $.L.str.1, %edi
movl $27, %esi
jmp .LBB0_5
.LBB0_7:
movq stderr(%rip), %rcx
movl $.L.str.2, %edi
movl $25, %esi
.LBB0_5:
movl $1, %edx
callq fwrite@PLT
xorl %ebx, %ebx
jmp .LBB0_13
.LBB0_9:
movq stderr(%rip), %rdi
xorl %ebx, %ebx
movl $.L.str.3, %esi
.LBB0_2:
movq %r15, %rdx
xorl %eax, %eax
callq fprintf
jmp .LBB0_13
.LBB0_11:
movq stderr(%rip), %rdi
xorl %ebx, %ebx
movl $.L.str.4, %esi
movq %r15, %rdx
movl %eax, %ecx
xorl %eax, %eax
callq fprintf
jmp .LBB0_13
.Lfunc_end0:
.size _Z15read_input_filePcPi, .Lfunc_end0-_Z15read_input_filePcPi
.cfi_endproc
# -- End function
.globl _Z11levenshteinPcS_iPi # -- Begin function _Z11levenshteinPcS_iPi
.p2align 4, 0x90
.type _Z11levenshteinPcS_iPi,@function
_Z11levenshteinPcS_iPi: # @_Z11levenshteinPcS_iPi
.cfi_startproc
# %bb.0:
# kill: def $edx killed $edx def $rdx
testl %edx, %edx
je .LBB1_8
# %bb.1: # %.lr.ph.preheader
leal 1(%rdx), %r8d
cmpl $3, %r8d
movl $2, %eax
cmovael %r8d, %eax
movl $1, %r9d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %r9d, (%rcx,%r9,4)
incq %r9
cmpq %r9, %rax
jne .LBB1_2
# %bb.3: # %.lr.ph71
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $3, %r8d
movl $2, %eax
cmovael %r8d, %eax
leaq -1(%rax), %r8
movl $1, %r9d
.p2align 4, 0x90
.LBB1_4: # %.lr.ph68
# =>This Loop Header: Depth=1
# Child Loop BB1_5 Depth 2
movl %r9d, (%rcx)
leaq -1(%r9), %r10
xorl %r11d, %r11d
movl %r9d, %ebx
movl %r10d, %ebp
.p2align 4, 0x90
.LBB1_5: # Parent Loop BB1_4 Depth=1
# => This Inner Loop Header: Depth=2
movl 4(%rcx,%r11,4), %r14d
movzbl (%rdi,%r11), %r15d
cmpl %ebx, %r14d
cmovll %r14d, %ebx
incl %ebx
xorl %r12d, %r12d
cmpb (%rsi,%r10), %r15b
setne %r12b
addl %ebp, %r12d
cmpl %r12d, %ebx
cmovael %r12d, %ebx
movl %ebx, 4(%rcx,%r11,4)
incq %r11
movl %r14d, %ebp
cmpq %r11, %r8
jne .LBB1_5
# %bb.6: # %._crit_edge
# in Loop: Header=BB1_4 Depth=1
incq %r9
cmpq %rax, %r9
jne .LBB1_4
# %bb.7:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB1_8: # %._crit_edge72
movslq %edx, %rax
movl (%rcx,%rax,4), %eax
retq
.Lfunc_end1:
.size _Z11levenshteinPcS_iPi, .Lfunc_end1-_Z11levenshteinPcS_iPi
.cfi_endproc
# -- End function
.globl _Z28__device_stub__matchesKernelPiPcS0_iiiii # -- Begin function _Z28__device_stub__matchesKernelPiPcS0_iiiii
.p2align 4, 0x90
.type _Z28__device_stub__matchesKernelPiPcS0_iiiii,@function
_Z28__device_stub__matchesKernelPiPcS0_iiiii: # @_Z28__device_stub__matchesKernelPiPcS0_iiiii
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13matchesKernelPiPcS0_iiiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end2:
.size _Z28__device_stub__matchesKernelPiPcS0_iiiii, .Lfunc_end2-_Z28__device_stub__matchesKernelPiPcS0_iiiii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 336
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r12
cmpl $3, %edi
jg .LBB3_2
# %bb.1:
movq (%r12), %rsi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl $1, %r13d
jmp .LBB3_30
.LBB3_2:
movl %edi, %r14d
movq 8(%r12), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 88(%rsp) # 8-byte Spill
movq 16(%r12), %r15
leal -3(%r14), %ebx
leaq (,%rbx,8), %rdi
callq malloc
movq %rax, 40(%rsp) # 8-byte Spill
testq %rax, %rax
je .LBB3_4
# %bb.3: # %.lr.ph.preheader
movq %r15, 8(%rsp) # 8-byte Spill
xorl %eax, %eax
movq %rax, 16(%rsp) # 8-byte Spill
movq %r14, 32(%rsp) # 8-byte Spill
movq %rbx, 24(%rsp) # 8-byte Spill
negq %rbx
movl $3, %r14d
.p2align 4, 0x90
.LBB3_6: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq (%r12,%r14,8), %rbp
movq %rbp, %rdi
callq strlen
movq %rax, %r13
testl %r13d, %r13d
jle .LBB3_7
# %bb.9: # in Loop: Header=BB3_6 Depth=1
movl %r13d, %r15d
incl %r15d
movq %r15, %rdi
callq malloc
movq 40(%rsp), %rcx # 8-byte Reload
movq %rax, -24(%rcx,%r14,8)
testq %rax, %rax
je .LBB3_10
# %bb.11: # in Loop: Header=BB3_6 Depth=1
movq %rax, %rdi
movq %rbp, %rsi
movq %r15, %rdx
callq strncpy
movb $1, %bpl
testb %bpl, %bpl
je .LBB3_13
.LBB3_5: # in Loop: Header=BB3_6 Depth=1
leaq (%rbx,%r14), %rax
incq %rax
incq %r14
cmpq $3, %rax
jne .LBB3_6
jmp .LBB3_14
.LBB3_7: # in Loop: Header=BB3_6 Depth=1
movq stderr(%rip), %rdi
xorl %ebp, %ebp
movl $.L.str.7, %esi
movl %r14d, %edx
jmp .LBB3_8
.LBB3_10: # in Loop: Header=BB3_6 Depth=1
movq stderr(%rip), %rdi
xorl %ebp, %ebp
movl $.L.str.8, %esi
movl %r13d, %edx
.LBB3_8: # in Loop: Header=BB3_6 Depth=1
xorl %eax, %eax
callq fprintf
movl $1, %eax
movq %rax, 16(%rsp) # 8-byte Spill
testb %bpl, %bpl
jne .LBB3_5
.LBB3_13:
movq 16(%rsp), %r13 # 8-byte Reload
jmp .LBB3_30
.LBB3_14: # %._crit_edge
movl $.L.str.9, %edi
movq 24(%rsp), %rbx # 8-byte Reload
movl %ebx, %esi
movq 8(%rsp), %r14 # 8-byte Reload
movq %r14, %rdx
movq 88(%rsp), %rcx # 8-byte Reload
# kill: def $ecx killed $ecx killed $rcx
xorl %eax, %eax
callq printf
leaq 56(%rsp), %rsi
movq %r14, %rdi
callq _Z15read_input_filePcPi
testq %rax, %rax
je .LBB3_15
# %bb.16: # %.lr.ph120.preheader
movq %rax, %r12
leaq (,%rbx,4), %r15
movq %r15, %rdi
callq malloc
movq %rax, %r14
cmpl $2, %ebx
movl $1, %r13d
movl $1, %edx
cmovgel %ebx, %edx
shlq $2, %rdx
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
testq %r14, %r14
je .LBB3_17
# %bb.18:
movq %r12, 112(%rsp) # 8-byte Spill
movq %r14, 80(%rsp) # 8-byte Spill
leaq 264(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq %r15, %rdi
callq malloc
movq %rax, %rbp
movq %r15, 8(%rsp) # 8-byte Spill
movq %r15, %rdi
callq malloc
movq %rax, %r13
movq 40(%rsp), %r12 # 8-byte Reload
movq (%r12), %rdi
callq strlen
movq %rax, %r14
movl %r14d, (%r13)
movl $0, (%rbp)
cmpl $4, 32(%rsp) # 4-byte Folded Reload
je .LBB3_21
# %bb.19: # %.lr.ph125.preheader
movq 24(%rsp), %rax # 8-byte Reload
cmpl $3, %eax
movl $2, %ebx
cmovgel %eax, %ebx
movl $1, %r15d
.p2align 4, 0x90
.LBB3_20: # %.lr.ph125
# =>This Inner Loop Header: Depth=1
movl -4(%r13,%r15,4), %eax
addl -4(%rbp,%r15,4), %eax
movl %eax, (%rbp,%r15,4)
movq (%r12,%r15,8), %rdi
callq strlen
movl %eax, (%r13,%r15,4)
addl %eax, %r14d
incq %r15
cmpq %r15, %rbx
jne .LBB3_20
.LBB3_21: # %._crit_edge126
movslq %r14d, %rdi
movq %rdi, 32(%rsp) # 8-byte Spill
callq malloc
movq %rax, %r14
movq 24(%rsp), %rax # 8-byte Reload
cmpl $2, %eax
movl $1, %r15d
cmovgel %eax, %r15d
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB3_22: # %.lr.ph129
# =>This Inner Loop Header: Depth=1
movslq (%rbp,%rbx,4), %rdi
addq %r14, %rdi
movq (%r12,%rbx,8), %rsi
callq strcpy
incq %rbx
cmpq %rbx, %r15
jne .LBB3_22
# %bb.23: # %._crit_edge130
leaq 48(%rsp), %rdi
movq 8(%rsp), %rsi # 8-byte Reload
callq hipMalloc
leaq 104(%rsp), %rdi
movq 32(%rsp), %r12 # 8-byte Reload
movq %r12, %rsi
callq hipMalloc
movl 56(%rsp), %eax
movl %eax, 16(%rsp) # 4-byte Spill
movslq %eax, %rbx
leaq 96(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 104(%rsp), %rdi
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
movq 96(%rsp), %rdi
movq 112(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
movq 80(%rsp), %rsi # 8-byte Reload
movq 8(%rsp), %rdx # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
xorl %r12d, %r12d
movabsq $4294967300, %rax # imm = 0x100000004
movq %rax, %rbx
leaq 252(%rax), %r14
jmp .LBB3_24
.p2align 4, 0x90
.LBB3_26: # in Loop: Header=BB3_24 Depth=1
callq hipGetLastError
incq %r12
cmpq %r12, %r15
je .LBB3_27
.LBB3_24: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_26
# %bb.25: # in Loop: Header=BB3_24 Depth=1
movq 48(%rsp), %rax
movq %rax, 184(%rsp)
movq 96(%rsp), %rax
movq %rax, 176(%rsp)
movq 104(%rsp), %rax
movq %rax, 168(%rsp)
movl (%r13,%r12,4), %eax
movl %eax, 72(%rsp)
movl (%rbp,%r12,4), %eax
movl %eax, 68(%rsp)
movl %r12d, 76(%rsp)
movl 16(%rsp), %eax # 4-byte Reload
movl %eax, 64(%rsp)
movq 88(%rsp), %rax # 8-byte Reload
movl %eax, 60(%rsp)
leaq 184(%rsp), %rax
movq %rax, 192(%rsp)
leaq 176(%rsp), %rax
movq %rax, 200(%rsp)
leaq 168(%rsp), %rax
movq %rax, 208(%rsp)
leaq 76(%rsp), %rax
movq %rax, 216(%rsp)
leaq 72(%rsp), %rax
movq %rax, 224(%rsp)
leaq 68(%rsp), %rax
movq %rax, 232(%rsp)
leaq 64(%rsp), %rax
movq %rax, 240(%rsp)
leaq 60(%rsp), %rax
movq %rax, 248(%rsp)
leaq 152(%rsp), %rdi
leaq 136(%rsp), %rsi
leaq 128(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 152(%rsp), %rsi
movl 160(%rsp), %edx
movq 136(%rsp), %rcx
movl 144(%rsp), %r8d
movl $_Z13matchesKernelPiPcS0_iiiii, %edi
leaq 192(%rsp), %r9
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB3_26
.LBB3_27: # %._crit_edge134
movq 48(%rsp), %rsi
movq 80(%rsp), %r12 # 8-byte Reload
movq %r12, %rdi
movq 8(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
leaq 192(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movq 192(%rsp), %rax
movq 200(%rsp), %rcx
subq 264(%rsp), %rax
cvtsi2sd %rax, %xmm1
subq 272(%rsp), %rcx
cvtsi2sd %rcx, %xmm0
divsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
movl $.L.str.11, %edi
movb $1, %al
callq printf
movq 24(%rsp), %rax # 8-byte Reload
cmpl $2, %eax
movl $1, %ebx
cmovgel %eax, %ebx
xorl %r14d, %r14d
movq 40(%rsp), %r15 # 8-byte Reload
.p2align 4, 0x90
.LBB3_28: # %.lr.ph137
# =>This Inner Loop Header: Depth=1
movq (%r15,%r14,8), %rsi
movl (%r12,%r14,4), %edx
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
incq %r14
cmpq %r14, %rbx
jne .LBB3_28
# %bb.29: # %._crit_edge138
xorl %r13d, %r13d
jmp .LBB3_30
.LBB3_15:
movl $1, %r13d
jmp .LBB3_30
.LBB3_4:
movq stderr(%rip), %rdi
movl $.L.str.6, %esi
movl %ebx, %edx
xorl %eax, %eax
callq fprintf
movl $1, %r13d
jmp .LBB3_30
.LBB3_17:
movq stderr(%rip), %rdi
movl $.L.str.10, %esi
movq %r15, %rdx
xorl %eax, %eax
callq fprintf
.LBB3_30: # %.loopexit
movl %r13d, %eax
addq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13matchesKernelPiPcS0_iiiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Unable to open the text file <%s>\n"
.size .L.str, 35
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Unable to lseek to the end\n"
.size .L.str.1, 28
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Unable to lseek to start\n"
.size .L.str.2, 26
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Unable to allocate %lld byte(s) for main array\n"
.size .L.str.3, 48
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n"
.size .L.str.4, 64
.type _Z13matchesKernelPiPcS0_iiiii,@object # @_Z13matchesKernelPiPcS0_iiiii
.section .rodata,"a",@progbits
.globl _Z13matchesKernelPiPcS0_iiiii
.p2align 3, 0x0
_Z13matchesKernelPiPcS0_iiiii:
.quad _Z28__device_stub__matchesKernelPiPcS0_iiiii
.size _Z13matchesKernelPiPcS0_iiiii, 8
.type .L.str.5,@object # @.str.5
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.5:
.asciz "Usage: %s approximation_factor dna_database pattern1 pattern2 ...\n"
.size .L.str.5, 67
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Unable to allocate array of pattern of size %d\n"
.size .L.str.6, 48
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Error while parsing argument %d\n"
.size .L.str.7, 33
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Unable to allocate string of size %d\n"
.size .L.str.8, 38
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Approximate Pattern Mathing: looking for %d pattern(s) in file %s w/ distance of %d\n"
.size .L.str.9, 85
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Error: unable to allocate memory for %ldB\n"
.size .L.str.10, 43
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "APM done in %lf s\n"
.size .L.str.11, 19
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "Number of matches for pattern <%s>: %d\n"
.size .L.str.12, 40
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13matchesKernelPiPcS0_iiiii"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__matchesKernelPiPcS0_iiiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13matchesKernelPiPcS0_iiiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
__global__ void vectorAdd(int* a, int* b, int* c, int n){
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < n){
c[tid] = a[tid] + b[tid];
}
}
void matrix_init(int* a, int n){
for (int i = 0; i < n; i++){
a[i] = rand()%100;
}
}
void check_error(int* a, int* b, int* c, int n){
for (int i = 0; i < n; i++){
assert(c[i] == a[i] + b[i]);
}
}
int main(){
// 2^16
int n = 1 << 16;
// h_ = host variables (cpu)
int *h_a, *h_b, *h_c;
// device variables (gpu)
int *d_a, *d_b, *d_c;
size_t bytes = sizeof(int) * n;
// Allocate host memory
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate device memory
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Initialize vectors with random values
matrix_init(h_a, n);
matrix_init(h_b, n);
// Copy data from host to device
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int NUM_THREADS = 256;
int NUM_BLOCKS = (int)ceil(n / NUM_THREADS);
vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, n);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
check_error(h_a, h_b, h_c, n);
printf("Completed.\n");
return 0;
} | code for sm_80
Function : _Z9vectorAddPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
__global__ void vectorAdd(int* a, int* b, int* c, int n){
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < n){
c[tid] = a[tid] + b[tid];
}
}
void matrix_init(int* a, int n){
for (int i = 0; i < n; i++){
a[i] = rand()%100;
}
}
void check_error(int* a, int* b, int* c, int n){
for (int i = 0; i < n; i++){
assert(c[i] == a[i] + b[i]);
}
}
int main(){
// 2^16
int n = 1 << 16;
// h_ = host variables (cpu)
int *h_a, *h_b, *h_c;
// device variables (gpu)
int *d_a, *d_b, *d_c;
size_t bytes = sizeof(int) * n;
// Allocate host memory
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate device memory
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Initialize vectors with random values
matrix_init(h_a, n);
matrix_init(h_b, n);
// Copy data from host to device
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int NUM_THREADS = 256;
int NUM_BLOCKS = (int)ceil(n / NUM_THREADS);
vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, n);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
check_error(h_a, h_b, h_c, n);
printf("Completed.\n");
return 0;
} | .file "tmpxft_00195d13_00000000-6_vector_add.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11matrix_initPii
.type _Z11matrix_initPii, @function
_Z11matrix_initPii:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2057:
.size _Z11matrix_initPii, .-_Z11matrix_initPii
.globl _Z11check_errorPiS_S_i
.type _Z11check_errorPiS_S_i, @function
_Z11check_errorPiS_S_i:
.LFB2058:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L11
movl $0, %eax
.L13:
addl $1, %eax
cmpl %eax, %ecx
jne .L13
.L11:
ret
.cfi_endproc
.LFE2058:
.size _Z11check_errorPiS_S_i, .-_Z11check_errorPiS_S_i
.globl _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i
.type _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i, @function
_Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i:
.LFB2084:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9vectorAddPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i, .-_Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i
.globl _Z9vectorAddPiS_S_i
.type _Z9vectorAddPiS_S_i, @function
_Z9vectorAddPiS_S_i:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z9vectorAddPiS_S_i, .-_Z9vectorAddPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Completed.\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $262144, %edi
call malloc@PLT
movq %rax, %rbp
movl $262144, %edi
call malloc@PLT
movq %rax, %rbx
movl $262144, %edi
call malloc@PLT
movq %rax, %r12
leaq 8(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
movl $65536, %esi
movq %rbp, %rdi
call _Z11matrix_initPii
movl $65536, %esi
movq %rbx, %rdi
call _Z11matrix_initPii
movl $1, %ecx
movl $262144, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $262144, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $256, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L27
.L24:
movl $2, %ecx
movl $262144, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L28
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
movl $65536, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i
jmp .L24
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z9vectorAddPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9vectorAddPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
__global__ void vectorAdd(int* a, int* b, int* c, int n){
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < n){
c[tid] = a[tid] + b[tid];
}
}
void matrix_init(int* a, int n){
for (int i = 0; i < n; i++){
a[i] = rand()%100;
}
}
void check_error(int* a, int* b, int* c, int n){
for (int i = 0; i < n; i++){
assert(c[i] == a[i] + b[i]);
}
}
int main(){
// 2^16
int n = 1 << 16;
// h_ = host variables (cpu)
int *h_a, *h_b, *h_c;
// device variables (gpu)
int *d_a, *d_b, *d_c;
size_t bytes = sizeof(int) * n;
// Allocate host memory
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate device memory
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Initialize vectors with random values
matrix_init(h_a, n);
matrix_init(h_b, n);
// Copy data from host to device
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int NUM_THREADS = 256;
int NUM_BLOCKS = (int)ceil(n / NUM_THREADS);
vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, n);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
check_error(h_a, h_b, h_c, n);
printf("Completed.\n");
return 0;
} | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
__global__ void vectorAdd(int* a, int* b, int* c, int n){
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < n){
c[tid] = a[tid] + b[tid];
}
}
void matrix_init(int* a, int n){
for (int i = 0; i < n; i++){
a[i] = rand()%100;
}
}
void check_error(int* a, int* b, int* c, int n){
for (int i = 0; i < n; i++){
assert(c[i] == a[i] + b[i]);
}
}
int main(){
// 2^16
int n = 1 << 16;
// h_ = host variables (cpu)
int *h_a, *h_b, *h_c;
// device variables (gpu)
int *d_a, *d_b, *d_c;
size_t bytes = sizeof(int) * n;
// Allocate host memory
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate device memory
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Initialize vectors with random values
matrix_init(h_a, n);
matrix_init(h_b, n);
// Copy data from host to device
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int NUM_THREADS = 256;
int NUM_BLOCKS = (int)ceil(n / NUM_THREADS);
vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, n);
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
check_error(h_a, h_b, h_c, n);
printf("Completed.\n");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
__global__ void vectorAdd(int* a, int* b, int* c, int n){
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < n){
c[tid] = a[tid] + b[tid];
}
}
void matrix_init(int* a, int n){
for (int i = 0; i < n; i++){
a[i] = rand()%100;
}
}
void check_error(int* a, int* b, int* c, int n){
for (int i = 0; i < n; i++){
assert(c[i] == a[i] + b[i]);
}
}
int main(){
// 2^16
int n = 1 << 16;
// h_ = host variables (cpu)
int *h_a, *h_b, *h_c;
// device variables (gpu)
int *d_a, *d_b, *d_c;
size_t bytes = sizeof(int) * n;
// Allocate host memory
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate device memory
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Initialize vectors with random values
matrix_init(h_a, n);
matrix_init(h_b, n);
// Copy data from host to device
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int NUM_THREADS = 256;
int NUM_BLOCKS = (int)ceil(n / NUM_THREADS);
vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, n);
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
check_error(h_a, h_b, h_c, n);
printf("Completed.\n");
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9vectorAddPiS_S_i
.globl _Z9vectorAddPiS_S_i
.p2align 8
.type _Z9vectorAddPiS_S_i,@function
_Z9vectorAddPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9vectorAddPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9vectorAddPiS_S_i, .Lfunc_end0-_Z9vectorAddPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9vectorAddPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9vectorAddPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
__global__ void vectorAdd(int* a, int* b, int* c, int n){
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < n){
c[tid] = a[tid] + b[tid];
}
}
void matrix_init(int* a, int n){
for (int i = 0; i < n; i++){
a[i] = rand()%100;
}
}
void check_error(int* a, int* b, int* c, int n){
for (int i = 0; i < n; i++){
assert(c[i] == a[i] + b[i]);
}
}
int main(){
// 2^16
int n = 1 << 16;
// h_ = host variables (cpu)
int *h_a, *h_b, *h_c;
// device variables (gpu)
int *d_a, *d_b, *d_c;
size_t bytes = sizeof(int) * n;
// Allocate host memory
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate device memory
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Initialize vectors with random values
matrix_init(h_a, n);
matrix_init(h_b, n);
// Copy data from host to device
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int NUM_THREADS = 256;
int NUM_BLOCKS = (int)ceil(n / NUM_THREADS);
vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, n);
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
check_error(h_a, h_b, h_c, n);
printf("Completed.\n");
return 0;
} | .text
.file "vector_add.hip"
.globl _Z24__device_stub__vectorAddPiS_S_i # -- Begin function _Z24__device_stub__vectorAddPiS_S_i
.p2align 4, 0x90
.type _Z24__device_stub__vectorAddPiS_S_i,@function
_Z24__device_stub__vectorAddPiS_S_i: # @_Z24__device_stub__vectorAddPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9vectorAddPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__vectorAddPiS_S_i, .Lfunc_end0-_Z24__device_stub__vectorAddPiS_S_i
.cfi_endproc
# -- End function
.globl _Z11matrix_initPii # -- Begin function _Z11matrix_initPii
.p2align 4, 0x90
.type _Z11matrix_initPii,@function
_Z11matrix_initPii: # @_Z11matrix_initPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z11matrix_initPii, .Lfunc_end1-_Z11matrix_initPii
.cfi_endproc
# -- End function
.globl _Z11check_errorPiS_S_i # -- Begin function _Z11check_errorPiS_S_i
.p2align 4, 0x90
.type _Z11check_errorPiS_S_i,@function
_Z11check_errorPiS_S_i: # @_Z11check_errorPiS_S_i
.cfi_startproc
# %bb.0:
retq
.Lfunc_end2:
.size _Z11check_errorPiS_S_i, .Lfunc_end2-_Z11check_errorPiS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $152, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $262144, %edi # imm = 0x40000
callq malloc
movq %rax, %r15
movl $262144, %edi # imm = 0x40000
callq malloc
movq %rax, %r14
movl $262144, %edi # imm = 0x40000
callq malloc
movq %rax, %rbx
leaq 32(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%r15,%r12,4)
incq %r12
cmpq $65536, %r12 # imm = 0x10000
jne .LBB3_1
# %bb.2: # %.lr.ph.i27.preheader
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.i27
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%r14,%r12,4)
incq %r12
cmpq $65536, %r12 # imm = 0x10000
jne .LBB3_3
# %bb.4: # %_Z11matrix_initPii.exit31
movq 32(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967552, %rdi # imm = 0x100000100
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_6
# %bb.5:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $65536, 12(%rsp) # imm = 0x10000
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z9vectorAddPiS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_6:
movq 16(%rsp), %rsi
movl $262144, %edx # imm = 0x40000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9vectorAddPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9vectorAddPiS_S_i,@object # @_Z9vectorAddPiS_S_i
.section .rodata,"a",@progbits
.globl _Z9vectorAddPiS_S_i
.p2align 3, 0x0
_Z9vectorAddPiS_S_i:
.quad _Z24__device_stub__vectorAddPiS_S_i
.size _Z9vectorAddPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9vectorAddPiS_S_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Completed."
.size .Lstr, 11
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__vectorAddPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9vectorAddPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9vectorAddPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9vectorAddPiS_S_i
.globl _Z9vectorAddPiS_S_i
.p2align 8
.type _Z9vectorAddPiS_S_i,@function
_Z9vectorAddPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9vectorAddPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9vectorAddPiS_S_i, .Lfunc_end0-_Z9vectorAddPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9vectorAddPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9vectorAddPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00195d13_00000000-6_vector_add.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11matrix_initPii
.type _Z11matrix_initPii, @function
_Z11matrix_initPii:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2057:
.size _Z11matrix_initPii, .-_Z11matrix_initPii
.globl _Z11check_errorPiS_S_i
.type _Z11check_errorPiS_S_i, @function
_Z11check_errorPiS_S_i:
.LFB2058:
.cfi_startproc
endbr64
testl %ecx, %ecx
jle .L11
movl $0, %eax
.L13:
addl $1, %eax
cmpl %eax, %ecx
jne .L13
.L11:
ret
.cfi_endproc
.LFE2058:
.size _Z11check_errorPiS_S_i, .-_Z11check_errorPiS_S_i
.globl _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i
.type _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i, @function
_Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i:
.LFB2084:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9vectorAddPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i, .-_Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i
.globl _Z9vectorAddPiS_S_i
.type _Z9vectorAddPiS_S_i, @function
_Z9vectorAddPiS_S_i:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z9vectorAddPiS_S_i, .-_Z9vectorAddPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Completed.\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $262144, %edi
call malloc@PLT
movq %rax, %rbp
movl $262144, %edi
call malloc@PLT
movq %rax, %rbx
movl $262144, %edi
call malloc@PLT
movq %rax, %r12
leaq 8(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $262144, %esi
call cudaMalloc@PLT
movl $65536, %esi
movq %rbp, %rdi
call _Z11matrix_initPii
movl $65536, %esi
movq %rbx, %rdi
call _Z11matrix_initPii
movl $1, %ecx
movl $262144, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $262144, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $256, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L27
.L24:
movl $2, %ecx
movl $262144, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L28
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
movl $65536, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z33__device_stub__Z9vectorAddPiS_S_iPiS_S_i
jmp .L24
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z9vectorAddPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9vectorAddPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "vector_add.hip"
.globl _Z24__device_stub__vectorAddPiS_S_i # -- Begin function _Z24__device_stub__vectorAddPiS_S_i
.p2align 4, 0x90
.type _Z24__device_stub__vectorAddPiS_S_i,@function
_Z24__device_stub__vectorAddPiS_S_i: # @_Z24__device_stub__vectorAddPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9vectorAddPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__vectorAddPiS_S_i, .Lfunc_end0-_Z24__device_stub__vectorAddPiS_S_i
.cfi_endproc
# -- End function
.globl _Z11matrix_initPii # -- Begin function _Z11matrix_initPii
.p2align 4, 0x90
.type _Z11matrix_initPii,@function
_Z11matrix_initPii: # @_Z11matrix_initPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z11matrix_initPii, .Lfunc_end1-_Z11matrix_initPii
.cfi_endproc
# -- End function
.globl _Z11check_errorPiS_S_i # -- Begin function _Z11check_errorPiS_S_i
.p2align 4, 0x90
.type _Z11check_errorPiS_S_i,@function
_Z11check_errorPiS_S_i: # @_Z11check_errorPiS_S_i
.cfi_startproc
# %bb.0:
retq
.Lfunc_end2:
.size _Z11check_errorPiS_S_i, .Lfunc_end2-_Z11check_errorPiS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $152, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $262144, %edi # imm = 0x40000
callq malloc
movq %rax, %r15
movl $262144, %edi # imm = 0x40000
callq malloc
movq %rax, %r14
movl $262144, %edi # imm = 0x40000
callq malloc
movq %rax, %rbx
leaq 32(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $262144, %esi # imm = 0x40000
callq hipMalloc
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%r15,%r12,4)
incq %r12
cmpq $65536, %r12 # imm = 0x10000
jne .LBB3_1
# %bb.2: # %.lr.ph.i27.preheader
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.i27
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%r14,%r12,4)
incq %r12
cmpq $65536, %r12 # imm = 0x10000
jne .LBB3_3
# %bb.4: # %_Z11matrix_initPii.exit31
movq 32(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $262144, %edx # imm = 0x40000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967552, %rdi # imm = 0x100000100
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_6
# %bb.5:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $65536, 12(%rsp) # imm = 0x10000
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z9vectorAddPiS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_6:
movq 16(%rsp), %rsi
movl $262144, %edx # imm = 0x40000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9vectorAddPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9vectorAddPiS_S_i,@object # @_Z9vectorAddPiS_S_i
.section .rodata,"a",@progbits
.globl _Z9vectorAddPiS_S_i
.p2align 3, 0x0
_Z9vectorAddPiS_S_i:
.quad _Z24__device_stub__vectorAddPiS_S_i
.size _Z9vectorAddPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9vectorAddPiS_S_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Completed."
.size .Lstr, 11
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__vectorAddPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9vectorAddPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
void initialize(int* input, int size) {
for (int index = 0; index < size; index++) {
input[index] = index;
}
}
int cpu_reduction(int* input, int size) {
int result = 0;
for (int index = 0; index < size; index++) {
result += index;
}
return result;
}
void showResult(int cpu_result, int gpu_result) {
printf("cpu_result: %d, gpu_result: %d\n", cpu_result, gpu_result);
}
//// 1. reduction neighbored pairs kernel
__global__ void redunction_v1(int* input, int* output, int size) {
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid >= size) return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
if (tid % (2 * offset) == 0) {
input[gid] += input[gid + offset];
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = input[gid];
}
}
//// 2. warp_divergence_improved of #1 reduction_v1
__global__ void reduction_v1_improved(int* input, int* output, int size) {
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
// local data block pointer
int* i_data = input + blockDim.x * blockIdx.x;
if (gid >= size) return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
int index = 2 * offset * tid;
if (index < blockDim.x) {
i_data[index] += i_data[index + offset];
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = input[gid];
}
}
int main(int argc, char** argv) {
// int size = 1 << 27; // 128 Mb of data
int size = 512;
dim3 block(128);
dim3 grid(size / block.x);
int* h_ref = (int*)malloc(grid.x * sizeof(int));
int gpu_result;
//// input
int* h_input;
h_input = (int*)malloc(size * sizeof(int));
initialize(h_input, size);
//// cpu redunction
int cpu_result = cpu_reduction(h_input, size);
//// gpu redunction
int *d_input, *d_output;
cudaMalloc((void**)&d_input, size * sizeof(int));
cudaMalloc((void**)&d_output, grid.x * sizeof(int));
//// #1
cudaMemcpy(d_input, h_input, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(d_output, 0, grid.x * sizeof(int));
redunction_v1<<<grid, block>>>(d_input, d_output, size);
cudaDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
cudaMemcpy(h_ref, d_output, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
//// #2
cudaMemcpy(d_input, h_input, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(d_output, 0, grid.x * sizeof(int));
reduction_v1_improved<<<grid, block>>>(d_input, d_output, size);
cudaDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
cudaMemcpy(h_ref, d_output, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
cudaFree(d_output);
cudaFree(d_input);
free(h_ref);
free(h_input);
cudaDeviceReset();
return 0;
} | code for sm_80
Function : _Z21reduction_v1_improvedPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e280000002500 */
/*0020*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e620000002100 */
/*0030*/ IMAD R0, R8, c[0x0][0x0], RZ ; /* 0x0000000008007a24 */
/* 0x001fc800078e02ff */
/*0040*/ IMAD.IADD R6, R0, 0x1, R9 ; /* 0x0000000100067824 */
/* 0x002fca00078e0209 */
/*0050*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x170], PT ; /* 0x00005c0006007a0c */
/* 0x000fda0003f06270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*0080*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe40008011604 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fc80000000a00 */
/*00a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf05270 */
/*00b0*/ @!P0 BRA 0x240 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x1 ; /* 0x00000001ff027424 */
/* 0x000fc800078e00ff */
/*00d0*/ IMAD.SHL.U32 R14, R2, 0x2, RZ ; /* 0x00000002020e7824 */
/* 0x000fe200078e00ff */
/*00e0*/ BSSY B0, 0x200 ; /* 0x0000011000007945 */
/* 0x000fe60003800000 */
/*00f0*/ IMAD R11, R14, R9, RZ ; /* 0x000000090e0b7224 */
/* 0x000fca00078e02ff */
/*0100*/ ISETP.GE.U32.AND P0, PT, R11, c[0x0][0x0], PT ; /* 0x000000000b007a0c */
/* 0x000fda0003f06070 */
/*0110*/ @P0 BRA 0x1f0 ; /* 0x000000d000000947 */
/* 0x000fea0003800000 */
/*0120*/ IMAD.IADD R3, R11, 0x1, R2 ; /* 0x000000010b037824 */
/* 0x000fe200078e0202 */
/*0130*/ IADD3 R5, P0, R0, R11, RZ ; /* 0x0000000b00057210 */
/* 0x000fc80007f1e0ff */
/*0140*/ IADD3 R7, P1, R0, R3, RZ ; /* 0x0000000300077210 */
/* 0x000fe40007f3e0ff */
/*0150*/ LEA.HI.X.SX32 R10, R11, RZ, 0x1, P0 ; /* 0x000000ff0b0a7211 */
/* 0x000fe400000f0eff */
/*0160*/ LEA R2, P0, R5, c[0x0][0x160], 0x2 ; /* 0x0000580005027a11 */
/* 0x000fe400078010ff */
/*0170*/ LEA.HI.X.SX32 R12, R3, RZ, 0x1, P1 ; /* 0x000000ff030c7211 */
/* 0x000fe400008f0eff */
/*0180*/ LEA R4, P1, R7, c[0x0][0x160], 0x2 ; /* 0x0000580007047a11 */
/* 0x000fe400078210ff */
/*0190*/ LEA.HI.X R3, R5, c[0x0][0x164], R10, 0x2, P0 ; /* 0x0000590005037a11 */
/* 0x000fc400000f140a */
/*01a0*/ LEA.HI.X R5, R7, c[0x0][0x164], R12, 0x2, P1 ; /* 0x0000590007057a11 */
/* 0x000fc600008f140c */
/*01b0*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */
/* 0x000ea8000c1e1900 */
/*01c0*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea4000c1e1900 */
/*01d0*/ IMAD.IADD R7, R7, 0x1, R4 ; /* 0x0000000107077824 */
/* 0x004fca00078e0204 */
/*01e0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101906 */
/*01f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0200*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0210*/ ISETP.GT.U32.AND P0, PT, R14, UR4, PT ; /* 0x000000040e007c0c */
/* 0x000fe2000bf04070 */
/*0220*/ IMAD.MOV.U32 R2, RZ, RZ, R14 ; /* 0x000000ffff027224 */
/* 0x001fd800078e000e */
/*0230*/ @!P0 BRA 0xd0 ; /* 0xfffffe9000008947 */
/* 0x000fea000383ffff */
/*0240*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fda0003f05270 */
/*0250*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0260*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fc800078e00ff */
/*0270*/ IMAD.WIDE R2, R6, R5, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fcc00078e0205 */
/*0280*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea2000c1e1900 */
/*0290*/ IMAD.WIDE.U32 R4, R8, R5, c[0x0][0x168] ; /* 0x00005a0008047625 */
/* 0x000fca00078e0005 */
/*02a0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101906 */
/*02b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02c0*/ BRA 0x2c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13redunction_v1PiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R0, c[0x0][0x0], R5 ; /* 0x0000000000027a24 */
/* 0x001fca00078e0205 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0070*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0080*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe40008011604 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*00a0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc600078e0203 */
/*00b0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf05270 */
/*00c0*/ @!P0 BRA 0x340 ; /* 0x0000027000008947 */
/* 0x000fea0003800000 */
/*00d0*/ HFMA2.MMA R7, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff077435 */
/* 0x000fe200000001ff */
/*00e0*/ IABS R4, R5 ; /* 0x0000000500047213 */
/* 0x000fd20000000000 */
/*00f0*/ IMAD.SHL.U32 R6, R7, 0x2, RZ ; /* 0x0000000207067824 */
/* 0x000fe200078e00ff */
/*0100*/ IABS R14, R5 ; /* 0x00000005000e7213 */
/* 0x000fe20000000000 */
/*0110*/ BSSY B0, 0x300 ; /* 0x000001e000007945 */
/* 0x000fe20003800000 */
/*0120*/ ISETP.GE.AND P2, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f46270 */
/*0130*/ IABS R12, R6.reuse ; /* 0x00000006000c7213 */
/* 0x080fe40000000000 */
/*0140*/ IABS R13, R6 ; /* 0x00000006000d7213 */
/* 0x000fe40000000000 */
/*0150*/ I2F.RP R10, R12 ; /* 0x0000000c000a7306 */
/* 0x000e300000209400 */
/*0160*/ MUFU.RCP R10, R10 ; /* 0x0000000a000a7308 */
/* 0x001e240000001000 */
/*0170*/ IADD3 R8, R10, 0xffffffe, RZ ; /* 0x0ffffffe0a087810 */
/* 0x001fe20007ffe0ff */
/*0180*/ IMAD.MOV R10, RZ, RZ, -R13 ; /* 0x000000ffff0a7224 */
/* 0x000fca00078e0a0d */
/*0190*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*01a0*/ MOV R8, RZ ; /* 0x000000ff00087202 */
/* 0x001fe20000000f00 */
/*01b0*/ IMAD.MOV R11, RZ, RZ, -R9 ; /* 0x000000ffff0b7224 */
/* 0x002fc800078e0a09 */
/*01c0*/ IMAD R11, R11, R12, RZ ; /* 0x0000000c0b0b7224 */
/* 0x000fc800078e02ff */
/*01d0*/ IMAD.HI.U32 R9, R9, R11, R8 ; /* 0x0000000b09097227 */
/* 0x000fc800078e0008 */
/*01e0*/ IMAD.MOV.U32 R11, RZ, RZ, R14 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e000e */
/*01f0*/ IMAD.HI.U32 R9, R9, R4, RZ ; /* 0x0000000409097227 */
/* 0x000fc800078e00ff */
/*0200*/ IMAD R9, R9, R10, R11 ; /* 0x0000000a09097224 */
/* 0x000fca00078e020b */
/*0210*/ ISETP.GT.U32.AND P0, PT, R12, R9, PT ; /* 0x000000090c00720c */
/* 0x000fda0003f04070 */
/*0220*/ @!P0 IADD3 R9, R9, -R12, RZ ; /* 0x8000000c09098210 */
/* 0x000fe40007ffe0ff */
/*0230*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f05270 */
/*0240*/ ISETP.GT.U32.AND P1, PT, R12, R9, PT ; /* 0x000000090c00720c */
/* 0x000fda0003f24070 */
/*0250*/ @!P1 IMAD.IADD R9, R9, 0x1, -R12 ; /* 0x0000000109099824 */
/* 0x000fc800078e0a0c */
/*0260*/ @!P2 IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff09a224 */
/* 0x000fe200078e0a09 */
/*0270*/ @!P0 LOP3.LUT R9, RZ, R6, RZ, 0x33, !PT ; /* 0x00000006ff098212 */
/* 0x000fc800078e33ff */
/*0280*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fda0003f05270 */
/*0290*/ @P0 BRA 0x2f0 ; /* 0x0000005000000947 */
/* 0x000fea0003800000 */
/*02a0*/ IMAD.WIDE R8, R7, 0x4, R2 ; /* 0x0000000407087825 */
/* 0x000fe400078e0202 */
/*02b0*/ LDG.E R7, [R2.64] ; /* 0x0000000602077981 */
/* 0x000ea8000c1e1900 */
/*02c0*/ LDG.E R8, [R8.64] ; /* 0x0000000608087981 */
/* 0x000ea4000c1e1900 */
/*02d0*/ IADD3 R7, R7, R8, RZ ; /* 0x0000000807077210 */
/* 0x004fca0007ffe0ff */
/*02e0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e4000c101906 */
/*02f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0300*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0310*/ ISETP.GT.U32.AND P0, PT, R6, UR4, PT ; /* 0x0000000406007c0c */
/* 0x000fe2000bf04070 */
/*0320*/ IMAD.MOV.U32 R7, RZ, RZ, R6 ; /* 0x000000ffff077224 */
/* 0x001fd800078e0006 */
/*0330*/ @!P0 BRA 0xf0 ; /* 0xfffffdb000008947 */
/* 0x000fea000383ffff */
/*0340*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0350*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0360*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea2000c1e1900 */
/*0370*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x000fca0000000f00 */
/*0380*/ IMAD.WIDE.U32 R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fca00078e0005 */
/*0390*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101906 */
/*03a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03b0*/ BRA 0x3b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
// Fills input[0..size-1] with its own index value (0, 1, 2, ...).
// A size of zero or less leaves the buffer untouched.
void initialize(int* input, int size) {
  int i = 0;
  while (i < size) {
    input[i] = i;
    ++i;
  }
}
// Host reference reduction: returns the sum of input[0..size-1].
//
// Bug fix: the original accumulated the loop index itself
// (`result += index`), silently ignoring the input array. That only
// matched the GPU result because initialize() happens to store
// input[i] = i. Summing the actual elements makes this a valid
// reference for arbitrary input while producing the identical value
// for the data this program generates.
//
// Returns 0 when size <= 0.
int cpu_reduction(int* input, int size) {
  int result = 0;
  for (int index = 0; index < size; index++) {
    result += input[index];
  }
  return result;
}
// Prints the CPU and GPU reduction results side by side so a human can
// verify they agree; the values are not compared programmatically.
void showResult(int cpu_result, int gpu_result) {
  printf("cpu_result: %d, gpu_result: %d\n", cpu_result, gpu_result);
}
//// 1. reduction neighbored pairs kernel
// Neighbored-pairs tree reduction over a 1D grid of 1D blocks: after the
// loop, input[blockStart] holds the sum of the block's elements, which
// thread 0 writes to output[blockIdx.x]. Reduces in place (destroys input).
// blockDim.x must be a power of two for the pairwise scheme to cover
// every element of the block.
//
// Fixes over the original:
//  - The original did `if (gid >= size) return;` before a loop containing
//    __syncthreads(). When size is not a multiple of the launch width,
//    part of a block exits early while the rest waits at a barrier the
//    exited threads never reach — undefined behavior. All threads now stay
//    in the loop; the bounds check guards only the accumulation itself.
//  - input[gid + offset] is additionally bounds-checked so the tail block
//    cannot read past the end of the array.
__global__ void redunction_v1(int* input, int* output, int size) {
  int tid = threadIdx.x;
  int gid = blockDim.x * blockIdx.x + threadIdx.x;
  for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
    if (gid < size && tid % (2 * offset) == 0 && gid + offset < size) {
      input[gid] += input[gid + offset];
    }
    __syncthreads();
  }
  if (tid == 0 && gid < size) {
    output[blockIdx.x] = input[gid];
  }
}
//// 2. warp_divergence_improved of #1 reduction_v1
// Warp-divergence-reduced variant of redunction_v1: thread tid works on
// element index = 2*offset*tid of its block's slice, so active threads are
// packed into the low warps instead of being spread modulo 2*offset.
// Reduces in place; thread 0 writes the block sum to output[blockIdx.x].
// blockDim.x must be a power of two.
//
// Fix over the original: `if (gid >= size) return;` before a loop with
// __syncthreads() is a divergent barrier (undefined behavior) whenever
// size is not a multiple of the launch width. All threads now reach every
// barrier; the guard below also keeps i_data[index + offset] inside the
// array for a partial tail block.
__global__ void reduction_v1_improved(int* input, int* output, int size) {
  int tid = threadIdx.x;
  int gid = blockDim.x * blockIdx.x + threadIdx.x;
  // local data block pointer
  int block_start = blockDim.x * blockIdx.x;
  int* i_data = input + block_start;
  for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
    int index = 2 * offset * tid;
    if (index < blockDim.x && block_start + index + offset < size) {
      i_data[index] += i_data[index + offset];
    }
    __syncthreads();
  }
  if (tid == 0 && gid < size) {
    output[blockIdx.x] = input[gid];
  }
}
// Host driver: builds a 512-element array holding 0..511, reduces it on the
// CPU and with both GPU kernels, and prints the result pairs for manual
// comparison (expected value: 130816 = 511*512/2).
// NOTE(review): no CUDA API return codes are checked and there is no
// cudaGetLastError() after the launches, so any failure is silent — the
// printed gpu_result would just be stale/zero data. size is assumed to be
// an exact multiple of block.x (512 / 128 here); confirm before changing
// either constant.
int main(int argc, char** argv) {
  // int size = 1 << 27; // 128 Mb of data
  int size = 512;
  dim3 block(128);
  dim3 grid(size / block.x);
  // One partial sum per block comes back from the device.
  int* h_ref = (int*)malloc(grid.x * sizeof(int));
  int gpu_result;
  //// input
  int* h_input;
  h_input = (int*)malloc(size * sizeof(int));
  initialize(h_input, size);
  //// cpu redunction
  int cpu_result = cpu_reduction(h_input, size);
  //// gpu redunction
  int *d_input, *d_output;
  cudaMalloc((void**)&d_input, size * sizeof(int));
  cudaMalloc((void**)&d_output, grid.x * sizeof(int));
  //// #1
  // The kernels reduce in place, so d_input is re-uploaded before each run.
  cudaMemcpy(d_input, h_input, size * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemset(d_output, 0, grid.x * sizeof(int));
  redunction_v1<<<grid, block>>>(d_input, d_output, size);
  cudaDeviceSynchronize();
  memset(h_ref, 0, grid.x * sizeof(int));
  cudaMemcpy(h_ref, d_output, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
  // Final stage: sum the per-block partials on the host.
  gpu_result = 0;
  for (int i = 0; i < grid.x; i++) {
    gpu_result += h_ref[i];
  }
  showResult(cpu_result, gpu_result);
  //// #2
  cudaMemcpy(d_input, h_input, size * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemset(d_output, 0, grid.x * sizeof(int));
  reduction_v1_improved<<<grid, block>>>(d_input, d_output, size);
  cudaDeviceSynchronize();
  memset(h_ref, 0, grid.x * sizeof(int));
  cudaMemcpy(h_ref, d_output, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
  gpu_result = 0;
  for (int i = 0; i < grid.x; i++) {
    gpu_result += h_ref[i];
  }
  showResult(cpu_result, gpu_result);
  cudaFree(d_output);
  cudaFree(d_input);
  free(h_ref);
  free(h_input);
  cudaDeviceReset();
  return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10initializePii
.type _Z10initializePii, @function
_Z10initializePii:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L3
movslq %esi, %rsi
movl $0, %eax
.L5:
movl %eax, (%rdi,%rax,4)
addq $1, %rax
cmpq %rsi, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z10initializePii, .-_Z10initializePii
.globl _Z13cpu_reductionPii
.type _Z13cpu_reductionPii, @function
_Z13cpu_reductionPii:
.LFB2058:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L10
movl $0, %eax
movl $0, %edx
.L9:
addl %eax, %edx
addl $1, %eax
cmpl %eax, %esi
jne .L9
.L7:
movl %edx, %eax
ret
.L10:
movl $0, %edx
jmp .L7
.cfi_endproc
.LFE2058:
.size _Z13cpu_reductionPii, .-_Z13cpu_reductionPii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "cpu_result: %d, gpu_result: %d\n"
.text
.globl _Z10showResultii
.type _Z10showResultii, @function
_Z10showResultii:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl %edi, %edx
movl %esi, %ecx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z10showResultii, .-_Z10showResultii
.globl _Z36__device_stub__Z13redunction_v1PiS_iPiS_i
.type _Z36__device_stub__Z13redunction_v1PiS_iPiS_i, @function
_Z36__device_stub__Z13redunction_v1PiS_iPiS_i:
.LFB2085:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13redunction_v1PiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L14
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z36__device_stub__Z13redunction_v1PiS_iPiS_i, .-_Z36__device_stub__Z13redunction_v1PiS_iPiS_i
.globl _Z13redunction_v1PiS_i
.type _Z13redunction_v1PiS_i, @function
_Z13redunction_v1PiS_i:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z13redunction_v1PiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z13redunction_v1PiS_i, .-_Z13redunction_v1PiS_i
.globl _Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i
.type _Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i, @function
_Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i:
.LFB2087:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L26
.L22:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L27
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z21reduction_v1_improvedPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L22
.L27:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i, .-_Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i
.globl _Z21reduction_v1_improvedPiS_i
.type _Z21reduction_v1_improvedPiS_i, @function
_Z21reduction_v1_improvedPiS_i:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z21reduction_v1_improvedPiS_i, .-_Z21reduction_v1_improvedPiS_i
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $16, %edi
call malloc@PLT
movq %rax, %rbx
movl $2048, %edi
call malloc@PLT
movq %rax, %rbp
movl $512, %esi
movq %rax, %rdi
call _Z10initializePii
movl $512, %eax
.L31:
subl $1, %eax
jne .L31
movq %rsp, %rdi
movl $2048, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $16, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $2048, %edx
movq %rbp, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $16, %edx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
movl $4, 28(%rsp)
movl $128, 16(%rsp)
movl 24(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 16(%rsp), %rdx
movq 28(%rsp), %rdi
movl 36(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L32:
call cudaDeviceSynchronize@PLT
pxor %xmm0, %xmm0
movups %xmm0, (%rbx)
movl $2, %ecx
movl $16, %edx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl 4(%rbx), %esi
addl (%rbx), %esi
addl 8(%rbx), %esi
addl 12(%rbx), %esi
movl $130816, %edi
call _Z10showResultii
movl $1, %ecx
movl $2048, %edx
movq %rbp, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $16, %edx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
movl 24(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 16(%rsp), %rdx
movq 28(%rsp), %rdi
movl 36(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L33:
call cudaDeviceSynchronize@PLT
pxor %xmm0, %xmm0
movups %xmm0, (%rbx)
movl $2, %ecx
movl $16, %edx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl 4(%rbx), %esi
addl (%rbx), %esi
addl 8(%rbx), %esi
addl 12(%rbx), %esi
movl $130816, %edi
call _Z10showResultii
movq 8(%rsp), %rdi
call cudaFree@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
call cudaDeviceReset@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L39
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
movl $512, %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z36__device_stub__Z13redunction_v1PiS_iPiS_i
jmp .L32
.L38:
movl $512, %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z44__device_stub__Z21reduction_v1_improvedPiS_iPiS_i
jmp .L33
.L39:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC1:
.string "_Z21reduction_v1_improvedPiS_i"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "_Z13redunction_v1PiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z21reduction_v1_improvedPiS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z13redunction_v1PiS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
void initialize(int* input, int size) {
for (int index = 0; index < size; index++) {
input[index] = index;
}
}
int cpu_reduction(int* input, int size) {
int result = 0;
for (int index = 0; index < size; index++) {
result += index;
}
return result;
}
void showResult(int cpu_result, int gpu_result) {
printf("cpu_result: %d, gpu_result: %d\n", cpu_result, gpu_result);
}
//// 1. reduction neighbored pairs kernel
__global__ void redunction_v1(int* input, int* output, int size) {
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid >= size) return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
if (tid % (2 * offset) == 0) {
input[gid] += input[gid + offset];
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = input[gid];
}
}
//// 2. warp_divergence_improved of #1 reduction_v1
__global__ void reduction_v1_improved(int* input, int* output, int size) {
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
// local data block pointer
int* i_data = input + blockDim.x * blockIdx.x;
if (gid >= size) return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
int index = 2 * offset * tid;
if (index < blockDim.x) {
i_data[index] += i_data[index + offset];
}
__syncthreads();
}
if (tid == 0) {
output[blockIdx.x] = input[gid];
}
}
int main(int argc, char** argv) {
// int size = 1 << 27; // 128 Mb of data
int size = 512;
dim3 block(128);
dim3 grid(size / block.x);
int* h_ref = (int*)malloc(grid.x * sizeof(int));
int gpu_result;
//// input
int* h_input;
h_input = (int*)malloc(size * sizeof(int));
initialize(h_input, size);
//// cpu redunction
int cpu_result = cpu_reduction(h_input, size);
//// gpu redunction
int *d_input, *d_output;
cudaMalloc((void**)&d_input, size * sizeof(int));
cudaMalloc((void**)&d_output, grid.x * sizeof(int));
//// #1
cudaMemcpy(d_input, h_input, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(d_output, 0, grid.x * sizeof(int));
redunction_v1<<<grid, block>>>(d_input, d_output, size);
cudaDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
cudaMemcpy(h_ref, d_output, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
//// #2
cudaMemcpy(d_input, h_input, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(d_output, 0, grid.x * sizeof(int));
reduction_v1_improved<<<grid, block>>>(d_input, d_output, size);
cudaDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
cudaMemcpy(h_ref, d_output, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
cudaFree(d_output);
cudaFree(d_input);
free(h_ref);
free(h_input);
cudaDeviceReset();
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
void initialize(int* input, int size) {
for (int index = 0; index < size; index++) {
input[index] = index;
}
}
int cpu_reduction(int* input, int size) {
int result = 0;
for (int index = 0; index < size; index++) {
result += index;
}
return result;
}
void showResult(int cpu_result, int gpu_result) {
printf("cpu_result: %d, gpu_result: %d\n", cpu_result, gpu_result);
}
//// 1. reduction neighbored pairs kernel
// Neighbored-pairs tree reduction (HIP): reduces the block's slice of
// input in place; thread 0 then writes the block sum to output[blockIdx.x].
// Assumes blockDim.x is a power of two so the pairwise passes cover the
// whole block.
// NOTE(review): the early return below happens before a loop containing
// __syncthreads(); when size is not a multiple of the launch width, the
// returned threads never reach the barrier the rest of the block waits
// at — undefined behavior. Also input[gid + offset] is unguarded at the
// array tail. Harmless for this program's exact-multiple launch (512/128),
// but unsafe in general — confirm before reusing.
__global__ void redunction_v1(int* input, int* output, int size) {
  int tid = threadIdx.x;
  int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= size) return;
  for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
    // Only threads whose index is a multiple of 2*offset accumulate.
    if (tid % (2 * offset) == 0) {
      input[gid] += input[gid + offset];
    }
    __syncthreads();
  }
  if (tid == 0) {
    output[blockIdx.x] = input[gid];
  }
}
//// 2. warp_divergence_improved of #1 reduction_v1
// Divergence-reduced variant (HIP): thread tid works on element
// index = 2*offset*tid of its block's slice, packing active threads into
// the low warps instead of spreading them modulo 2*offset. Reduces in
// place; thread 0 writes the block sum to output[blockIdx.x]. Assumes
// blockDim.x is a power of two.
// NOTE(review): same divergent-barrier hazard as redunction_v1 — the
// early return precedes a __syncthreads() loop, which is undefined
// behavior whenever size is not a multiple of the launch width. Fine for
// this program's 512/128 launch; verify before reuse.
__global__ void reduction_v1_improved(int* input, int* output, int size) {
  int tid = threadIdx.x;
  int gid = blockDim.x * blockIdx.x + threadIdx.x;
  // local data block pointer
  int* i_data = input + blockDim.x * blockIdx.x;
  if (gid >= size) return;
  for (int offset = 1; offset <= blockDim.x / 2; offset *= 2) {
    int index = 2 * offset * tid;
    if (index < blockDim.x) {
      i_data[index] += i_data[index + offset];
    }
    __syncthreads();
  }
  if (tid == 0) {
    output[blockIdx.x] = input[gid];
  }
}
int main(int argc, char** argv) {
// int size = 1 << 27; // 128 Mb of data
int size = 512;
dim3 block(128);
dim3 grid(size / block.x);
int* h_ref = (int*)malloc(grid.x * sizeof(int));
int gpu_result;
//// input
int* h_input;
h_input = (int*)malloc(size * sizeof(int));
initialize(h_input, size);
//// cpu redunction
int cpu_result = cpu_reduction(h_input, size);
//// gpu redunction
int *d_input, *d_output;
hipMalloc((void**)&d_input, size * sizeof(int));
hipMalloc((void**)&d_output, grid.x * sizeof(int));
//// #1
hipMemcpy(d_input, h_input, size * sizeof(int), hipMemcpyHostToDevice);
hipMemset(d_output, 0, grid.x * sizeof(int));
redunction_v1<<<grid, block>>>(d_input, d_output, size);
hipDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
hipMemcpy(h_ref, d_output, grid.x * sizeof(int), hipMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
//// #2
hipMemcpy(d_input, h_input, size * sizeof(int), hipMemcpyHostToDevice);
hipMemset(d_output, 0, grid.x * sizeof(int));
reduction_v1_improved<<<grid, block>>>(d_input, d_output, size);
hipDeviceSynchronize();
memset(h_ref, 0, grid.x * sizeof(int));
hipMemcpy(h_ref, d_output, grid.x * sizeof(int), hipMemcpyDeviceToHost);
gpu_result = 0;
for (int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
showResult(cpu_result, gpu_result);
hipFree(d_output);
hipFree(d_input);
free(h_ref);
free(h_input);
hipDeviceReset();
return 0;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.