system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void NormalizationExecutionKernel(unsigned char* src, float* dst, const int size, const float alpha, const float beta, const float bias)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < size){
dst[index] = (float)(src[index] - alpha) / beta + bias;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z28NormalizationExecutionKernelPhPfifff
.globl _Z28NormalizationExecutionKernelPhPfifff
.p2align 8
.type _Z28NormalizationExecutionKernelPhPfifff,@function
_Z28NormalizationExecutionKernelPhPfifff:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x14
v_ashrrev_i32_e32 v2, 31, v1
s_load_b32 s0, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
global_load_u8 v0, v[3:4], off
s_waitcnt vmcnt(0)
v_cvt_f32_ubyte0_e32 v0, v0
v_subrev_f32_e32 v0, s2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v3, null, s3, s3, v0
v_div_scale_f32 v6, vcc_lo, v0, s3, v0
v_rcp_f32_e32 v4, v3
s_waitcnt_depctr 0xfff
v_fma_f32 v5, -v3, v4, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v5, v4
v_mul_f32_e32 v5, v6, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v7, -v3, v5, v6
v_fmac_f32_e32 v5, v7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v3, -v3, v5, v6
v_div_fmas_f32 v3, v3, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_fixup_f32 v3, v3, s3, v0
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_f32_e32 v2, s0, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z28NormalizationExecutionKernelPhPfifff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z28NormalizationExecutionKernelPhPfifff, .Lfunc_end0-_Z28NormalizationExecutionKernelPhPfifff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z28NormalizationExecutionKernelPhPfifff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z28NormalizationExecutionKernelPhPfifff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void NormalizationExecutionKernel(unsigned char* src, float* dst, const int size, const float alpha, const float beta, const float bias)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < size){
dst[index] = (float)(src[index] - alpha) / beta + bias;
}
} | .text
.file "NormalizationExecutionKernel.hip"
.globl _Z43__device_stub__NormalizationExecutionKernelPhPfifff # -- Begin function _Z43__device_stub__NormalizationExecutionKernelPhPfifff
.p2align 4, 0x90
.type _Z43__device_stub__NormalizationExecutionKernelPhPfifff,@function
_Z43__device_stub__NormalizationExecutionKernelPhPfifff: # @_Z43__device_stub__NormalizationExecutionKernelPhPfifff
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
movss %xmm1, 4(%rsp)
movss %xmm2, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z28NormalizationExecutionKernelPhPfifff, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z43__device_stub__NormalizationExecutionKernelPhPfifff, .Lfunc_end0-_Z43__device_stub__NormalizationExecutionKernelPhPfifff
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z28NormalizationExecutionKernelPhPfifff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z28NormalizationExecutionKernelPhPfifff,@object # @_Z28NormalizationExecutionKernelPhPfifff
.section .rodata,"a",@progbits
.globl _Z28NormalizationExecutionKernelPhPfifff
.p2align 3, 0x0
_Z28NormalizationExecutionKernelPhPfifff:
.quad _Z43__device_stub__NormalizationExecutionKernelPhPfifff
.size _Z28NormalizationExecutionKernelPhPfifff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z28NormalizationExecutionKernelPhPfifff"
.size .L__unnamed_1, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z43__device_stub__NormalizationExecutionKernelPhPfifff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z28NormalizationExecutionKernelPhPfifff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z28NormalizationExecutionKernelPhPfifff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IADD3 R2, P0, R0, c[0x0][0x160], RZ ; /* 0x0000580000027a10 */
/* 0x000fe20007f1e0ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ LEA.HI.X.SX32 R3, R0, c[0x0][0x164], 0x1, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f0eff */
/*0090*/ LDG.E.U8 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1100 */
/*00a0*/ MUFU.RCP R5, c[0x0][0x178] ; /* 0x00005e0000057b08 */
/* 0x000e220000001000 */
/*00b0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */
/* 0x000fe200078e00ff */
/*00c0*/ BSSY B0, 0x190 ; /* 0x000000c000007945 */
/* 0x000fe60003800000 */
/*00d0*/ FFMA R6, R5, -R6, 1 ; /* 0x3f80000005067423 */
/* 0x001fc80000000806 */
/*00e0*/ FFMA R5, R5, R6, R5 ; /* 0x0000000605057223 */
/* 0x000fe20000000005 */
/*00f0*/ I2F.U16 R4, R2 ; /* 0x0000000200047306 */
/* 0x004e240000101000 */
/*0100*/ FADD R4, R4, -c[0x0][0x174] ; /* 0x80005d0004047621 */
/* 0x001fcc0000000000 */
/*0110*/ FCHK P0, R4, c[0x0][0x178] ; /* 0x00005e0004007b02 */
/* 0x000e220000000000 */
/*0120*/ FFMA R6, R4, R5, RZ ; /* 0x0000000504067223 */
/* 0x000fc800000000ff */
/*0130*/ FFMA R7, R6, -c[0x0][0x178], R4 ; /* 0x80005e0006077a23 */
/* 0x000fc80000000004 */
/*0140*/ FFMA R5, R5, R7, R6 ; /* 0x0000000705057223 */
/* 0x000fe20000000006 */
/*0150*/ @!P0 BRA 0x180 ; /* 0x0000002000008947 */
/* 0x001fea0003800000 */
/*0160*/ MOV R2, 0x180 ; /* 0x0000018000027802 */
/* 0x000fe40000000f00 */
/*0170*/ CALL.REL.NOINC 0x1e0 ; /* 0x0000006000007944 */
/* 0x000fea0003c00000 */
/*0180*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0190*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe400078e00ff */
/*01a0*/ FADD R5, R5, c[0x0][0x17c] ; /* 0x00005f0005057621 */
/* 0x000fe40000000000 */
/*01b0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0203 */
/*01c0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff0b7624 */
/* 0x000fe200078e00ff */
/*01f0*/ SHF.R.U32.HI R3, RZ, 0x17, R4.reuse ; /* 0x00000017ff037819 */
/* 0x100fe20000011604 */
/*0200*/ BSSY B1, 0x850 ; /* 0x0000064000017945 */
/* 0x000fe20003800000 */
/*0210*/ IMAD.MOV.U32 R6, RZ, RZ, R4 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0004 */
/*0220*/ SHF.R.U32.HI R5, RZ, 0x17, R11 ; /* 0x00000017ff057819 */
/* 0x000fe2000001160b */
/*0230*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff077624 */
/* 0x000fe200078e00ff */
/*0240*/ LOP3.LUT R3, R3, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff03037812 */
/* 0x000fe400078ec0ff */
/*0250*/ LOP3.LUT R5, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff05057812 */
/* 0x000fc400078ec0ff */
/*0260*/ IADD3 R9, R3, -0x1, RZ ; /* 0xffffffff03097810 */
/* 0x000fe40007ffe0ff */
/*0270*/ IADD3 R10, R5, -0x1, RZ ; /* 0xffffffff050a7810 */
/* 0x000fc80007ffe0ff */
/*0280*/ ISETP.GT.U32.AND P0, PT, R10, 0xfd, PT ; /* 0x000000fd0a00780c */
/* 0x000fc80003f04070 */
/*0290*/ ISETP.GT.U32.OR P0, PT, R9, 0xfd, P0 ; /* 0x000000fd0900780c */
/* 0x000fda0000704470 */
/*02a0*/ @!P0 IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff088224 */
/* 0x000fe200078e00ff */
/*02b0*/ @!P0 BRA 0x430 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*02c0*/ FSETP.GTU.FTZ.AND P1, PT, |R11|, +INF , PT ; /* 0x7f8000000b00780b */
/* 0x000fe40003f3c200 */
/*02d0*/ FSETP.GTU.FTZ.AND P0, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fc80003f1c200 */
/*02e0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*02f0*/ @P0 BRA 0x830 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0300*/ LOP3.LUT P0, RZ, R7, 0x7fffffff, R6, 0xc8, !PT ; /* 0x7fffffff07ff7812 */
/* 0x000fda000780c806 */
/*0310*/ @!P0 BRA 0x810 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0320*/ FSETP.NEU.FTZ.AND P2, PT, |R4|.reuse, +INF , PT ; /* 0x7f8000000400780b */
/* 0x040fe40003f5d200 */
/*0330*/ FSETP.NEU.FTZ.AND P1, PT, |R11|, +INF , PT ; /* 0x7f8000000b00780b */
/* 0x000fe40003f3d200 */
/*0340*/ FSETP.NEU.FTZ.AND P0, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fd60003f1d200 */
/*0350*/ @!P1 BRA !P2, 0x810 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*0360*/ LOP3.LUT P2, RZ, R6, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff06ff7812 */
/* 0x000fc8000784c0ff */
/*0370*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*0380*/ @P1 BRA 0x7f0 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*0390*/ LOP3.LUT P1, RZ, R7, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff07ff7812 */
/* 0x000fc8000782c0ff */
/*03a0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*03b0*/ @P0 BRA 0x7c0 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*03c0*/ ISETP.GE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe40003f06270 */
/*03d0*/ ISETP.GE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fd60003f26270 */
/*03e0*/ @P0 IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff080224 */
/* 0x000fe400078e00ff */
/*03f0*/ @!P0 IMAD.MOV.U32 R8, RZ, RZ, -0x40 ; /* 0xffffffc0ff088424 */
/* 0x000fe400078e00ff */
/*0400*/ @!P0 FFMA R6, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004068823 */
/* 0x000fe400000000ff */
/*0410*/ @!P1 FFMA R7, R11, 1.84467440737095516160e+19, RZ ; /* 0x5f8000000b079823 */
/* 0x000fe200000000ff */
/*0420*/ @!P1 IADD3 R8, R8, 0x40, RZ ; /* 0x0000004008089810 */
/* 0x000fe40007ffe0ff */
/*0430*/ LEA R4, R5, 0xc0800000, 0x17 ; /* 0xc080000005047811 */
/* 0x000fe200078eb8ff */
/*0440*/ BSSY B2, 0x7b0 ; /* 0x0000036000027945 */
/* 0x000fe80003800000 */
/*0450*/ IMAD.IADD R7, R7, 0x1, -R4 ; /* 0x0000000107077824 */
/* 0x000fe200078e0a04 */
/*0460*/ IADD3 R4, R3, -0x7f, RZ ; /* 0xffffff8103047810 */
/* 0x000fc60007ffe0ff */
/*0470*/ MUFU.RCP R9, R7 ; /* 0x0000000700097308 */
/* 0x000e220000001000 */
/*0480*/ FADD.FTZ R11, -R7, -RZ ; /* 0x800000ff070b7221 */
/* 0x000fe20000010100 */
/*0490*/ IADD3 R5, R4.reuse, 0x7f, -R5 ; /* 0x0000007f04057810 */
/* 0x040fe20007ffe805 */
/*04a0*/ IMAD R6, R4, -0x800000, R6 ; /* 0xff80000004067824 */
/* 0x000fc800078e0206 */
/*04b0*/ IMAD.IADD R5, R5, 0x1, R8 ; /* 0x0000000105057824 */
/* 0x000fe400078e0208 */
/*04c0*/ FFMA R10, R9, R11, 1 ; /* 0x3f800000090a7423 */
/* 0x001fc8000000000b */
/*04d0*/ FFMA R9, R9, R10, R9 ; /* 0x0000000a09097223 */
/* 0x000fc80000000009 */
/*04e0*/ FFMA R3, R6, R9, RZ ; /* 0x0000000906037223 */
/* 0x000fc800000000ff */
/*04f0*/ FFMA R10, R11, R3, R6 ; /* 0x000000030b0a7223 */
/* 0x000fc80000000006 */
/*0500*/ FFMA R10, R9, R10, R3 ; /* 0x0000000a090a7223 */
/* 0x000fc80000000003 */
/*0510*/ FFMA R11, R11, R10, R6 ; /* 0x0000000a0b0b7223 */
/* 0x000fc80000000006 */
/*0520*/ FFMA R3, R9, R11, R10 ; /* 0x0000000b09037223 */
/* 0x000fca000000000a */
/*0530*/ SHF.R.U32.HI R4, RZ, 0x17, R3 ; /* 0x00000017ff047819 */
/* 0x000fc80000011603 */
/*0540*/ LOP3.LUT R4, R4, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff04047812 */
/* 0x000fca00078ec0ff */
/*0550*/ IMAD.IADD R8, R4, 0x1, R5 ; /* 0x0000000104087824 */
/* 0x000fca00078e0205 */
/*0560*/ IADD3 R4, R8, -0x1, RZ ; /* 0xffffffff08047810 */
/* 0x000fc80007ffe0ff */
/*0570*/ ISETP.GE.U32.AND P0, PT, R4, 0xfe, PT ; /* 0x000000fe0400780c */
/* 0x000fda0003f06070 */
/*0580*/ @!P0 BRA 0x790 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*0590*/ ISETP.GT.AND P0, PT, R8, 0xfe, PT ; /* 0x000000fe0800780c */
/* 0x000fda0003f04270 */
/*05a0*/ @P0 BRA 0x760 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*05b0*/ ISETP.GE.AND P0, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x000fda0003f06270 */
/*05c0*/ @P0 BRA 0x7a0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*05d0*/ ISETP.GE.AND P0, PT, R8, -0x18, PT ; /* 0xffffffe80800780c */
/* 0x000fe40003f06270 */
/*05e0*/ LOP3.LUT R3, R3, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000003037812 */
/* 0x000fd600078ec0ff */
/*05f0*/ @!P0 BRA 0x7a0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0600*/ FFMA.RZ R4, R9.reuse, R11.reuse, R10.reuse ; /* 0x0000000b09047223 */
/* 0x1c0fe2000000c00a */
/*0610*/ IADD3 R7, R8.reuse, 0x20, RZ ; /* 0x0000002008077810 */
/* 0x040fe20007ffe0ff */
/*0620*/ FFMA.RM R5, R9.reuse, R11.reuse, R10.reuse ; /* 0x0000000b09057223 */
/* 0x1c0fe2000000400a */
/*0630*/ ISETP.NE.AND P2, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f45270 */
/*0640*/ LOP3.LUT R6, R4, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff04067812 */
/* 0x000fe200078ec0ff */
/*0650*/ FFMA.RP R4, R9, R11, R10 ; /* 0x0000000b09047223 */
/* 0x000fe2000000800a */
/*0660*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f25270 */
/*0670*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0a08 */
/*0680*/ LOP3.LUT R6, R6, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000006067812 */
/* 0x000fe400078efcff */
/*0690*/ FSETP.NEU.FTZ.AND P0, PT, R4, R5, PT ; /* 0x000000050400720b */
/* 0x000fc40003f1d000 */
/*06a0*/ SHF.L.U32 R7, R6, R7, RZ ; /* 0x0000000706077219 */
/* 0x000fe400000006ff */
/*06b0*/ SEL R5, R8, RZ, P2 ; /* 0x000000ff08057207 */
/* 0x000fe40001000000 */
/*06c0*/ ISETP.NE.AND P1, PT, R7, RZ, P1 ; /* 0x000000ff0700720c */
/* 0x000fe40000f25270 */
/*06d0*/ SHF.R.U32.HI R5, RZ, R5, R6 ; /* 0x00000005ff057219 */
/* 0x000fe40000011606 */
/*06e0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703570 */
/*06f0*/ SHF.R.U32.HI R7, RZ, 0x1, R5 ; /* 0x00000001ff077819 */
/* 0x000fc40000011605 */
/*0700*/ SEL R4, RZ, 0x1, !P0 ; /* 0x00000001ff047807 */
/* 0x000fc80004000000 */
/*0710*/ LOP3.LUT R4, R4, 0x1, R7, 0xf8, !PT ; /* 0x0000000104047812 */
/* 0x000fc800078ef807 */
/*0720*/ LOP3.LUT R4, R4, R5, RZ, 0xc0, !PT ; /* 0x0000000504047212 */
/* 0x000fca00078ec0ff */
/*0730*/ IMAD.IADD R4, R7, 0x1, R4 ; /* 0x0000000107047824 */
/* 0x000fca00078e0204 */
/*0740*/ LOP3.LUT R3, R4, R3, RZ, 0xfc, !PT ; /* 0x0000000304037212 */
/* 0x000fe200078efcff */
/*0750*/ BRA 0x7a0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0760*/ LOP3.LUT R3, R3, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000003037812 */
/* 0x000fc800078ec0ff */
/*0770*/ LOP3.LUT R3, R3, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000003037812 */
/* 0x000fe200078efcff */
/*0780*/ BRA 0x7a0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0790*/ IMAD R3, R5, 0x800000, R3 ; /* 0x0080000005037824 */
/* 0x000fe400078e0203 */
/*07a0*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*07b0*/ BRA 0x840 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*07c0*/ LOP3.LUT R3, R7, 0x80000000, R6, 0x48, !PT ; /* 0x8000000007037812 */
/* 0x000fc800078e4806 */
/*07d0*/ LOP3.LUT R3, R3, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000003037812 */
/* 0x000fe200078efcff */
/*07e0*/ BRA 0x840 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*07f0*/ LOP3.LUT R3, R7, 0x80000000, R6, 0x48, !PT ; /* 0x8000000007037812 */
/* 0x000fe200078e4806 */
/*0800*/ BRA 0x840 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0810*/ MUFU.RSQ R3, -QNAN ; /* 0xffc0000000037908 */
/* 0x000e220000001400 */
/*0820*/ BRA 0x840 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0830*/ FADD.FTZ R3, R4, c[0x0][0x178] ; /* 0x00005e0004037621 */
/* 0x000fe40000010000 */
/*0840*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0850*/ IMAD.MOV.U32 R5, RZ, RZ, R3 ; /* 0x000000ffff057224 */
/* 0x001fe400078e0003 */
/*0860*/ IMAD.MOV.U32 R3, RZ, RZ, 0x0 ; /* 0x00000000ff037424 */
/* 0x000fc800078e00ff */
/*0870*/ RET.REL.NODEC R2 0x0 ; /* 0xfffff78002007950 */
/* 0x000fea0003c3ffff */
/*0880*/ BRA 0x880; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0890*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z28NormalizationExecutionKernelPhPfifff
.globl _Z28NormalizationExecutionKernelPhPfifff
.p2align 8
.type _Z28NormalizationExecutionKernelPhPfifff,@function
_Z28NormalizationExecutionKernelPhPfifff:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x14
v_ashrrev_i32_e32 v2, 31, v1
s_load_b32 s0, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
global_load_u8 v0, v[3:4], off
s_waitcnt vmcnt(0)
v_cvt_f32_ubyte0_e32 v0, v0
v_subrev_f32_e32 v0, s2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v3, null, s3, s3, v0
v_div_scale_f32 v6, vcc_lo, v0, s3, v0
v_rcp_f32_e32 v4, v3
s_waitcnt_depctr 0xfff
v_fma_f32 v5, -v3, v4, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v5, v4
v_mul_f32_e32 v5, v6, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v7, -v3, v5, v6
v_fmac_f32_e32 v5, v7, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v3, -v3, v5, v6
v_div_fmas_f32 v3, v3, v4, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_fixup_f32 v3, v3, s3, v0
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_f32_e32 v2, s0, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z28NormalizationExecutionKernelPhPfifff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z28NormalizationExecutionKernelPhPfifff, .Lfunc_end0-_Z28NormalizationExecutionKernelPhPfifff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z28NormalizationExecutionKernelPhPfifff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z28NormalizationExecutionKernelPhPfifff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001510f0_00000000-6_NormalizationExecutionKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z54__device_stub__Z28NormalizationExecutionKernelPhPfifffPhPfifff
.type _Z54__device_stub__Z28NormalizationExecutionKernelPhPfifffPhPfifff, @function
_Z54__device_stub__Z28NormalizationExecutionKernelPhPfifffPhPfifff:
.LFB2051:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
movss %xmm1, 4(%rsp)
movss %xmm2, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z28NormalizationExecutionKernelPhPfifff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z54__device_stub__Z28NormalizationExecutionKernelPhPfifffPhPfifff, .-_Z54__device_stub__Z28NormalizationExecutionKernelPhPfifffPhPfifff
.globl _Z28NormalizationExecutionKernelPhPfifff
.type _Z28NormalizationExecutionKernelPhPfifff, @function
_Z28NormalizationExecutionKernelPhPfifff:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z28NormalizationExecutionKernelPhPfifffPhPfifff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z28NormalizationExecutionKernelPhPfifff, .-_Z28NormalizationExecutionKernelPhPfifff
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z28NormalizationExecutionKernelPhPfifff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z28NormalizationExecutionKernelPhPfifff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "NormalizationExecutionKernel.hip"
.globl _Z43__device_stub__NormalizationExecutionKernelPhPfifff # -- Begin function _Z43__device_stub__NormalizationExecutionKernelPhPfifff
.p2align 4, 0x90
.type _Z43__device_stub__NormalizationExecutionKernelPhPfifff,@function
_Z43__device_stub__NormalizationExecutionKernelPhPfifff: # @_Z43__device_stub__NormalizationExecutionKernelPhPfifff
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movss %xmm0, 8(%rsp)
movss %xmm1, 4(%rsp)
movss %xmm2, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z28NormalizationExecutionKernelPhPfifff, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z43__device_stub__NormalizationExecutionKernelPhPfifff, .Lfunc_end0-_Z43__device_stub__NormalizationExecutionKernelPhPfifff
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z28NormalizationExecutionKernelPhPfifff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z28NormalizationExecutionKernelPhPfifff,@object # @_Z28NormalizationExecutionKernelPhPfifff
.section .rodata,"a",@progbits
.globl _Z28NormalizationExecutionKernelPhPfifff
.p2align 3, 0x0
_Z28NormalizationExecutionKernelPhPfifff:
.quad _Z43__device_stub__NormalizationExecutionKernelPhPfifff
.size _Z28NormalizationExecutionKernelPhPfifff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z28NormalizationExecutionKernelPhPfifff"
.size .L__unnamed_1, 41
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z43__device_stub__NormalizationExecutionKernelPhPfifff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z28NormalizationExecutionKernelPhPfifff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include "sys/time.h"
#define GPU_ID 0
// #define USE_SINGLE_PRECISION /* Comment this line using "!" if you want to use double precision. */
#ifdef USE_SINGLE_PRECISION
#define DAT float
#define PRECIS 4
#else
#define DAT double
#define PRECIS 8
#endif
#define zeros(A,nx,ny,nz) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc((nx)*(ny)*(nz)*sizeof(DAT)); \
for(i=0; i < (nx)*(ny)*(nz); i++){ A##_h[i]=(DAT)0.0; } \
cudaMalloc(&A##_d ,(nx)*(ny)*(nz)*sizeof(DAT)); \
cudaMemcpy( A##_d,A##_h,(nx)*(ny)*(nz)*sizeof(DAT),cudaMemcpyHostToDevice);
#define free_all(A) free(A##_h);cudaFree(A##_d);
#define BLOCK_X 32
#define BLOCK_Y 16
#define BLOCK_Z 2
#define GRID_X 32
#define GRID_Y 64
#define GRID_Z 128
const int nx = GRID_X*BLOCK_X;
const int ny = GRID_Y*BLOCK_Y;
const int nz = GRID_Z*BLOCK_Z;
const int nt = 100;
// Timer
double timer_start = 0;
// Current wall-clock time in seconds, with microsecond resolution.
double cpu_sec(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + 1e-6*now.tv_usec;
}
// Remember the current time as the start point for toc().
void tic(){
    timer_start = cpu_sec();
}
// Seconds elapsed since the most recent tic().
double toc(){
    return cpu_sec() - timer_start;
}
// Print "<what>: <seconds>" and, when n (GB) is positive, the implied GB/s.
void tim(const char *what, double n){
    double elapsed = toc();
    printf("%s: %8.3f seconds", what, elapsed);
    if (n > 0) printf(", %8.3f GB/s", n/elapsed);
    printf("\n");
}
// Print a timing line like tim() and additionally append the result to
// "PERF_memcpy.dat" as "nx ny nz GB/s seconds".
// what: label for the measurement; n: data volume in GB (bandwidth printed
// only when n > 0); nx/ny/nz: problem dimensions recorded in the log.
void timPrint(const char *what, double n, int nx, int ny, int nz){
    double s = toc();
    printf("%s: %8.3f seconds", what, s);
    if (n > 0) printf(", %8.3f GB/s", n/s);
    printf("\n");
    FILE *fid = fopen("PERF_memcpy.dat", "a");
    if (fid == NULL) {
        // bug fix: the original passed a NULL stream to fprintf when the
        // log file could not be opened (undefined behavior / crash).
        fprintf(stderr, "WARNING: could not open PERF_memcpy.dat for appending\n");
        return;
    }
    fprintf(fid, "nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n", nx, ny, nz, n/s, s);
    fclose(fid);
}
// Report and clear any pending CUDA error; resets the device on failure.
void clean_cuda(){
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) return;
    printf("ERROR launching GPU C-CUDA program: %s\n", cudaGetErrorString(status));
    cudaDeviceReset();
}
// Element-wise A = B + C over an nx*ny*nz volume, one thread per element.
// Expects a 3-D launch that covers the full domain; out-of-range threads exit.
__global__ void memcopy(DAT*A, DAT*B, DAT*C, const int nx,const int ny,const int nz){
    const int ix = blockIdx.x*blockDim.x + threadIdx.x; // global index, dimension x
    const int iy = blockIdx.y*blockDim.y + threadIdx.y; // global index, dimension y
    const int iz = blockIdx.z*blockDim.z + threadIdx.z; // global index, dimension z
    if (ix >= nx || iy >= ny || iz >= nz) return;       // guard the grid tail
    const int idx = ix + iy*nx + iz*nx*ny;              // row-major linear index
    A[idx] = B[idx] + C[idx];
}
////////// main //////////
// Benchmark driver: allocates three nx*ny*nz double arrays on host and device,
// runs the memcopy (A = B + C) kernel nt times, and reports sustained
// bandwidth. Timing starts at iteration 10 to skip warm-up.
int main(){
// N = element count; mem = total bytes touched per iteration (3 arrays)
size_t i, it, N=nx*ny*nz, mem=3*N*sizeof(DAT);
dim3 grid, block;
block.x = BLOCK_X; block.y = BLOCK_Y; block.z = BLOCK_Z;
grid.x = GRID_X; grid.y = GRID_Y; grid.z = GRID_Z;
int gpu_id=-1; gpu_id=GPU_ID; cudaSetDevice(gpu_id); cudaGetDevice(&gpu_id);
cudaDeviceReset(); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); // set L1 to preferred
printf("Process uses GPU with id %d.\n",gpu_id);
printf("%dx%dx%d, %1.3f GB, %d iterations.\n", nx,ny,nz, mem/1024./1024./1024., nt);
printf("launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n", grid.x, grid.y, grid.z, block.x, block.y, block.z);
// initializations
// NOTE(review): the zeros() macro checks neither malloc nor cudaMalloc; at
// ~2 GB per array an allocation failure dereferences NULL — consider checking.
zeros(A, nx,ny,nz);
zeros(B, nx,ny,nz);
zeros(C, nx,ny,nz);
// time loop
for(it=0; it<nt; it++){
if (it==10){ tic(); } // start the timer after 10 warm-up iterations
memcopy<<<grid, block>>>(A_d,B_d,C_d,nx,ny,nz);
cudaDeviceSynchronize();
}
// tim("Performance", mem*(nt-3)*2/1024./1024./1024.);
// report bandwidth (GB) over the (nt-10) timed iterations
timPrint("Performance", mem*(nt-10)/1024./1024./1024.,nx,ny,nz);
free_all(A);
free_all(B);
free_all(C);
clean_cuda(); // surface any deferred kernel/API error
return 0;
}
Function : _Z7memcopyPdS_S_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Z ; /* 0x0000000000037919 */
/* 0x000e280000002700 */
/*0020*/ S2R R4, SR_TID.Z ; /* 0x0000000000047919 */
/* 0x000e280000002300 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002200 */
/*0050*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000ea80000002500 */
/*0060*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000ea20000002100 */
/*0070*/ IMAD R3, R3, c[0x0][0x8], R4 ; /* 0x0000020003037a24 */
/* 0x001fca00078e0204 */
/*0080*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x180], PT ; /* 0x0000600003007a0c */
/* 0x000fe20003f06270 */
/*0090*/ IMAD R2, R2, c[0x0][0x4], R7 ; /* 0x0000010002027a24 */
/* 0x002fca00078e0207 */
/*00a0*/ ISETP.GE.OR P0, PT, R2, c[0x0][0x17c], P0 ; /* 0x00005f0002007a0c */
/* 0x000fe20000706670 */
/*00b0*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0205 */
/*00c0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*00d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00e0*/ HFMA2.MMA R9, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff097435 */
/* 0x000fe200000001ff */
/*00f0*/ IMAD R3, R3, c[0x0][0x17c], R2 ; /* 0x00005f0003037a24 */
/* 0x000fe200078e0202 */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0110*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fca00078e0200 */
/*0120*/ IMAD.WIDE R4, R0, R9, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fc800078e0209 */
/*0130*/ IMAD.WIDE R2, R0.reuse, R9.reuse, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x0c0fe400078e0209 */
/*0140*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*0150*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*0160*/ IMAD.WIDE R8, R0, R9, c[0x0][0x160] ; /* 0x0000580000087625 */
/* 0x000fe200078e0209 */
/*0170*/ DADD R6, R4, R2 ; /* 0x0000000004067229 */
/* 0x004e0e0000000002 */
/*0180*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x001fe2000c101b04 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include "sys/time.h"
#define GPU_ID 0
// #define USE_SINGLE_PRECISION /* Comment this line using "!" if you want to use double precision. */
#ifdef USE_SINGLE_PRECISION
#define DAT float
#define PRECIS 4
#else
#define DAT double
#define PRECIS 8
#endif
#define zeros(A,nx,ny,nz) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc((nx)*(ny)*(nz)*sizeof(DAT)); \
for(i=0; i < (nx)*(ny)*(nz); i++){ A##_h[i]=(DAT)0.0; } \
cudaMalloc(&A##_d ,(nx)*(ny)*(nz)*sizeof(DAT)); \
cudaMemcpy( A##_d,A##_h,(nx)*(ny)*(nz)*sizeof(DAT),cudaMemcpyHostToDevice);
#define free_all(A) free(A##_h);cudaFree(A##_d);
#define BLOCK_X 32
#define BLOCK_Y 16
#define BLOCK_Z 2
#define GRID_X 32
#define GRID_Y 64
#define GRID_Z 128
const int nx = GRID_X*BLOCK_X;
const int ny = GRID_Y*BLOCK_Y;
const int nz = GRID_Z*BLOCK_Z;
const int nt = 100;
// Timer
double timer_start = 0;
// Current wall-clock time in seconds (microsecond resolution).
double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; }
// Record the current time as the start point for toc().
void tic(){ timer_start = cpu_sec(); }
// Seconds elapsed since the most recent tic().
double toc(){ return cpu_sec()-timer_start; }
// Print "<what>: <seconds>" and, when n (GB) is positive, the implied GB/s.
void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); }
// Like tim(), and additionally appends the result to "PERF_memcpy.dat".
// n: data volume in GB; nx/ny/nz: problem dimensions recorded in the log.
void timPrint(const char *what, double n, int nx, int ny, int nz){
double s=toc();
printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n");
// NOTE(review): fopen result is not checked — fprintf on a NULL stream is
// undefined behavior if the log file cannot be opened.
FILE*fid; fid=fopen("PERF_memcpy.dat","a"); fprintf(fid,"nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n", nx, ny, nz, n/s, s); fclose(fid);
}
// Report and clear any pending CUDA error; resets the device if one occurred.
void clean_cuda(){
cudaError_t ce = cudaGetLastError();
if(ce != cudaSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", cudaGetErrorString(ce)); cudaDeviceReset();}
}
// Element-wise A = B + C over an nx*ny*nz volume; one thread per element,
// with a bounds guard for the grid tail.
__global__ void memcopy(DAT*A, DAT*B, DAT*C, const int nx,const int ny,const int nz){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z
// linear row-major index: ix + iy*nx + iz*nx*ny
if (iz<nz && iy<ny && ix<nx) A[ix + iy*nx + iz*nx*ny] = B[ix + iy*nx + iz*nx*ny] + C[ix + iy*nx + iz*nx*ny];
}
////////// main //////////
// Benchmark driver: allocate three nx*ny*nz double arrays on host/device,
// run memcopy (A = B + C) nt times, and report sustained bandwidth
// (timing starts at iteration 10 to skip warm-up).
int main(){
size_t i, it, N=nx*ny*nz, mem=3*N*sizeof(DAT);
dim3 grid, block;
block.x = BLOCK_X; block.y = BLOCK_Y; block.z = BLOCK_Z;
grid.x = GRID_X; grid.y = GRID_Y; grid.z = GRID_Z;
int gpu_id=-1; gpu_id=GPU_ID; cudaSetDevice(gpu_id); cudaGetDevice(&gpu_id);
cudaDeviceReset(); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); // set L1 to preferred
printf("Process uses GPU with id %d.\n",gpu_id);
printf("%dx%dx%d, %1.3f GB, %d iterations.\n", nx,ny,nz, mem/1024./1024./1024., nt);
printf("launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n", grid.x, grid.y, grid.z, block.x, block.y, block.z);
// initializations
// NOTE(review): zeros() checks neither malloc nor cudaMalloc results.
zeros(A, nx,ny,nz);
zeros(B, nx,ny,nz);
zeros(C, nx,ny,nz);
// time loop
for(it=0; it<nt; it++){
if (it==10){ tic(); } // start the timer after 10 warm-up iterations
memcopy<<<grid, block>>>(A_d,B_d,C_d,nx,ny,nz);
cudaDeviceSynchronize();
}
// tim("Performance", mem*(nt-3)*2/1024./1024./1024.);
// report bandwidth (GB) over the (nt-10) timed iterations
timPrint("Performance", mem*(nt-10)/1024./1024./1024.,nx,ny,nz);
free_all(A);
free_all(B);
free_all(C);
clean_cuda(); // surface any deferred kernel/API error
return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7cpu_secv
.type _Z7cpu_secv, @function
_Z7cpu_secv:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z7cpu_secv, .-_Z7cpu_secv
.globl _Z3ticv
.type _Z3ticv, @function
_Z3ticv:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z7cpu_secv
movsd %xmm0, timer_start(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z3ticv, .-_Z3ticv
.globl _Z3tocv
.type _Z3tocv, @function
_Z3tocv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z7cpu_secv
subsd timer_start(%rip), %xmm0
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z3tocv, .-_Z3tocv
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "%s: %8.3f seconds"
.LC3:
.string ", %8.3f GB/s"
.LC4:
.string "\n"
.text
.globl _Z3timPKcd
.type _Z3timPKcd, @function
_Z3timPKcd:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movsd %xmm0, (%rsp)
call _Z3tocv
movsd %xmm0, 8(%rsp)
movq %rbx, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
movsd (%rsp), %xmm1
comisd %xmm0, %xmm1
ja .L16
.L12:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
divsd 8(%rsp), %xmm1
movapd %xmm1, %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L12
.cfi_endproc
.LFE2060:
.size _Z3timPKcd, .-_Z3timPKcd
.section .rodata.str1.1
.LC5:
.string "a"
.LC6:
.string "PERF_memcpy.dat"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n"
.text
.globl _Z8timPrintPKcdiii
.type _Z8timPrintPKcdiii, @function
_Z8timPrintPKcdiii:
.LFB2061:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $24, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbx
movsd %xmm0, (%rsp)
movl %esi, %ebp
movl %edx, %r12d
movl %ecx, %r13d
call _Z3tocv
movsd %xmm0, 8(%rsp)
movq %rbx, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
movsd (%rsp), %xmm2
comisd %xmm0, %xmm2
ja .L22
.L18:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC5(%rip), %rsi
leaq .LC6(%rip), %rdi
call fopen@PLT
movq %rax, %rbx
movsd (%rsp), %xmm0
movsd 8(%rsp), %xmm1
divsd %xmm1, %xmm0
movl %r13d, %r9d
movl %r12d, %r8d
movl %ebp, %ecx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $2, %eax
call __fprintf_chk@PLT
movq %rbx, %rdi
call fclose@PLT
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
divsd 8(%rsp), %xmm2
movapd %xmm2, %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L18
.cfi_endproc
.LFE2061:
.size _Z8timPrintPKcdiii, .-_Z8timPrintPKcdiii
.section .rodata.str1.8
.align 8
.LC8:
.string "ERROR launching GPU C-CUDA program: %s\n"
.text
.globl _Z10clean_cudav
.type _Z10clean_cudav, @function
_Z10clean_cudav:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call cudaGetLastError@PLT
testl %eax, %eax
jne .L26
.L23:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceReset@PLT
jmp .L23
.cfi_endproc
.LFE2062:
.size _Z10clean_cudav, .-_Z10clean_cudav
.globl _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii
.type _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii, @function
_Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii:
.LFB2088:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z7memcopyPdS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii, .-_Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii
.globl _Z7memcopyPdS_S_iii
.type _Z7memcopyPdS_S_iii, @function
_Z7memcopyPdS_S_iii:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z7memcopyPdS_S_iii, .-_Z7memcopyPdS_S_iii
.section .rodata.str1.1
.LC9:
.string "Process uses GPU with id %d.\n"
.section .rodata.str1.8
.align 8
.LC11:
.string "%dx%dx%d, %1.3f GB, %d iterations.\n"
.align 8
.LC12:
.string "launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n"
.section .rodata.str1.1
.LC14:
.string "Performance"
.text
.globl main
.type main, @function
main:
.LFB2063:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $0, 4(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
leaq 4(%rsp), %rdi
call cudaGetDevice@PLT
call cudaDeviceReset@PLT
movl $2, %edi
call cudaDeviceSetCacheConfig@PLT
movl 4(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $100, %r9d
movsd .LC10(%rip), %xmm0
movl $256, %r8d
movl $1024, %ecx
movl $1024, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pushq $2
.cfi_def_cfa_offset 120
pushq $16
.cfi_def_cfa_offset 128
movl $32, %r9d
movl $128, %r8d
movl $64, %ecx
movl $32, %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2147483648, %ebx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
leaq (%rax,%rbx), %rdx
addq $16, %rsp
.cfi_def_cfa_offset 112
.L36:
movq $0x000000000, (%rax)
addq $8, %rax
cmpq %rdx, %rax
jne .L36
leaq 8(%rsp), %rdi
movl $2147483648, %ebx
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
leaq (%rax,%rbx), %rdx
.L37:
movq $0x000000000, (%rax)
addq $8, %rax
cmpq %rdx, %rax
jne .L37
leaq 16(%rsp), %rdi
movl $2147483648, %ebx
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r13
leaq (%rax,%rbx), %rdx
.L38:
movq $0x000000000, (%rax)
addq $8, %rax
cmpq %rdx, %rax
jne .L38
leaq 24(%rsp), %rdi
movl $2147483648, %ebx
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %ebx
jmp .L44
.L53:
call _Z3ticv
movl $32, 32(%rsp)
movl $64, 36(%rsp)
movl $32, 44(%rsp)
movl $16, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $2, %ecx
movq 32(%rsp), %rdi
movl $128, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
call cudaDeviceSynchronize@PLT
addq $1, %rbx
jmp .L44
.L41:
movl $256, %r9d
movl $1024, %r8d
movl $1024, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii
.L42:
call cudaDeviceSynchronize@PLT
addq $1, %rbx
cmpq $100, %rbx
je .L52
.L44:
cmpq $10, %rbx
je .L53
movl $32, 32(%rsp)
movl $64, 36(%rsp)
movl $32, 44(%rsp)
movl $16, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $2, %ecx
movq 32(%rsp), %rdi
movl $128, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L42
jmp .L41
.L52:
movl $256, %ecx
movl $1024, %edx
movl $1024, %esi
movsd .LC13(%rip), %xmm0
leaq .LC14(%rip), %rdi
call _Z8timPrintPKcdiii
movq %rbp, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call free@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
call _Z10clean_cudav
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L54
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size main, .-main
.section .rodata.str1.1
.LC15:
.string "_Z7memcopyPdS_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z7memcopyPdS_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl timer_start
.bss
.align 8
.type timer_start, @object
.size timer_start, 8
timer_start:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.align 8
.LC10:
.long 0
.long 1075314688
.align 8
.LC13:
.long 0
.long 1082187776
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include "sys/time.h"
#define GPU_ID 0
// #define USE_SINGLE_PRECISION /* Comment this line using "!" if you want to use double precision. */
#ifdef USE_SINGLE_PRECISION
#define DAT float
#define PRECIS 4
#else
#define DAT double
#define PRECIS 8
#endif
#define zeros(A,nx,ny,nz) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc((nx)*(ny)*(nz)*sizeof(DAT)); \
for(i=0; i < (nx)*(ny)*(nz); i++){ A##_h[i]=(DAT)0.0; } \
cudaMalloc(&A##_d ,(nx)*(ny)*(nz)*sizeof(DAT)); \
cudaMemcpy( A##_d,A##_h,(nx)*(ny)*(nz)*sizeof(DAT),cudaMemcpyHostToDevice);
#define free_all(A) free(A##_h);cudaFree(A##_d);
#define BLOCK_X 32
#define BLOCK_Y 16
#define BLOCK_Z 2
#define GRID_X 32
#define GRID_Y 64
#define GRID_Z 128
const int nx = GRID_X*BLOCK_X;
const int ny = GRID_Y*BLOCK_Y;
const int nz = GRID_Z*BLOCK_Z;
const int nt = 100;
// Timer
double timer_start = 0;
// Current wall-clock time in seconds (microsecond resolution).
double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; }
// Record the current time as the start point for toc().
void tic(){ timer_start = cpu_sec(); }
// Seconds elapsed since the most recent tic().
double toc(){ return cpu_sec()-timer_start; }
// Print "<what>: <seconds>" and, when n (GB) is positive, the implied GB/s.
void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); }
// Like tim(), and additionally appends the result to "PERF_memcpy.dat".
// n: data volume in GB; nx/ny/nz: problem dimensions recorded in the log.
void timPrint(const char *what, double n, int nx, int ny, int nz){
double s=toc();
printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n");
// NOTE(review): fopen result is not checked — fprintf on a NULL stream is
// undefined behavior if the log file cannot be opened.
FILE*fid; fid=fopen("PERF_memcpy.dat","a"); fprintf(fid,"nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n", nx, ny, nz, n/s, s); fclose(fid);
}
// Report and clear any pending CUDA error; resets the device if one occurred.
void clean_cuda(){
cudaError_t ce = cudaGetLastError();
if(ce != cudaSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", cudaGetErrorString(ce)); cudaDeviceReset();}
}
// Element-wise A = B + C over an nx*ny*nz volume; one thread per element,
// with a bounds guard for the grid tail.
__global__ void memcopy(DAT*A, DAT*B, DAT*C, const int nx,const int ny,const int nz){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension y
int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension z
// linear row-major index: ix + iy*nx + iz*nx*ny
if (iz<nz && iy<ny && ix<nx) A[ix + iy*nx + iz*nx*ny] = B[ix + iy*nx + iz*nx*ny] + C[ix + iy*nx + iz*nx*ny];
}
////////// main //////////
int main(){
size_t i, it, N=nx*ny*nz, mem=3*N*sizeof(DAT);
dim3 grid, block;
block.x = BLOCK_X; block.y = BLOCK_Y; block.z = BLOCK_Z;
grid.x = GRID_X; grid.y = GRID_Y; grid.z = GRID_Z;
int gpu_id=-1; gpu_id=GPU_ID; cudaSetDevice(gpu_id); cudaGetDevice(&gpu_id);
cudaDeviceReset(); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); // set L1 to prefered
printf("Process uses GPU with id %d.\n",gpu_id);
printf("%dx%dx%d, %1.3f GB, %d iterations.\n", nx,ny,nz, mem/1024./1024./1024., nt);
printf("launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n", grid.x, grid.y, grid.z, block.x, block.y, block.z);
// initializations
zeros(A, nx,ny,nz);
zeros(B, nx,ny,nz);
zeros(C, nx,ny,nz);
// time loop
for(it=0; it<nt; it++){
if (it==10){ tic(); }
memcopy<<<grid, block>>>(A_d,B_d,C_d,nx,ny,nz);
cudaDeviceSynchronize();
}
// tim("Performance", mem*(nt-3)*2/1024./1024./1024.);
timPrint("Performance", mem*(nt-10)/1024./1024./1024.,nx,ny,nz);
free_all(A);
free_all(B);
free_all(C);
clean_cuda();
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include "sys/time.h"
#define GPU_ID 0
// #define USE_SINGLE_PRECISION /* Comment this line using "!" if you want to use double precision. */
#ifdef USE_SINGLE_PRECISION
#define DAT float
#define PRECIS 4
#else
#define DAT double
#define PRECIS 8
#endif
#define zeros(A,nx,ny,nz) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc((nx)*(ny)*(nz)*sizeof(DAT)); \
for(i=0; i < (nx)*(ny)*(nz); i++){ A##_h[i]=(DAT)0.0; } \
hipMalloc(&A##_d ,(nx)*(ny)*(nz)*sizeof(DAT)); \
hipMemcpy( A##_d,A##_h,(nx)*(ny)*(nz)*sizeof(DAT),hipMemcpyHostToDevice);
#define free_all(A) free(A##_h);hipFree(A##_d);
#define BLOCK_X 32
#define BLOCK_Y 16
#define BLOCK_Z 2
#define GRID_X 32
#define GRID_Y 64
#define GRID_Z 128
const int nx = GRID_X*BLOCK_X;
const int ny = GRID_Y*BLOCK_Y;
const int nz = GRID_Z*BLOCK_Z;
const int nt = 100;
// Timer
double timer_start = 0;
double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; }
void tic(){ timer_start = cpu_sec(); }
double toc(){ return cpu_sec()-timer_start; }
void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); }
void timPrint(const char *what, double n, int nx, int ny, int nz){
double s=toc();
printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n");
FILE*fid; fid=fopen("PERF_memcpy.dat","a"); fprintf(fid,"nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n", nx, ny, nz, n/s, s); fclose(fid);
}
void clean_cuda(){
hipError_t ce = hipGetLastError();
if(ce != hipSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", hipGetErrorString(ce)); hipDeviceReset();}
}
__global__ void memcopy(DAT*A, DAT*B, DAT*C, const int nx,const int ny,const int nz){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension x
int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension x
if (iz<nz && iy<ny && ix<nx) A[ix + iy*nx + iz*nx*ny] = B[ix + iy*nx + iz*nx*ny] + C[ix + iy*nx + iz*nx*ny];
}
////////// main //////////
int main(){
size_t i, it, N=nx*ny*nz, mem=3*N*sizeof(DAT);
dim3 grid, block;
block.x = BLOCK_X; block.y = BLOCK_Y; block.z = BLOCK_Z;
grid.x = GRID_X; grid.y = GRID_Y; grid.z = GRID_Z;
int gpu_id=-1; gpu_id=GPU_ID; hipSetDevice(gpu_id); hipGetDevice(&gpu_id);
hipDeviceReset(); hipDeviceSetCacheConfig(hipFuncCachePreferL1); // set L1 to prefered
printf("Process uses GPU with id %d.\n",gpu_id);
printf("%dx%dx%d, %1.3f GB, %d iterations.\n", nx,ny,nz, mem/1024./1024./1024., nt);
printf("launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n", grid.x, grid.y, grid.z, block.x, block.y, block.z);
// initializations
zeros(A, nx,ny,nz);
zeros(B, nx,ny,nz);
zeros(C, nx,ny,nz);
// time loop
for(it=0; it<nt; it++){
if (it==10){ tic(); }
memcopy<<<grid, block>>>(A_d,B_d,C_d,nx,ny,nz);
hipDeviceSynchronize();
}
// tim("Performance", mem*(nt-3)*2/1024./1024./1024.);
timPrint("Performance", mem*(nt-10)/1024./1024./1024.,nx,ny,nz);
free_all(A);
free_all(B);
free_all(C);
clean_cuda();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include "sys/time.h"
#define GPU_ID 0
// #define USE_SINGLE_PRECISION /* Comment this line using "!" if you want to use double precision. */
#ifdef USE_SINGLE_PRECISION
#define DAT float
#define PRECIS 4
#else
#define DAT double
#define PRECIS 8
#endif
#define zeros(A,nx,ny,nz) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc((nx)*(ny)*(nz)*sizeof(DAT)); \
for(i=0; i < (nx)*(ny)*(nz); i++){ A##_h[i]=(DAT)0.0; } \
hipMalloc(&A##_d ,(nx)*(ny)*(nz)*sizeof(DAT)); \
hipMemcpy( A##_d,A##_h,(nx)*(ny)*(nz)*sizeof(DAT),hipMemcpyHostToDevice);
#define free_all(A) free(A##_h);hipFree(A##_d);
#define BLOCK_X 32
#define BLOCK_Y 16
#define BLOCK_Z 2
#define GRID_X 32
#define GRID_Y 64
#define GRID_Z 128
const int nx = GRID_X*BLOCK_X;
const int ny = GRID_Y*BLOCK_Y;
const int nz = GRID_Z*BLOCK_Z;
const int nt = 100;
// Timer
double timer_start = 0;
double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; }
void tic(){ timer_start = cpu_sec(); }
double toc(){ return cpu_sec()-timer_start; }
void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); }
void timPrint(const char *what, double n, int nx, int ny, int nz){
double s=toc();
printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n");
FILE*fid; fid=fopen("PERF_memcpy.dat","a"); fprintf(fid,"nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n", nx, ny, nz, n/s, s); fclose(fid);
}
void clean_cuda(){
hipError_t ce = hipGetLastError();
if(ce != hipSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", hipGetErrorString(ce)); hipDeviceReset();}
}
__global__ void memcopy(DAT*A, DAT*B, DAT*C, const int nx,const int ny,const int nz){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension x
int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension x
if (iz<nz && iy<ny && ix<nx) A[ix + iy*nx + iz*nx*ny] = B[ix + iy*nx + iz*nx*ny] + C[ix + iy*nx + iz*nx*ny];
}
////////// main //////////
int main(){
size_t i, it, N=nx*ny*nz, mem=3*N*sizeof(DAT);
dim3 grid, block;
block.x = BLOCK_X; block.y = BLOCK_Y; block.z = BLOCK_Z;
grid.x = GRID_X; grid.y = GRID_Y; grid.z = GRID_Z;
int gpu_id=-1; gpu_id=GPU_ID; hipSetDevice(gpu_id); hipGetDevice(&gpu_id);
hipDeviceReset(); hipDeviceSetCacheConfig(hipFuncCachePreferL1); // set L1 to prefered
printf("Process uses GPU with id %d.\n",gpu_id);
printf("%dx%dx%d, %1.3f GB, %d iterations.\n", nx,ny,nz, mem/1024./1024./1024., nt);
printf("launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n", grid.x, grid.y, grid.z, block.x, block.y, block.z);
// initializations
zeros(A, nx,ny,nz);
zeros(B, nx,ny,nz);
zeros(C, nx,ny,nz);
// time loop
for(it=0; it<nt; it++){
if (it==10){ tic(); }
memcopy<<<grid, block>>>(A_d,B_d,C_d,nx,ny,nz);
hipDeviceSynchronize();
}
// tim("Performance", mem*(nt-3)*2/1024./1024./1024.);
timPrint("Performance", mem*(nt-10)/1024./1024./1024.,nx,ny,nz);
free_all(A);
free_all(B);
free_all(C);
clean_cuda();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7memcopyPdS_S_iii
.globl _Z7memcopyPdS_S_iii
.p2align 8
.type _Z7memcopyPdS_S_iii,@function
_Z7memcopyPdS_S_iii:
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x34
s_load_b32 s6, s[0:1], 0x20
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v5, 0x3ff, v0
v_bfe_u32 v1, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_and_b32 s7, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1]
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[3:4], null, s14, s2, v[1:2]
v_mad_u64_u32 v[0:1], null, s13, s7, v[5:6]
v_cmp_gt_i32_e32 vcc_lo, s6, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_gt_i32_e64 s2, s5, v3
v_cmp_gt_i32_e64 s3, s4, v0
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mad_u64_u32 v[4:5], null, v2, s5, v[3:4]
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, v4, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s10, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s8, v0
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[4:5]
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7memcopyPdS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7memcopyPdS_S_iii, .Lfunc_end0-_Z7memcopyPdS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7memcopyPdS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7memcopyPdS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include "sys/time.h"
#define GPU_ID 0
// #define USE_SINGLE_PRECISION /* Comment this line using "!" if you want to use double precision. */
#ifdef USE_SINGLE_PRECISION
#define DAT float
#define PRECIS 4
#else
#define DAT double
#define PRECIS 8
#endif
#define zeros(A,nx,ny,nz) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc((nx)*(ny)*(nz)*sizeof(DAT)); \
for(i=0; i < (nx)*(ny)*(nz); i++){ A##_h[i]=(DAT)0.0; } \
hipMalloc(&A##_d ,(nx)*(ny)*(nz)*sizeof(DAT)); \
hipMemcpy( A##_d,A##_h,(nx)*(ny)*(nz)*sizeof(DAT),hipMemcpyHostToDevice);
#define free_all(A) free(A##_h);hipFree(A##_d);
#define BLOCK_X 32
#define BLOCK_Y 16
#define BLOCK_Z 2
#define GRID_X 32
#define GRID_Y 64
#define GRID_Z 128
const int nx = GRID_X*BLOCK_X;
const int ny = GRID_Y*BLOCK_Y;
const int nz = GRID_Z*BLOCK_Z;
const int nt = 100;
// Timer
double timer_start = 0;
double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; }
void tic(){ timer_start = cpu_sec(); }
double toc(){ return cpu_sec()-timer_start; }
void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); }
void timPrint(const char *what, double n, int nx, int ny, int nz){
double s=toc();
printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n");
FILE*fid; fid=fopen("PERF_memcpy.dat","a"); fprintf(fid,"nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n", nx, ny, nz, n/s, s); fclose(fid);
}
void clean_cuda(){
hipError_t ce = hipGetLastError();
if(ce != hipSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", hipGetErrorString(ce)); hipDeviceReset();}
}
__global__ void memcopy(DAT*A, DAT*B, DAT*C, const int nx,const int ny,const int nz){
int ix = blockIdx.x*blockDim.x + threadIdx.x; // thread ID, dimension x
int iy = blockIdx.y*blockDim.y + threadIdx.y; // thread ID, dimension x
int iz = blockIdx.z*blockDim.z + threadIdx.z; // thread ID, dimension x
if (iz<nz && iy<ny && ix<nx) A[ix + iy*nx + iz*nx*ny] = B[ix + iy*nx + iz*nx*ny] + C[ix + iy*nx + iz*nx*ny];
}
////////// main //////////
int main(){
size_t i, it, N=nx*ny*nz, mem=3*N*sizeof(DAT);
dim3 grid, block;
block.x = BLOCK_X; block.y = BLOCK_Y; block.z = BLOCK_Z;
grid.x = GRID_X; grid.y = GRID_Y; grid.z = GRID_Z;
int gpu_id=-1; gpu_id=GPU_ID; hipSetDevice(gpu_id); hipGetDevice(&gpu_id);
hipDeviceReset(); hipDeviceSetCacheConfig(hipFuncCachePreferL1); // set L1 to prefered
printf("Process uses GPU with id %d.\n",gpu_id);
printf("%dx%dx%d, %1.3f GB, %d iterations.\n", nx,ny,nz, mem/1024./1024./1024., nt);
printf("launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n", grid.x, grid.y, grid.z, block.x, block.y, block.z);
// initializations
zeros(A, nx,ny,nz);
zeros(B, nx,ny,nz);
zeros(C, nx,ny,nz);
// time loop
for(it=0; it<nt; it++){
if (it==10){ tic(); }
memcopy<<<grid, block>>>(A_d,B_d,C_d,nx,ny,nz);
hipDeviceSynchronize();
}
// tim("Performance", mem*(nt-3)*2/1024./1024./1024.);
timPrint("Performance", mem*(nt-10)/1024./1024./1024.,nx,ny,nz);
free_all(A);
free_all(B);
free_all(C);
clean_cuda();
return 0;
} | .text
.file "memcopy.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7cpu_secv
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z7cpu_secv
.p2align 4, 0x90
.type _Z7cpu_secv,@function
_Z7cpu_secv: # @_Z7cpu_secv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7cpu_secv, .Lfunc_end0-_Z7cpu_secv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z3ticv
.LCPI1_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z3ticv
.p2align 4, 0x90
.type _Z3ticv,@function
_Z3ticv: # @_Z3ticv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm0
cvtsi2sdq 16(%rsp), %xmm1
mulsd .LCPI1_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, timer_start(%rip)
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z3ticv, .Lfunc_end1-_Z3ticv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z3tocv
.LCPI2_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z3tocv
.p2align 4, 0x90
.type _Z3tocv,@function
_Z3tocv: # @_Z3tocv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd timer_start(%rip), %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z3tocv, .Lfunc_end2-_Z3tocv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z3timPKcd
.LCPI3_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z3timPKcd
.p2align 4, 0x90
.type _Z3timPKcd,@function
_Z3timPKcd: # @_Z3timPKcd
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
movsd %xmm0, 8(%rsp) # 8-byte Spill
movq %rdi, %rbx
leaq 16(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 16(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd timer_start(%rip), %xmm0
movl $.L.str, %edi
movq %rbx, %rsi
movsd %xmm0, (%rsp) # 8-byte Spill
movb $1, %al
callq printf
xorpd %xmm0, %xmm0
movsd 8(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
jbe .LBB3_2
# %bb.1:
movapd %xmm1, %xmm0
divsd (%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.1, %edi
movb $1, %al
callq printf
.LBB3_2:
movl $10, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp putchar@PLT # TAILCALL
.Lfunc_end3:
.size _Z3timPKcd, .Lfunc_end3-_Z3timPKcd
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z8timPrintPKcdiii
.LCPI4_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z8timPrintPKcdiii
.p2align 4, 0x90
.type _Z8timPrintPKcdiii,@function
_Z8timPrintPKcdiii: # @_Z8timPrintPKcdiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $40, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %ebx
movl %edx, %ebp
movl %esi, %r14d
movsd %xmm0, 8(%rsp) # 8-byte Spill
movq %rdi, %r15
leaq 24(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 24(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 32(%rsp), %xmm0
mulsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd timer_start(%rip), %xmm0
movsd %xmm0, 16(%rsp) # 8-byte Spill
movl $.L.str, %edi
movq %r15, %rsi
movb $1, %al
callq printf
xorpd %xmm0, %xmm0
movsd 8(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
divsd 16(%rsp), %xmm1 # 8-byte Folded Reload
movsd %xmm1, 8(%rsp) # 8-byte Spill
jbe .LBB4_2
# %bb.1:
movl $.L.str.1, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
.LBB4_2:
movl $10, %edi
callq putchar@PLT
movl $.L.str.3, %edi
movl $.L.str.4, %esi
callq fopen
movq %rax, %r15
movl $.L.str.5, %esi
movq %rax, %rdi
movl %r14d, %edx
movl %ebp, %ecx
movl %ebx, %r8d
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 16(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $2, %al
callq fprintf
movq %r15, %rdi
addq $40, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp fclose # TAILCALL
.Lfunc_end4:
.size _Z8timPrintPKcdiii, .Lfunc_end4-_Z8timPrintPKcdiii
.cfi_endproc
# -- End function
.globl _Z10clean_cudav # -- Begin function _Z10clean_cudav
.p2align 4, 0x90
.type _Z10clean_cudav,@function
_Z10clean_cudav: # @_Z10clean_cudav
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq hipGetLastError
testl %eax, %eax
je .LBB5_1
# %bb.2:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
popq %rax
.cfi_def_cfa_offset 8
jmp hipDeviceReset # TAILCALL
.LBB5_1:
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z10clean_cudav, .Lfunc_end5-_Z10clean_cudav
.cfi_endproc
# -- End function
.globl _Z22__device_stub__memcopyPdS_S_iii # -- Begin function _Z22__device_stub__memcopyPdS_S_iii
.p2align 4, 0x90
.type _Z22__device_stub__memcopyPdS_S_iii,@function
_Z22__device_stub__memcopyPdS_S_iii: # @_Z22__device_stub__memcopyPdS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7memcopyPdS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end6:
.size _Z22__device_stub__memcopyPdS_S_iii, .Lfunc_end6-_Z22__device_stub__memcopyPdS_S_iii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI7_0:
.quad 0x4018000000000000 # double 6
.LCPI7_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI7_2:
.quad 0x4080e00000000000 # double 540
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $0, 12(%rsp)
xorl %edi, %edi
callq hipSetDevice
leaq 12(%rsp), %rdi
callq hipGetDevice
callq hipDeviceReset
movl $2, %edi
callq hipDeviceSetCacheConfig
movl 12(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movsd .LCPI7_0(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.8, %edi
movl $1024, %esi # imm = 0x400
movl $1024, %edx # imm = 0x400
movl $256, %ecx # imm = 0x100
movl $100, %r8d
movb $1, %al
callq printf
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movl $.L.str.9, %edi
movl $32, %esi
movl $64, %edx
movl $128, %ecx
movl $32, %r8d
movl $16, %r9d
xorl %eax, %eax
pushq $2
.cfi_adjust_cfa_offset 8
callq printf
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $2147483648, %edi # imm = 0x80000000
callq malloc
movq %rax, %rbx
movl $2147483648, %edx # imm = 0x80000000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 32(%rsp), %rdi
movl $2147483648, %esi # imm = 0x80000000
callq hipMalloc
movq 32(%rsp), %rdi
movl $2147483648, %edx # imm = 0x80000000
movq %rbx, 136(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $2147483648, %edi # imm = 0x80000000
callq malloc
movq %rax, %rbx
movl $2147483648, %edx # imm = 0x80000000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 24(%rsp), %rdi
movl $2147483648, %esi # imm = 0x80000000
callq hipMalloc
movq 24(%rsp), %rdi
movl $2147483648, %edx # imm = 0x80000000
movq %rbx, 128(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $2147483648, %edi # imm = 0x80000000
callq malloc
movq %rax, %rbx
movl $2147483648, %edx # imm = 0x80000000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 16(%rsp), %rdi
movl $2147483648, %esi # imm = 0x80000000
callq hipMalloc
movq 16(%rsp), %rdi
movl $2147483648, %edx # imm = 0x80000000
movq %rbx, 120(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $100, %r14d
leaq 64(%rsp), %r12
movabsq $274877906976, %r13 # imm = 0x4000000020
movabsq $68719476768, %rbp # imm = 0x1000000020
leaq 152(%rsp), %r15
leaq 144(%rsp), %rbx
jmp .LBB7_1
.p2align 4, 0x90
.LBB7_3: # in Loop: Header=BB7_1 Depth=1
movq %r13, %rdi
movl $128, %esi
movq %rbp, %rdx
movl $2, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
je .LBB7_4
.LBB7_5: # in Loop: Header=BB7_1 Depth=1
callq hipDeviceSynchronize
decq %r14
je .LBB7_6
.LBB7_1: # =>This Inner Loop Header: Depth=1
cmpq $90, %r14
jne .LBB7_3
# %bb.2: # in Loop: Header=BB7_1 Depth=1
movq %r12, %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 64(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 72(%rsp), %xmm1
mulsd .LCPI7_1(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, timer_start(%rip)
jmp .LBB7_3
.p2align 4, 0x90
.LBB7_4: # in Loop: Header=BB7_1 Depth=1
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 208(%rsp)
movq %rcx, 200(%rsp)
movq %rdx, 192(%rsp)
movl $1024, 52(%rsp) # imm = 0x400
movl $1024, 48(%rsp) # imm = 0x400
movl $256, 44(%rsp) # imm = 0x100
leaq 208(%rsp), %rax
movq %rax, 64(%rsp)
leaq 200(%rsp), %rax
movq %rax, 72(%rsp)
leaq 192(%rsp), %rax
movq %rax, 80(%rsp)
leaq 52(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 44(%rsp), %rax
movq %rax, 104(%rsp)
leaq 176(%rsp), %rdi
leaq 160(%rsp), %rsi
movq %r15, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 176(%rsp), %rsi
movl 184(%rsp), %edx
movq 160(%rsp), %rcx
movl 168(%rsp), %r8d
movl $_Z7memcopyPdS_S_iii, %edi
movq %r12, %r9
pushq 144(%rsp)
.cfi_adjust_cfa_offset 8
pushq 160(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB7_5
.LBB7_6:
leaq 64(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 64(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 72(%rsp), %xmm0
mulsd .LCPI7_1(%rip), %xmm0
addsd %xmm1, %xmm0
subsd timer_start(%rip), %xmm0
movsd %xmm0, 56(%rsp) # 8-byte Spill
movl $.L.str, %edi
movl $.L.str.10, %esi
movb $1, %al
callq printf
movsd .LCPI7_2(%rip), %xmm0 # xmm0 = mem[0],zero
divsd 56(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 112(%rsp) # 8-byte Spill
movl $.L.str.1, %edi
movb $1, %al
callq printf
movl $10, %edi
callq putchar@PLT
movl $.L.str.3, %edi
movl $.L.str.4, %esi
callq fopen
movq %rax, %rbx
movl $.L.str.5, %esi
movq %rax, %rdi
movl $1024, %edx # imm = 0x400
movl $1024, %ecx # imm = 0x400
movl $256, %r8d # imm = 0x100
movsd 112(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 56(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $2, %al
callq fprintf
movq %rbx, %rdi
callq fclose
movq 136(%rsp), %rdi # 8-byte Reload
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 128(%rsp), %rdi # 8-byte Reload
callq free
movq 24(%rsp), %rdi
callq hipFree
movq 120(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi
callq hipFree
callq hipGetLastError
testl %eax, %eax
je .LBB7_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
callq hipDeviceReset
.LBB7_8: # %_Z10clean_cudav.exit
xorl %eax, %eax
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7memcopyPdS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type timer_start,@object # @timer_start
.bss
.globl timer_start
.p2align 3, 0x0
timer_start:
.quad 0x0000000000000000 # double 0
.size timer_start, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s: %8.3f seconds"
.size .L.str, 18
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ", %8.3f GB/s"
.size .L.str.1, 13
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "PERF_memcpy.dat"
.size .L.str.3, 16
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "a"
.size .L.str.4, 2
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n"
.size .L.str.5, 44
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "ERROR launching GPU C-CUDA program: %s\n"
.size .L.str.6, 40
.type _Z7memcopyPdS_S_iii,@object # @_Z7memcopyPdS_S_iii
.section .rodata,"a",@progbits
.globl _Z7memcopyPdS_S_iii
.p2align 3, 0x0
_Z7memcopyPdS_S_iii:
.quad _Z22__device_stub__memcopyPdS_S_iii
.size _Z7memcopyPdS_S_iii, 8
.type .L.str.7,@object # @.str.7
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.7:
.asciz "Process uses GPU with id %d.\n"
.size .L.str.7, 30
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%dx%dx%d, %1.3f GB, %d iterations.\n"
.size .L.str.8, 36
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n"
.size .L.str.9, 49
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Performance"
.size .L.str.10, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7memcopyPdS_S_iii"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__memcopyPdS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7memcopyPdS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7memcopyPdS_S_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Z ; /* 0x0000000000037919 */
/* 0x000e280000002700 */
/*0020*/ S2R R4, SR_TID.Z ; /* 0x0000000000047919 */
/* 0x000e280000002300 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002200 */
/*0050*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000ea80000002500 */
/*0060*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000ea20000002100 */
/*0070*/ IMAD R3, R3, c[0x0][0x8], R4 ; /* 0x0000020003037a24 */
/* 0x001fca00078e0204 */
/*0080*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x180], PT ; /* 0x0000600003007a0c */
/* 0x000fe20003f06270 */
/*0090*/ IMAD R2, R2, c[0x0][0x4], R7 ; /* 0x0000010002027a24 */
/* 0x002fca00078e0207 */
/*00a0*/ ISETP.GE.OR P0, PT, R2, c[0x0][0x17c], P0 ; /* 0x00005f0002007a0c */
/* 0x000fe20000706670 */
/*00b0*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0205 */
/*00c0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*00d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00e0*/ HFMA2.MMA R9, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff097435 */
/* 0x000fe200000001ff */
/*00f0*/ IMAD R3, R3, c[0x0][0x17c], R2 ; /* 0x00005f0003037a24 */
/* 0x000fe200078e0202 */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0110*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fca00078e0200 */
/*0120*/ IMAD.WIDE R4, R0, R9, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fc800078e0209 */
/*0130*/ IMAD.WIDE R2, R0.reuse, R9.reuse, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x0c0fe400078e0209 */
/*0140*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*0150*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*0160*/ IMAD.WIDE R8, R0, R9, c[0x0][0x160] ; /* 0x0000580000087625 */
/* 0x000fe200078e0209 */
/*0170*/ DADD R6, R4, R2 ; /* 0x0000000004067229 */
/* 0x004e0e0000000002 */
/*0180*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x001fe2000c101b04 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ BRA 0x1a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7memcopyPdS_S_iii
.globl _Z7memcopyPdS_S_iii
.p2align 8
.type _Z7memcopyPdS_S_iii,@function
_Z7memcopyPdS_S_iii:
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x34
s_load_b32 s6, s[0:1], 0x20
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v5, 0x3ff, v0
v_bfe_u32 v1, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_and_b32 s7, s2, 0xffff
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s15, s3, v[0:1]
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[3:4], null, s14, s2, v[1:2]
v_mad_u64_u32 v[0:1], null, s13, s7, v[5:6]
v_cmp_gt_i32_e32 vcc_lo, s6, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_gt_i32_e64 s2, s5, v3
v_cmp_gt_i32_e64 s3, s4, v0
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mad_u64_u32 v[4:5], null, v2, s5, v[3:4]
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, v4, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s10, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s8, v0
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[4:5]
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7memcopyPdS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7memcopyPdS_S_iii, .Lfunc_end0-_Z7memcopyPdS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7memcopyPdS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7memcopyPdS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0001762b_00000000-6_memcopy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7cpu_secv
.type _Z7cpu_secv, @function
_Z7cpu_secv:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z7cpu_secv, .-_Z7cpu_secv
.globl _Z3ticv
.type _Z3ticv, @function
_Z3ticv:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z7cpu_secv
movsd %xmm0, timer_start(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z3ticv, .-_Z3ticv
.globl _Z3tocv
.type _Z3tocv, @function
_Z3tocv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z7cpu_secv
subsd timer_start(%rip), %xmm0
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z3tocv, .-_Z3tocv
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "%s: %8.3f seconds"
.LC3:
.string ", %8.3f GB/s"
.LC4:
.string "\n"
.text
.globl _Z3timPKcd
.type _Z3timPKcd, @function
_Z3timPKcd:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movsd %xmm0, (%rsp)
call _Z3tocv
movsd %xmm0, 8(%rsp)
movq %rbx, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
movsd (%rsp), %xmm1
comisd %xmm0, %xmm1
ja .L16
.L12:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
divsd 8(%rsp), %xmm1
movapd %xmm1, %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L12
.cfi_endproc
.LFE2060:
.size _Z3timPKcd, .-_Z3timPKcd
.section .rodata.str1.1
.LC5:
.string "a"
.LC6:
.string "PERF_memcpy.dat"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n"
.text
.globl _Z8timPrintPKcdiii
.type _Z8timPrintPKcdiii, @function
_Z8timPrintPKcdiii:
.LFB2061:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $24, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbx
movsd %xmm0, (%rsp)
movl %esi, %ebp
movl %edx, %r12d
movl %ecx, %r13d
call _Z3tocv
movsd %xmm0, 8(%rsp)
movq %rbx, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
movsd (%rsp), %xmm2
comisd %xmm0, %xmm2
ja .L22
.L18:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC5(%rip), %rsi
leaq .LC6(%rip), %rdi
call fopen@PLT
movq %rax, %rbx
movsd (%rsp), %xmm0
movsd 8(%rsp), %xmm1
divsd %xmm1, %xmm0
movl %r13d, %r9d
movl %r12d, %r8d
movl %ebp, %ecx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $2, %eax
call __fprintf_chk@PLT
movq %rbx, %rdi
call fclose@PLT
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
divsd 8(%rsp), %xmm2
movapd %xmm2, %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
jmp .L18
.cfi_endproc
.LFE2061:
.size _Z8timPrintPKcdiii, .-_Z8timPrintPKcdiii
.section .rodata.str1.8
.align 8
.LC8:
.string "ERROR launching GPU C-CUDA program: %s\n"
.text
.globl _Z10clean_cudav
.type _Z10clean_cudav, @function
_Z10clean_cudav:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call cudaGetLastError@PLT
testl %eax, %eax
jne .L26
.L23:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaDeviceReset@PLT
jmp .L23
.cfi_endproc
.LFE2062:
.size _Z10clean_cudav, .-_Z10clean_cudav
.globl _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii
.type _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii, @function
_Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii:
.LFB2088:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z7memcopyPdS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii, .-_Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii
.globl _Z7memcopyPdS_S_iii
.type _Z7memcopyPdS_S_iii, @function
_Z7memcopyPdS_S_iii:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z7memcopyPdS_S_iii, .-_Z7memcopyPdS_S_iii
.section .rodata.str1.1
.LC9:
.string "Process uses GPU with id %d.\n"
.section .rodata.str1.8
.align 8
.LC11:
.string "%dx%dx%d, %1.3f GB, %d iterations.\n"
.align 8
.LC12:
.string "launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n"
.section .rodata.str1.1
.LC14:
.string "Performance"
.text
.globl main
.type main, @function
main:
.LFB2063:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $0, 4(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
leaq 4(%rsp), %rdi
call cudaGetDevice@PLT
call cudaDeviceReset@PLT
movl $2, %edi
call cudaDeviceSetCacheConfig@PLT
movl 4(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $100, %r9d
movsd .LC10(%rip), %xmm0
movl $256, %r8d
movl $1024, %ecx
movl $1024, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pushq $2
.cfi_def_cfa_offset 120
pushq $16
.cfi_def_cfa_offset 128
movl $32, %r9d
movl $128, %r8d
movl $64, %ecx
movl $32, %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2147483648, %ebx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %rbp
leaq (%rax,%rbx), %rdx
addq $16, %rsp
.cfi_def_cfa_offset 112
.L36:
movq $0x000000000, (%rax)
addq $8, %rax
cmpq %rdx, %rax
jne .L36
leaq 8(%rsp), %rdi
movl $2147483648, %ebx
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r12
leaq (%rax,%rbx), %rdx
.L37:
movq $0x000000000, (%rax)
addq $8, %rax
cmpq %rdx, %rax
jne .L37
leaq 16(%rsp), %rdi
movl $2147483648, %ebx
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r13
leaq (%rax,%rbx), %rdx
.L38:
movq $0x000000000, (%rax)
addq $8, %rax
cmpq %rdx, %rax
jne .L38
leaq 24(%rsp), %rdi
movl $2147483648, %ebx
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %ebx
jmp .L44
.L53:
call _Z3ticv
movl $32, 32(%rsp)
movl $64, 36(%rsp)
movl $32, 44(%rsp)
movl $16, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $2, %ecx
movq 32(%rsp), %rdi
movl $128, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L41
call cudaDeviceSynchronize@PLT
addq $1, %rbx
jmp .L44
.L41:
movl $256, %r9d
movl $1024, %r8d
movl $1024, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z33__device_stub__Z7memcopyPdS_S_iiiPdS_S_iii
.L42:
call cudaDeviceSynchronize@PLT
addq $1, %rbx
cmpq $100, %rbx
je .L52
.L44:
cmpq $10, %rbx
je .L53
movl $32, 32(%rsp)
movl $64, 36(%rsp)
movl $32, 44(%rsp)
movl $16, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $2, %ecx
movq 32(%rsp), %rdi
movl $128, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L42
jmp .L41
.L52:
movl $256, %ecx
movl $1024, %edx
movl $1024, %esi
movsd .LC13(%rip), %xmm0
leaq .LC14(%rip), %rdi
call _Z8timPrintPKcdiii
movq %rbp, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call free@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
call _Z10clean_cudav
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L54
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size main, .-main
.section .rodata.str1.1
.LC15:
.string "_Z7memcopyPdS_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z7memcopyPdS_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl timer_start
.bss
.align 8
.type timer_start, @object
.size timer_start, 8
timer_start:
.zero 8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.align 8
.LC10:
.long 0
.long 1075314688
.align 8
.LC13:
.long 0
.long 1082187776
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "memcopy.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7cpu_secv
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z7cpu_secv
.p2align 4, 0x90
.type _Z7cpu_secv,@function
_Z7cpu_secv: # @_Z7cpu_secv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7cpu_secv, .Lfunc_end0-_Z7cpu_secv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z3ticv
.LCPI1_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z3ticv
.p2align 4, 0x90
.type _Z3ticv,@function
_Z3ticv: # @_Z3ticv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm0
cvtsi2sdq 16(%rsp), %xmm1
mulsd .LCPI1_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, timer_start(%rip)
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z3ticv, .Lfunc_end1-_Z3ticv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z3tocv
.LCPI2_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z3tocv
.p2align 4, 0x90
.type _Z3tocv,@function
_Z3tocv: # @_Z3tocv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI2_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd timer_start(%rip), %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z3tocv, .Lfunc_end2-_Z3tocv
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z3timPKcd
.LCPI3_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z3timPKcd
.p2align 4, 0x90
.type _Z3timPKcd,@function
_Z3timPKcd: # @_Z3timPKcd
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
movsd %xmm0, 8(%rsp) # 8-byte Spill
movq %rdi, %rbx
leaq 16(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 16(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd timer_start(%rip), %xmm0
movl $.L.str, %edi
movq %rbx, %rsi
movsd %xmm0, (%rsp) # 8-byte Spill
movb $1, %al
callq printf
xorpd %xmm0, %xmm0
movsd 8(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
jbe .LBB3_2
# %bb.1:
movapd %xmm1, %xmm0
divsd (%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.1, %edi
movb $1, %al
callq printf
.LBB3_2:
movl $10, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp putchar@PLT # TAILCALL
.Lfunc_end3:
.size _Z3timPKcd, .Lfunc_end3-_Z3timPKcd
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z8timPrintPKcdiii
.LCPI4_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z8timPrintPKcdiii
.p2align 4, 0x90
.type _Z8timPrintPKcdiii,@function
_Z8timPrintPKcdiii: # @_Z8timPrintPKcdiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $40, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %ebx
movl %edx, %ebp
movl %esi, %r14d
movsd %xmm0, 8(%rsp) # 8-byte Spill
movq %rdi, %r15
leaq 24(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 24(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 32(%rsp), %xmm0
mulsd .LCPI4_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd timer_start(%rip), %xmm0
movsd %xmm0, 16(%rsp) # 8-byte Spill
movl $.L.str, %edi
movq %r15, %rsi
movb $1, %al
callq printf
xorpd %xmm0, %xmm0
movsd 8(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
ucomisd %xmm0, %xmm1
divsd 16(%rsp), %xmm1 # 8-byte Folded Reload
movsd %xmm1, 8(%rsp) # 8-byte Spill
jbe .LBB4_2
# %bb.1:
movl $.L.str.1, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $1, %al
callq printf
.LBB4_2:
movl $10, %edi
callq putchar@PLT
movl $.L.str.3, %edi
movl $.L.str.4, %esi
callq fopen
movq %rax, %r15
movl $.L.str.5, %esi
movq %rax, %rdi
movl %r14d, %edx
movl %ebp, %ecx
movl %ebx, %r8d
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 16(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $2, %al
callq fprintf
movq %r15, %rdi
addq $40, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp fclose # TAILCALL
.Lfunc_end4:
.size _Z8timPrintPKcdiii, .Lfunc_end4-_Z8timPrintPKcdiii
.cfi_endproc
# -- End function
.globl _Z10clean_cudav # -- Begin function _Z10clean_cudav
.p2align 4, 0x90
.type _Z10clean_cudav,@function
_Z10clean_cudav: # @_Z10clean_cudav
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq hipGetLastError
testl %eax, %eax
je .LBB5_1
# %bb.2:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
popq %rax
.cfi_def_cfa_offset 8
jmp hipDeviceReset # TAILCALL
.LBB5_1:
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z10clean_cudav, .Lfunc_end5-_Z10clean_cudav
.cfi_endproc
# -- End function
.globl _Z22__device_stub__memcopyPdS_S_iii # -- Begin function _Z22__device_stub__memcopyPdS_S_iii
.p2align 4, 0x90
.type _Z22__device_stub__memcopyPdS_S_iii,@function
_Z22__device_stub__memcopyPdS_S_iii: # @_Z22__device_stub__memcopyPdS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z7memcopyPdS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end6:
.size _Z22__device_stub__memcopyPdS_S_iii, .Lfunc_end6-_Z22__device_stub__memcopyPdS_S_iii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI7_0:
.quad 0x4018000000000000 # double 6
.LCPI7_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI7_2:
.quad 0x4080e00000000000 # double 540
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $0, 12(%rsp)
xorl %edi, %edi
callq hipSetDevice
leaq 12(%rsp), %rdi
callq hipGetDevice
callq hipDeviceReset
movl $2, %edi
callq hipDeviceSetCacheConfig
movl 12(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movsd .LCPI7_0(%rip), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.8, %edi
movl $1024, %esi # imm = 0x400
movl $1024, %edx # imm = 0x400
movl $256, %ecx # imm = 0x100
movl $100, %r8d
movb $1, %al
callq printf
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movl $.L.str.9, %edi
movl $32, %esi
movl $64, %edx
movl $128, %ecx
movl $32, %r8d
movl $16, %r9d
xorl %eax, %eax
pushq $2
.cfi_adjust_cfa_offset 8
callq printf
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $2147483648, %edi # imm = 0x80000000
callq malloc
movq %rax, %rbx
movl $2147483648, %edx # imm = 0x80000000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 32(%rsp), %rdi
movl $2147483648, %esi # imm = 0x80000000
callq hipMalloc
movq 32(%rsp), %rdi
movl $2147483648, %edx # imm = 0x80000000
movq %rbx, 136(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $2147483648, %edi # imm = 0x80000000
callq malloc
movq %rax, %rbx
movl $2147483648, %edx # imm = 0x80000000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 24(%rsp), %rdi
movl $2147483648, %esi # imm = 0x80000000
callq hipMalloc
movq 24(%rsp), %rdi
movl $2147483648, %edx # imm = 0x80000000
movq %rbx, 128(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $2147483648, %edi # imm = 0x80000000
callq malloc
movq %rax, %rbx
movl $2147483648, %edx # imm = 0x80000000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 16(%rsp), %rdi
movl $2147483648, %esi # imm = 0x80000000
callq hipMalloc
movq 16(%rsp), %rdi
movl $2147483648, %edx # imm = 0x80000000
movq %rbx, 120(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $100, %r14d
leaq 64(%rsp), %r12
movabsq $274877906976, %r13 # imm = 0x4000000020
movabsq $68719476768, %rbp # imm = 0x1000000020
leaq 152(%rsp), %r15
leaq 144(%rsp), %rbx
jmp .LBB7_1
.p2align 4, 0x90
.LBB7_3: # in Loop: Header=BB7_1 Depth=1
movq %r13, %rdi
movl $128, %esi
movq %rbp, %rdx
movl $2, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
je .LBB7_4
.LBB7_5: # in Loop: Header=BB7_1 Depth=1
callq hipDeviceSynchronize
decq %r14
je .LBB7_6
.LBB7_1: # =>This Inner Loop Header: Depth=1
cmpq $90, %r14
jne .LBB7_3
# %bb.2: # in Loop: Header=BB7_1 Depth=1
movq %r12, %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 64(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 72(%rsp), %xmm1
mulsd .LCPI7_1(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, timer_start(%rip)
jmp .LBB7_3
.p2align 4, 0x90
.LBB7_4: # in Loop: Header=BB7_1 Depth=1
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 208(%rsp)
movq %rcx, 200(%rsp)
movq %rdx, 192(%rsp)
movl $1024, 52(%rsp) # imm = 0x400
movl $1024, 48(%rsp) # imm = 0x400
movl $256, 44(%rsp) # imm = 0x100
leaq 208(%rsp), %rax
movq %rax, 64(%rsp)
leaq 200(%rsp), %rax
movq %rax, 72(%rsp)
leaq 192(%rsp), %rax
movq %rax, 80(%rsp)
leaq 52(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rax
movq %rax, 96(%rsp)
leaq 44(%rsp), %rax
movq %rax, 104(%rsp)
leaq 176(%rsp), %rdi
leaq 160(%rsp), %rsi
movq %r15, %rdx
movq %rbx, %rcx
callq __hipPopCallConfiguration
movq 176(%rsp), %rsi
movl 184(%rsp), %edx
movq 160(%rsp), %rcx
movl 168(%rsp), %r8d
movl $_Z7memcopyPdS_S_iii, %edi
movq %r12, %r9
pushq 144(%rsp)
.cfi_adjust_cfa_offset 8
pushq 160(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB7_5
.LBB7_6:
leaq 64(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 64(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 72(%rsp), %xmm0
mulsd .LCPI7_1(%rip), %xmm0
addsd %xmm1, %xmm0
subsd timer_start(%rip), %xmm0
movsd %xmm0, 56(%rsp) # 8-byte Spill
movl $.L.str, %edi
movl $.L.str.10, %esi
movb $1, %al
callq printf
movsd .LCPI7_2(%rip), %xmm0 # xmm0 = mem[0],zero
divsd 56(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 112(%rsp) # 8-byte Spill
movl $.L.str.1, %edi
movb $1, %al
callq printf
movl $10, %edi
callq putchar@PLT
movl $.L.str.3, %edi
movl $.L.str.4, %esi
callq fopen
movq %rax, %rbx
movl $.L.str.5, %esi
movq %rax, %rdi
movl $1024, %edx # imm = 0x400
movl $1024, %ecx # imm = 0x400
movl $256, %r8d # imm = 0x100
movsd 112(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd 56(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
movb $2, %al
callq fprintf
movq %rbx, %rdi
callq fclose
movq 136(%rsp), %rdi # 8-byte Reload
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 128(%rsp), %rdi # 8-byte Reload
callq free
movq 24(%rsp), %rdi
callq hipFree
movq 120(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi
callq hipFree
callq hipGetLastError
testl %eax, %eax
je .LBB7_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.6, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
callq hipDeviceReset
.LBB7_8: # %_Z10clean_cudav.exit
xorl %eax, %eax
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# Module constructor (installed via .init_array): registers the embedded HIP
# fat binary exactly once and registers the memcopy kernel stub with the
# runtime, then installs __hip_module_dtor via atexit.
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# One-shot guard: skip registration if __hip_gpubin_handle is already set.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the four trailing (unused) pointer args of __hipRegisterFunction
# that live in the 32-byte stack area.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7memcopyPdS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Tail-call atexit(__hip_module_dtor) so the fat binary is unregistered at exit.
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# Module destructor (registered with atexit by __hip_module_ctor):
# unregisters the HIP fat binary if it was registered and clears the handle
# so a second invocation is a no-op.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
# pushq only realigns the stack to 16 bytes for the call below.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type timer_start,@object # @timer_start
.bss
.globl timer_start
.p2align 3, 0x0
timer_start:
.quad 0x0000000000000000 # double 0
.size timer_start, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s: %8.3f seconds"
.size .L.str, 18
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ", %8.3f GB/s"
.size .L.str.1, 13
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "PERF_memcpy.dat"
.size .L.str.3, 16
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "a"
.size .L.str.4, 2
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "nx=%d ny=%d nz=%d GBs=%1.4f time_s=%1.4f \n"
.size .L.str.5, 44
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "ERROR launching GPU C-CUDA program: %s\n"
.size .L.str.6, 40
.type _Z7memcopyPdS_S_iii,@object # @_Z7memcopyPdS_S_iii
.section .rodata,"a",@progbits
.globl _Z7memcopyPdS_S_iii
.p2align 3, 0x0
_Z7memcopyPdS_S_iii:
.quad _Z22__device_stub__memcopyPdS_S_iii
.size _Z7memcopyPdS_S_iii, 8
.type .L.str.7,@object # @.str.7
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.7:
.asciz "Process uses GPU with id %d.\n"
.size .L.str.7, 30
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%dx%dx%d, %1.3f GB, %d iterations.\n"
.size .L.str.8, 36
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "launching (%dx%dx%d) grid of (%dx%dx%d) blocks.\n"
.size .L.str.9, 49
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Performance"
.size .L.str.10, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7memcopyPdS_S_iii"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__memcopyPdS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7memcopyPdS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Daniel Willen, 2019
*
* Solve the transient heat conduction problem with homogeneous Dirichlet
* boundary conditions:
*
* u(x={0,L}) = u(y={0,L}) = 0
*
* and initial condition:
*
* u(x,y,0) = sin(x) * sin(y)
*
* on the domain 0 <= x,y <= L, with L = pi.
*
* This program solves the above problem on a single GPU with the Jacobi method.
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#define PI 3.14159265358979323846
#define MAX_THREADS_DIM 16 // Note that this depends on the hardware
/* Note on the structure of this file:
* - Cuda device constant memory declarations are at the top
* - Functions definitions are in the middle. Functions include:
* - - parse_cmdline: Read command-line arguments for domain size
* - - jacobi_solver: Advance the soln to the next time step using Jacobi
* - - check_error: Calculate the error b/t the numeric and analytic solns
* - The `main' function is at the bottom
*
* Note that it is good practice to use header files and break functions out
* into separate files. This has not been done here for simplicity.
*/
/*** Auxiliary Functions ***/
/* Read the command line inputs */
// - argv[0] is the program name
// - argv[1] is the first input (number of points)
/* Parse the command line for the problem size.
 * Expects exactly one argument: the number of grid points in one dimension.
 * Prints usage and exits on malformed input, or exits if the requested grid
 * is smaller than MAX_THREADS_DIM; otherwise echoes and returns the size. */
int parse_cmdline(int argc, char *argv[]) {
    if (argc != 2) {
        /* Wrong argument count: print usage and bail out. */
        printf("Input error. Run like: \n\n");
        printf(" $ ./parallel.c n\n\n");
        printf(" where n is the number of grid cells in one dimension\n");
        exit(EXIT_FAILURE);
    }
    int n_points = atoi(argv[1]); /* Number of grid points */
    if (n_points < MAX_THREADS_DIM) {
        /* Grid must be at least one thread-block wide. */
        printf("Expecting a number of grid cells in one dimension to be at least %d\n", MAX_THREADS_DIM);
        exit(EXIT_FAILURE);
    }
    printf("Grid is %d by %d\n\n", n_points, n_points);
    return n_points;
}
/*******************************************************************************
* Step IV: Launch the GPU kernel to advance to the next time step with the *
* Jacobi method here. *
******************************************************************************/
/* One explicit Jacobi step: _u_new = _u + pref * (5-point Laplacian of _u).
 * One thread per grid point; boundary rows/columns (i==0, i==nx-1, j==0,
 * j==ny-1) are never written, which preserves the Dirichlet BCs.
 * Expects a 2D launch whose grid covers at least nx x ny threads. */
__global__ void computeNextJacobiStep(int nx, int ny, double pref, double* _u, double* _u_new) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    // Interior points only; out-of-range and boundary threads do nothing.
    bool interior = (i > 0) && (i < nx - 1) && (j > 0) && (j < ny - 1);
    if (interior) {
        int c = j * nx + i;               // flat index of the center point
        double center = _u[c];
        double stencil = _u[c + 1] + _u[c - 1] + _u[c + nx] + _u[c - nx]
                         - 4.0 * center;  // discrete Laplacian numerator
        _u_new[c] = center + pref * stencil;
    }
}
/******************************************************************************
* Step V: Launch the GPU kernel to calculate the error at each grid point *
* here. *
*****************************************************************************/
/* Per-point absolute error between the numeric solution _u and the analytic
 * solution u(x,y,t) = sin(x)sin(y)exp(-2Dt) of the heat equation with these
 * BCs/IC. One thread per grid point; only interior points are written (the
 * boundary of _error is expected to stay zero). */
__global__ void computeJacobiError(int nx, int ny, double dx, double dy, double D, double t, double* _u, double* _error) {
    int ti = blockDim.x * blockIdx.x + threadIdx.x;
    int tj = blockDim.y * blockIdx.y + threadIdx.y;
    if (ti < (nx-1) && ti > 0 && tj < (ny-1) && tj > 0) {
        double discretizedValue = _u[tj*nx + ti];
        double analyticalValue = sin(dx * ti) * sin(dy * tj) * exp(-2.0 * D * t);
        // fabs, not abs: the plain C abs() takes an int, so resolving to it
        // would truncate the double difference before taking the magnitude.
        _error[tj*nx + ti] = fabs(discretizedValue - analyticalValue);
    }
}
/*** Main Function ***/
/* Entry point: sets up the 2D transient heat-conduction problem on an
 * nx-by-nx grid, runs explicit Jacobi time stepping on the GPU, prints the
 * mean error against the analytic solution each step, and cleans up.
 * NOTE(review): no CUDA API or kernel-launch error checking anywhere in
 * this function — allocation or launch failures are silent. */
int main(int argc, char *argv[])
{
/* Variable declaration */
double Lx = PI; // Domain length in x-direction
double Ly = PI; // Domain length in y-direction
double D = 1.; // Diffusion constant
int nx, ny; // Grid points (grid cells + 1)
double dx, dy; // Grid spacing
double dt; // Time step size
double sim_time; // Length of sim time, arbitrary for simplicity
double pref; // Pre-factor in the Jacobi method
double error = 0.; // Mean percent-difference at each grid point
error = error; // To prevent compiler warning (self-assignment is a no-op)
/* Parse command-line for problem size */
nx = parse_cmdline(argc, argv);
ny = nx; // Assume a square grid
/* Initialize variables */
dx = Lx / (nx - 1); // Cell width in x-direction
dy = Ly / (ny - 1); // Cell width in y-direction
dt = 0.25*dx*dy/D; // Limited by diffusive stability
sim_time = 0.5*Lx*Ly/D; // Arbitrary simulation length
pref = D*dt/(dx*dx); // Jacobi pre-factor
/*****************************************************************************
* Step I: Declare, allocate, and initialize memory for the field variable *
* u on the CPU. *
****************************************************************************/
// Initial condition u(x,y,0) = sin(x)sin(y), stored row-major (j*nx + i).
double* u = (double*) malloc(nx*ny * sizeof(double));
for (int j = 0; j < ny; ++j) {
for (int i = 0; i < nx; ++i) {
u[j*nx + i] = sin(i * dx) * sin(j * dy);
}
}
/*****************************************************************************
* Step II: Declare and allocate GPU memory for _u, _u_new, and _error. Copy *
* the initial condition to the GPU. *
****************************************************************************/
double *_u, *_u_new, *_error;
cudaMalloc(&_u, nx*ny * sizeof(double));
cudaMemcpy(_u, u, nx*ny * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&_u_new, nx*ny * sizeof(double));
cudaMalloc(&_error, nx*ny * sizeof(double));
// Set the new soln and error to 0
// NOTE(review): cudaMemset's fill value is an int byte pattern; the double
// literal 0. converts to int 0, which zeroes doubles correctly here but
// would be wrong for any non-zero fill.
cudaMemset(_u_new, 0., nx*ny * sizeof(double));
cudaMemset(_error, 0., nx*ny * sizeof(double));
// Create thrust pointers to device memory for error calculation
thrust::device_ptr<double> t_error(_error);
/*****************************************************************************
* Step III: Set up the kernel execution configuration for the domain based *
* on the input domain size and the MAX_THREADS_DIM variable. *
****************************************************************************/
// Square blocks of MAX_THREADS_DIM^2 threads; ceil-divide so the grid
// covers the whole domain (kernels bounds-check the overhang).
int tx = MAX_THREADS_DIM;
int ty = MAX_THREADS_DIM;
int bx = (int) ceil((double) nx / tx);
int by = (int) ceil((double) ny / ty);
dim3 dimBlocks(tx, ty);
dim3 numBlocks(bx, by);
/***************************/
/* Main Time-Stepping Loop */
/***************************/
for (double time = 0.; time <= sim_time; time += dt) {
/***************************************************************************
* Step IV: Launch the GPU kernel to advance to the next time step with *
* the Jacobi method here. *
**************************************************************************/
computeNextJacobiStep<<<numBlocks, dimBlocks>>>(nx, ny, pref, _u, _u_new);
cudaDeviceSynchronize();
/***************************************************************************
* Step V: Launch the GPU kernel to calculate the error at each grid point *
* here. *
**************************************************************************/
computeJacobiError<<<numBlocks, dimBlocks>>>(nx, ny, dx, dy, D, time, _u, _error);
cudaDeviceSynchronize();
// Use thrust to do a parallel reduction on the error
// NOTE(review): thrust::reduce returns its result to the host, so it
// presumably synchronizes on its own — the explicit sync above may be
// redundant; confirm before removing.
error = thrust::reduce(t_error, t_error + nx*ny, 0., thrust::plus<double>());
printf("Error at t* = %.5lf is %e\n", time*D/(Lx*Lx), error/(nx*ny));
// Copy new soln to old. This also blocks to ensure computations are finished.
cudaMemcpy(_u, _u_new, nx*ny * sizeof(double), cudaMemcpyDeviceToDevice);
}
/*****************************************************************************
* Step VI: Copy the memory back to the CPU. *
****************************************************************************/
cudaMemcpy(u, _u, nx*ny * sizeof(double), cudaMemcpyDeviceToHost);
/*****************************************************************************
* Step I and Step II: Free the memory that you declared and allocated *
* earlier in the program. *
****************************************************************************/
free(u);
cudaFree(_u);
cudaFree(_u_new);
cudaFree(_error);
return EXIT_SUCCESS;
}
*
* Solve the transient heat conduction problem with homogeneous Dirichlet
* boundary conditions:
*
* u(x={0,L}) = u(y={0,L}) = 0
*
* and initial condition:
*
* u(x,y,0) = sin(x) * sin(y)
*
* on the domain 0 <= x,y <= L, with L = pi.
*
* This program solves the above problem on a single GPU with the Jacobi method.
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#define PI 3.14159265358979323846
#define MAX_THREADS_DIM 16 // Note that this depends on the hardware
/* Note on the structure of this file:
* - Cuda device constant memory declarations are at the top
* - Functions definitions are in the middle. Functions include:
* - - parse_cmdline: Read command-line arguments for domain size
* - - jacobi_solver: Advance the soln to the next time step using Jacobi
* - - check_error: Calculate the error b/t the numeric and analytic solns
* - The `main' function is at the bottom
*
* Note that it is good practice to use header files and break functions out
* into separate files. This has not been done here for simplicity.
*/
/*** Auxiliary Functions ***/
/* Read the command line inputs */
// - argv[0] is the program name
// - argv[1] is the first input (number of points)
/* Read the problem size from the command line.
 * Requires exactly one argument (grid points per dimension); exits with a
 * usage message otherwise, or when the size is below MAX_THREADS_DIM.
 * On success, echoes the grid dimensions and returns the size. */
int parse_cmdline(int argc, char *argv[]) {
    if (argc != 2) {
        /* Missing/extra arguments: explain the expected invocation. */
        printf("Input error. Run like: \n\n");
        printf(" $ ./parallel.c n\n\n");
        printf(" where n is the number of grid cells in one dimension\n");
        exit(EXIT_FAILURE);
    }
    int grid_n = atoi(argv[1]); /* requested grid points per dimension */
    if (grid_n < MAX_THREADS_DIM) {
        printf("Expecting a number of grid cells in one dimension to be at least %d\n", MAX_THREADS_DIM);
        exit(EXIT_FAILURE);
    }
    printf("Grid is %d by %d\n\n", grid_n, grid_n);
    return grid_n;
}
/*******************************************************************************
* Step IV: Launch the GPU kernel to advance to the next time step with the *
* Jacobi method here. *
******************************************************************************/
/* Advance the field one Jacobi step:
 *   _u_new[p] = _u[p] + pref * (sum of 4 neighbors - 4*_u[p]).
 * One thread per grid point, 2D launch; boundary points are skipped so the
 * homogeneous Dirichlet boundary values are never modified. */
__global__ void computeNextJacobiStep(int nx, int ny, double pref, double* _u, double* _u_new) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard: boundary points and grid-overhang threads write nothing.
    if (col <= 0 || col >= nx - 1 || row <= 0 || row >= ny - 1)
        return;
    const int idx = row * nx + col;       // row-major flat index
    const double uc = _u[idx];
    const double lap = _u[idx + 1] + _u[idx - 1]
                     + _u[idx + nx] + _u[idx - nx]
                     - 4.0 * uc;          // 5-point Laplacian numerator
    _u_new[idx] = uc + pref * lap;
}
/******************************************************************************
* Step V: Launch the GPU kernel to calculate the error at each grid point *
* here. *
*****************************************************************************/
/* Per-point absolute error between the numeric solution _u and the analytic
 * solution u(x,y,t) = sin(x)sin(y)exp(-2Dt). One thread per grid point;
 * only interior points are written (boundary of _error stays zero). */
__global__ void computeJacobiError(int nx, int ny, double dx, double dy, double D, double t, double* _u, double* _error) {
    int ti = blockDim.x * blockIdx.x + threadIdx.x;
    int tj = blockDim.y * blockIdx.y + threadIdx.y;
    if (ti < (nx-1) && ti > 0 && tj < (ny-1) && tj > 0) {
        double discretizedValue = _u[tj*nx + ti];
        double analyticalValue = sin(dx * ti) * sin(dy * tj) * exp(-2.0 * D * t);
        // fabs, not abs: the plain C abs() takes an int, so resolving to it
        // would truncate the double difference before taking the magnitude.
        _error[tj*nx + ti] = fabs(discretizedValue - analyticalValue);
    }
}
/*** Main Function ***/
/* Entry point (HIP port): sets up the 2D transient heat-conduction problem
 * on an nx-by-nx grid, runs explicit Jacobi time stepping on the GPU,
 * prints the mean error against the analytic solution each step, and
 * cleans up.
 * NOTE(review): no HIP API or kernel-launch error checking anywhere in
 * this function — allocation or launch failures are silent. */
int main(int argc, char *argv[])
{
/* Variable declaration */
double Lx = PI; // Domain length in x-direction
double Ly = PI; // Domain length in y-direction
double D = 1.; // Diffusion constant
int nx, ny; // Grid points (grid cells + 1)
double dx, dy; // Grid spacing
double dt; // Time step size
double sim_time; // Length of sim time, arbitrary for simplicity
double pref; // Pre-factor in the Jacobi method
double error = 0.; // Mean percent-difference at each grid point
error = error; // To prevent compiler warning (self-assignment is a no-op)
/* Parse command-line for problem size */
nx = parse_cmdline(argc, argv);
ny = nx; // Assume a square grid
/* Initialize variables */
dx = Lx / (nx - 1); // Cell width in x-direction
dy = Ly / (ny - 1); // Cell width in y-direction
dt = 0.25*dx*dy/D; // Limited by diffusive stability
sim_time = 0.5*Lx*Ly/D; // Arbitrary simulation length
pref = D*dt/(dx*dx); // Jacobi pre-factor
/*****************************************************************************
* Step I: Declare, allocate, and initialize memory for the field variable *
* u on the CPU. *
****************************************************************************/
// Initial condition u(x,y,0) = sin(x)sin(y), stored row-major (j*nx + i).
double* u = (double*) malloc(nx*ny * sizeof(double));
for (int j = 0; j < ny; ++j) {
for (int i = 0; i < nx; ++i) {
u[j*nx + i] = sin(i * dx) * sin(j * dy);
}
}
/*****************************************************************************
* Step II: Declare and allocate GPU memory for _u, _u_new, and _error. Copy *
* the initial condition to the GPU. *
****************************************************************************/
double *_u, *_u_new, *_error;
hipMalloc(&_u, nx*ny * sizeof(double));
hipMemcpy(_u, u, nx*ny * sizeof(double), hipMemcpyHostToDevice);
hipMalloc(&_u_new, nx*ny * sizeof(double));
hipMalloc(&_error, nx*ny * sizeof(double));
// Set the new soln and error to 0
// NOTE(review): hipMemset's fill value is an int byte pattern; the double
// literal 0. converts to int 0, which zeroes doubles correctly here but
// would be wrong for any non-zero fill.
hipMemset(_u_new, 0., nx*ny * sizeof(double));
hipMemset(_error, 0., nx*ny * sizeof(double));
// Create thrust pointers to device memory for error calculation
thrust::device_ptr<double> t_error(_error);
/*****************************************************************************
* Step III: Set up the kernel execution configuration for the domain based *
* on the input domain size and the MAX_THREADS_DIM variable. *
****************************************************************************/
// Square blocks of MAX_THREADS_DIM^2 threads; ceil-divide so the grid
// covers the whole domain (kernels bounds-check the overhang).
int tx = MAX_THREADS_DIM;
int ty = MAX_THREADS_DIM;
int bx = (int) ceil((double) nx / tx);
int by = (int) ceil((double) ny / ty);
dim3 dimBlocks(tx, ty);
dim3 numBlocks(bx, by);
/***************************/
/* Main Time-Stepping Loop */
/***************************/
for (double time = 0.; time <= sim_time; time += dt) {
/***************************************************************************
* Step IV: Launch the GPU kernel to advance to the next time step with *
* the Jacobi method here. *
**************************************************************************/
computeNextJacobiStep<<<numBlocks, dimBlocks>>>(nx, ny, pref, _u, _u_new);
hipDeviceSynchronize();
/***************************************************************************
* Step V: Launch the GPU kernel to calculate the error at each grid point *
* here. *
**************************************************************************/
computeJacobiError<<<numBlocks, dimBlocks>>>(nx, ny, dx, dy, D, time, _u, _error);
hipDeviceSynchronize();
// Use thrust to do a parallel reduction on the error
// NOTE(review): thrust::reduce returns its result to the host, so it
// presumably synchronizes on its own — the explicit sync above may be
// redundant; confirm before removing.
error = thrust::reduce(t_error, t_error + nx*ny, 0., thrust::plus<double>());
printf("Error at t* = %.5lf is %e\n", time*D/(Lx*Lx), error/(nx*ny));
// Copy new soln to old. This also blocks to ensure computations are finished.
hipMemcpy(_u, _u_new, nx*ny * sizeof(double), hipMemcpyDeviceToDevice);
}
/*****************************************************************************
* Step VI: Copy the memory back to the CPU. *
****************************************************************************/
hipMemcpy(u, _u, nx*ny * sizeof(double), hipMemcpyDeviceToHost);
/*****************************************************************************
* Step I and Step II: Free the memory that you declared and allocated *
* earlier in the program. *
****************************************************************************/
free(u);
hipFree(_u);
hipFree(_u_new);
hipFree(_error);
return EXIT_SUCCESS;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interperate the results.
//Included Files
#include <iostream>
//Function Prototypes
// Functions found in this file
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[]);
// Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
// Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen, int data);
// Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], int data);
// Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
// Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
// Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);
//DCAhelp:
// Function that prepares the list of bodies for DCA and finds the final state vector
// state is the state of the system at that timestep
// bs is a list of bodies used for initialization
// js is a list of joints
// n is the number of bodies
// Y is the array where the final velocities and accelerations are stored
/* DCAhelp: prepares body data, (nominally) runs DCA, and assembles the
 * output state-derivative vector Y: Y[0..n-1] = velocities copied from
 * state[n..2n-1], Y[n..2n-1] = generalized accelerations read from AF.
 * NOTE(review): the RecDCA/CudaInitialize calls below are commented out, so
 * AF is malloc'd but never written before it is read — Y's acceleration
 * entries come from uninitialized memory in this version. Looks like
 * work-in-progress/debug state; confirm intended call sequence. */
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, double bZs[])
{
//Create the list that will hold all acceleration and force values for all bodies
double *AF = (double*) malloc(sizeof(double)*n*4*6);
//double A[6][n*2]; //Create the matrix where only the accelerations will be stored
double *Zs=(double*)malloc(sizeof(double)*n*6*26);
double *Xs=(double*)malloc(sizeof(double)*n*5*5);
Update_Properties(bZs,Zs,n,state,m,l,I);
// NOTE(review): this brace-less for loop's body — after the commented-out
// lines — is the single Y[n]=AF[8*n] assignment below, so that assignment
// runs 6 times redundantly. Presumably leftover from commenting out the
// RecDCA call; verify whether the loop should be removed.
for(int r =0; r<6; r++)
//CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values
//Pass the list of bodies to DCA and return the accelerations
//and forces of both handles of every body in the list
//RecDCA(Zs, n, 0, AF, cut_off,Xs);
Y[n]=AF[8*n]; //For a pendulum, the fist acceleration value is in A[2][0]
for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
{
Y[i]=AF[2*4*n+2*j]-AF[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
}
for(int i = 0; i<n; i++) //Loop through the state vector
{
Y[i]=state[i+n]; //Save the velocities
}
//Free memory
free(AF);
free(Zs);
free(Xs);
}
//RecDCA:
// Function used to solve for the velocty and acceleration of the list of bodies at
// the current timestep. This is a recursive function that continues to call itself
// until there is a single body left. Once at this point the accelerations and forces
// are found using the boundary conditions of a pendulum. These values are then returned
// to the previous level of recursion which then finds the new accelerations and forces
// for the disassembled bodies. This continues until all bodies are disassembled, ultimately
// returning the forces and accelerations at both handles of every body in the system. These
// results are intererated by DCAhelp (above) to obtain the actual generalized accelerations.
// bodies is the list of bodies
// n is the number of bodies
// i is the level of recursion
// AF is the array in which the accelerations and forces at the handles of the bodies
// will be stored.
/* RecDCA: recursive divide-and-conquer pass over the body list.
 * Recurses until one body remains, then disassembles on the way back up,
 * writing handle accelerations/forces into AF. `gpu` selects the CUDA path
 * (cudaDisassemble) vs. the CPU path (Disassemble); `i` is the recursion
 * depth, `data` is forwarded to the GPU routine.
 * NOTE(review): no assembly step is performed before the recursive call,
 * so nZs/nXs are consumed uninitialized at the deeper level — presumably
 * the cudaAssemble/Assemble call was removed; confirm against the callers. */
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[],int gpu, int data)
{
if (n==1) //Base case: a single body; nothing to do at this level
{
}
else //If there is more than 1 body
{
int newlen; //New number of bodies after assembly
int odd = 0; //Flag: 1 when the list length is odd
if(n % 2 == 0) //Even count: pairs exactly halve the list
{
newlen = (int) (n/2);
}
else //Odd count: round up; the unpaired body carries over
{
newlen = (int)((n+1)/2);
odd = 1;
}
//Scratch buffers for the assembled (half-length) body list
double *nZs=(double*)malloc(sizeof(double)*newlen*26*6);
double *nXs=(double*) malloc(sizeof(double)*(newlen)*5*5);
double *AFo=(double*)malloc(sizeof(double)*6*newlen*4);
//Recurse on the assembled list to get its accelerations and forces
RecDCA(nZs,newlen,i+1 ,AF,cut_off,nXs,gpu, data);
if(gpu)
{
cudaDisassemble(AFo, Zs,Xs , nZs,nXs, odd, n, newlen, AF,data);
}
else
{
Disassemble(nZs,nXs,Zs,Xs,AFo, AF, newlen,odd);
}
//Free memory
free(nZs);
free(nXs);
free(AFo); //Fix: AFo was malloc'd but never freed (leak per recursion level)
}
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interperate the results.
//Included Files
#include <iostream>
//Function Prototypes
// Functions found in this file
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[]);
// Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
// Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen, int data);
// Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], int data);
// Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
// Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
// Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);
//DCAhelp:
// Function that prepares the list of bodies for DCA and finds the final state vector
// state is the state of the system at that timestep
// bs is a list of bodies used for initialization
// js is a list of joints
// n is the number of bodies
// Y is the array where the final velocities and accelerations are stored
/* DCAhelp: prepares body data, (nominally) runs DCA, and assembles the
 * output state-derivative vector Y: Y[0..n-1] = velocities copied from
 * state[n..2n-1], Y[n..2n-1] = generalized accelerations read from AF.
 * NOTE(review): the RecDCA/CudaInitialize calls below are commented out, so
 * AF is malloc'd but never written before it is read — Y's acceleration
 * entries come from uninitialized memory in this version. Looks like
 * work-in-progress/debug state; confirm intended call sequence. */
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, double bZs[])
{
//Create the list that will hold all acceleration and force values for all bodies
double *AF = (double*) malloc(sizeof(double)*n*4*6);
//double A[6][n*2]; //Create the matrix where only the accelerations will be stored
double *Zs=(double*)malloc(sizeof(double)*n*6*26);
double *Xs=(double*)malloc(sizeof(double)*n*5*5);
Update_Properties(bZs,Zs,n,state,m,l,I);
// NOTE(review): this brace-less for loop's body — after the commented-out
// lines — is the single Y[n]=AF[8*n] assignment below, so that assignment
// runs 6 times redundantly. Presumably leftover from commenting out the
// RecDCA call; verify whether the loop should be removed.
for(int r =0; r<6; r++)
//CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values
//Pass the list of bodies to DCA and return the accelerations
//and forces of both handles of every body in the list
//RecDCA(Zs, n, 0, AF, cut_off,Xs);
Y[n]=AF[8*n]; //For a pendulum, the fist acceleration value is in A[2][0]
for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
{
Y[i]=AF[2*4*n+2*j]-AF[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
}
for(int i = 0; i<n; i++) //Loop through the state vector
{
Y[i]=state[i+n]; //Save the velocities
}
//Free memory
free(AF);
free(Zs);
free(Xs);
}
//RecDCA:
// Function used to solve for the velocty and acceleration of the list of bodies at
// the current timestep. This is a recursive function that continues to call itself
// until there is a single body left. Once at this point the accelerations and forces
// are found using the boundary conditions of a pendulum. These values are then returned
// to the previous level of recursion which then finds the new accelerations and forces
// for the disassembled bodies. This continues until all bodies are disassembled, ultimately
// returning the forces and accelerations at both handles of every body in the system. These
// results are intererated by DCAhelp (above) to obtain the actual generalized accelerations.
// bodies is the list of bodies
// n is the number of bodies
// i is the level of recursion
// AF is the array in which the accelerations and forces at the handles of the bodies
// will be stored.
/* RecDCA: recursive divide-and-conquer pass over the body list.
 * Recurses until one body remains, then disassembles on the way back up,
 * writing handle accelerations/forces into AF. `gpu` selects the CUDA path
 * (cudaDisassemble) vs. the CPU path (Disassemble); `i` is the recursion
 * depth, `data` is forwarded to the GPU routine.
 * NOTE(review): no assembly step is performed before the recursive call,
 * so nZs/nXs are consumed uninitialized at the deeper level — presumably
 * the cudaAssemble/Assemble call was removed; confirm against the callers. */
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[],int gpu, int data)
{
if (n==1) //Base case: a single body; nothing to do at this level
{
}
else //If there is more than 1 body
{
int newlen; //New number of bodies after assembly
int odd = 0; //Flag: 1 when the list length is odd
if(n % 2 == 0) //Even count: pairs exactly halve the list
{
newlen = (int) (n/2);
}
else //Odd count: round up; the unpaired body carries over
{
newlen = (int)((n+1)/2);
odd = 1;
}
//Scratch buffers for the assembled (half-length) body list
double *nZs=(double*)malloc(sizeof(double)*newlen*26*6);
double *nXs=(double*) malloc(sizeof(double)*(newlen)*5*5);
double *AFo=(double*)malloc(sizeof(double)*6*newlen*4);
//Recurse on the assembled list to get its accelerations and forces
RecDCA(nZs,newlen,i+1 ,AF,cut_off,nXs,gpu, data);
if(gpu)
{
cudaDisassemble(AFo, Zs,Xs , nZs,nXs, odd, n, newlen, AF,data);
}
else
{
Disassemble(nZs,nXs,Zs,Xs,AFo, AF, newlen,odd);
}
//Free memory
free(nZs);
free(nXs);
free(AFo); //Fix: AFo was malloc'd but never freed (leak per recursion level)
}
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
# Teardown hook installed via atexit: unregisters the embedded fat binary
# (device code image) using the handle saved at registration time.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
# Load the module handle stored by the static initializer and release it.
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side DCAhelp(state, m, l, I, n, Y, cut_off, bZs) as emitted by GCC.
# Allocates AF (192*n bytes) and Zs (1248*n bytes), calls Update_Properties,
# then fills Y and frees both buffers. Only two of the three C-source
# mallocs appear — presumably GCC elided the unused Xs malloc/free pair;
# verify against the original cudaDCA.cu.
.globl _Z7DCAhelpPdS_S_S_iS_iS_
.type _Z7DCAhelpPdS_S_S_iS_iS_, @function
_Z7DCAhelpPdS_S_S_iS_iS_:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
# Stash arguments: rbx=state, m/l/I spilled, r12d=n, rbp=Y, r14=(long)n.
movq %rdi, %rbx
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movl %r8d, %r12d
movq %r9, %rbp
movslq %r8d, %r14
# r13 = malloc(3*64*n) = AF, n*4*6 doubles.
leaq (%r14,%r14,2), %rdi
salq $6, %rdi
call malloc@PLT
movq %rax, %r13
# r15 = malloc(1248*n) = Zs, n*6*26 doubles.
imulq $1248, %r14, %rdi
call malloc@PLT
movq %rax, %r15
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq 32(%rsp)
.cfi_def_cfa_offset 112
movq 32(%rsp), %r9
movq 24(%rsp), %r8
movq %rbx, %rcx
movl %r12d, %edx
movq %rax, %rsi
movq 120(%rsp), %rdi
call _Z17Update_PropertiesPdS_iS_S_S_S_@PLT
# Y[n] = AF[8*n].
leal 0(,%r12,8), %ecx
movslq %ecx, %rcx
movsd 0(%r13,%rcx,8), %xmm0
movsd %xmm0, 0(%rbp,%rcx)
leal 1(%r12), %edx
leal (%r12,%r12), %eax
addq $16, %rsp
.cfi_def_cfa_offset 96
cmpl %eax, %edx
jge .L4
leal 4(,%r12,8), %eax
cltq
leaq 0(%r13,%rax,8), %rdx
leaq 8(%rbp,%rcx), %rax
leal -2(%r12), %esi
addq %r14, %rsi
leaq 16(%rbp,%rsi,8), %rsi
# Acceleration loop: Y[i] = AF[8n+2j] - AF[8n+2(j-1)] (stride-32 walk of AF).
.L5:
movsd (%rdx), %xmm0
subsd -16(%rdx), %xmm0
movsd %xmm0, (%rax)
addq $32, %rdx
addq $8, %rax
cmpq %rsi, %rax
jne .L5
.L4:
testl %r12d, %r12d
jle .L6
addq %rcx, %rbx
movl $0, %eax
# Velocity loop: Y[i] = state[i+n] for i in [0, n).
.L7:
movsd (%rbx,%rax), %xmm0
movsd %xmm0, 0(%rbp,%rax)
addq $8, %rax
cmpq %rax, %rcx
jne .L7
.L6:
# Release both heap buffers before returning.
movq %r13, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z7DCAhelpPdS_S_S_iS_iS_, .-_Z7DCAhelpPdS_S_S_iS_iS_
# Host-side RecDCA(Zs, n, i, AF, cut_off, Xs, gpu, data) as emitted by GCC.
# Returns immediately for n==1; otherwise allocates nZs/nXs/AFo, recurses
# on the assembled system, then dispatches to cudaDisassemble (gpu != 0)
# or Disassemble. Note: only the first two buffers (r13=nZs, r15=nXs) are
# freed — the third malloc (r14=AFo, 192*newlen bytes) leaks each level,
# matching the defect in the C source.
.globl _Z6RecDCAPdiiS_iS_ii
.type _Z6RecDCAPdiiS_iS_ii, @function
_Z6RecDCAPdiiS_iS_ii:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, (%rsp)
movl %edx, 12(%rsp)
movl %r8d, 24(%rsp)
movq %r9, 16(%rsp)
# Base case: n == 1 -> nothing to do.
cmpl $1, %esi
je .L11
movl %esi, %ebx
movq %rcx, %r12
# odd = n & 1 (spilled at 28(%rsp)); branch selects newlen computation.
movl %esi, %eax
andl $1, %eax
movl %eax, 28(%rsp)
jne .L13
# Even n: newlen = n / 2 (arithmetic shift with sign-bit correction).
movl %esi, %ebp
shrl $31, %ebp
addl %esi, %ebp
sarl %ebp
.L14:
movslq %ebp, %r14
# r13 = malloc(1248*newlen) = nZs.
imulq $1248, %r14, %rdi
call malloc@PLT
movq %rax, %r13
# r15 = malloc(200*newlen) = nXs (newlen*5*5 doubles).
leaq (%r14,%r14,4), %rax
leaq (%rax,%rax,4), %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, %r15
# r14 = malloc(192*newlen) = AFo — never freed below (leak).
leaq (%r14,%r14,2), %rdi
salq $6, %rdi
call malloc@PLT
movq %rax, %r14
# Recursive call: RecDCA(nZs, newlen, i+1, AF, cut_off, nXs, gpu, data).
movl 12(%rsp), %edx
addl $1, %edx
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 112
movq %r15, %r9
movl 40(%rsp), %r8d
movq %r12, %rcx
movl %ebp, %esi
movq %r13, %rdi
call _Z6RecDCAPdiiS_iS_ii
addq $16, %rsp
.cfi_def_cfa_offset 96
# gpu flag selects the GPU or CPU disassembly path.
cmpl $0, 96(%rsp)
je .L15
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
pushq %r12
.cfi_def_cfa_offset 112
pushq %rbp
.cfi_def_cfa_offset 120
pushq %rbx
.cfi_def_cfa_offset 128
movl 60(%rsp), %r9d
movq %r15, %r8
movq %r13, %rcx
movq 48(%rsp), %rdx
movq 32(%rsp), %rsi
movq %r14, %rdi
call _Z15cudaDisassemblePdS_S_S_S_iiiS_i@PLT
addq $32, %rsp
.cfi_def_cfa_offset 96
.L16:
# Free nZs and nXs only; AFo (r14) is not released here.
movq %r13, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
.L11:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
# Odd n: newlen = (n + 1) / 2.
leal 1(%rsi), %eax
movl %eax, %ebp
shrl $31, %ebp
addl %eax, %ebp
sarl %ebp
jmp .L14
.L15:
# CPU path: Disassemble(nZs, nXs, Zs, Xs, AFo, AF, newlen, odd).
movl 28(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
pushq %rbp
.cfi_def_cfa_offset 112
movq %r12, %r9
movq %r14, %r8
movq 32(%rsp), %rcx
movq 16(%rsp), %rdx
movq %r15, %rsi
movq %r13, %rdi
call _Z11DisassemblePdS_S_S_S_S_ii@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L16
.cfi_endproc
.LFE3670:
.size _Z6RecDCAPdiiS_iS_ii, .-_Z6RecDCAPdiiS_iS_ii
# Static initializer (invoked via .init_array): registers the embedded fat
# binary with the CUDA runtime, stores the returned module handle, and
# installs the unregister hook to run at program exit.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
# Save the handle for later use by the unregister hook.
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
# Arrange for the fat binary to be unregistered at exit.
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interperate the results.
//Included Files
#include <iostream>
//Function Prototypes
// Functions found in this file
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[]);
// Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
// Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen, int data);
// Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], int data);
// Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
// Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
// Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);
//DCAhelp:
// Function that prepares the list of bodies for DCA and finds the final state vector
// state is the state of the system at that timestep
// bs is a list of bodies used for initialization
// js is a list of joints
// n is the number of bodies
// Y is the array where the final velocities and accelerations are stored
//DCAhelp: prepares the zeta/X body lists for DCA and writes the state
//derivative for this timestep into Y (first n entries: velocities copied
//from state; entries n.. : generalized accelerations read out of AF).
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, double bZs[])
{
//Create the list that will hold all acceleration and force values for all bodies
double *AF = (double*) malloc(sizeof(double)*n*4*6);
//double A[6][n*2]; //Create the matrix where only the accelerations will be stored
//Zeta values: 26 vectors of length 6 per body; X matrices: 5x5 per body.
double *Zs=(double*)malloc(sizeof(double)*n*6*26);
double *Xs=(double*)malloc(sizeof(double)*n*5*5);
//Refresh the zeta values in Zs from the current state and body properties.
Update_Properties(bZs,Zs,n,state,m,l,I);
// NOTE(review): this for-loop has no braces, so its body is the single
// statement "Y[n]=AF[8*n];" below (executed 6 times, idempotently). The
// intended body (CudaInitialize/RecDCA) is commented out, which also
// leaves AF uninitialized when it is read here — confirm this debug
// state is intentional before relying on Y.
for(int r =0; r<6; r++)
//CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values
//Pass the list of bodies to DCA and return the accelerations
//and forces of both handles of every body in the list
//RecDCA(Zs, n, 0, AF, cut_off,Xs);
Y[n]=AF[8*n]; //For a pendulum, the first acceleration value is in A[2][0]
for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
{
Y[i]=AF[2*4*n+2*j]-AF[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
}
for(int i = 0; i<n; i++) //Loop through the state vector
{
Y[i]=state[i+n]; //Save the velocities
}
//Free memory
free(AF);
free(Zs);
free(Xs);
}
//RecDCA:
// Function used to solve for the velocity and acceleration of the list of bodies at
// the current timestep. This is a recursive function that continues to call itself
// until there is a single body left. Once at this point the accelerations and forces
// are found using the boundary conditions of a pendulum. These values are then returned
// to the previous level of recursion which then finds the new accelerations and forces
// for the disassembled bodies. This continues until all bodies are disassembled, ultimately
// returning the forces and accelerations at both handles of every body in the system. These
// results are interpreted by DCAhelp (above) to obtain the actual generalized accelerations.
// bodies is the list of bodies
// n is the number of bodies
// i is the level of recursion
// AF is the array in which the accelerations and forces at the handles of the bodies
// will be stored.
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[],int gpu, int data)
{
if (n==1) //Base case: a single body
{
// Nothing to do at this level: the boundary-condition solve for the
// single remaining body happens elsewhere, and its handle
// accelerations/forces are already available through AF.
}
else //If there is more than 1 body
{
// Number of bodies after pairwise assembly: ceil(n/2).
// odd records whether one body was left unpaired.
int newlen = (n + 1) / 2; // equals n/2 when n is even
int odd = n % 2;          // 1 for an odd-length list, 0 otherwise
// Scratch buffers describing the assembled (coarser) system:
// 26 zeta vectors of length 6 per body, one 5x5 X matrix per body,
// and 4 handle acceleration/force vectors of length 6 per body.
double *nZs=(double*)malloc(sizeof(double)*newlen*26*6);
double *nXs=(double*) malloc(sizeof(double)*(newlen)*5*5);
double *AFo=(double*)malloc(sizeof(double)*6*newlen*4);
//Call the DCA function again to return the accelerations and forces of the new bodies
RecDCA(nZs,newlen,i+1 ,AF,cut_off,nXs,gpu, data);
if(gpu) //Disassemble on the GPU
{
cudaDisassemble(AFo, Zs,Xs , nZs,nXs, odd, n, newlen, AF,data);
}
else //Disassemble on the CPU
{
Disassemble(nZs,nXs,Zs,Xs,AFo, AF, newlen,odd);
}
//Free all per-level scratch. AFo was previously leaked on every
//recursion level; release it together with the other buffers.
free(nZs);
free(nXs);
free(AFo);
}
} | // cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interperate the results.
//Included Files
#include <hip/hip_runtime.h>
#include <iostream>
//Function Prototypes
// Functions found in this file
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[]);
// Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
// Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen, int data);
// Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], int data);
// Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
// Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
// Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);
//DCAhelp:
// Function that prepares the list of bodies for DCA and finds the final state vector
// state is the state of the system at that timestep
// bs is a list of bodies used for initialization
// js is a list of joints
// n is the number of bodies
// Y is the array where the final velocities and accelerations are stored
//DCAhelp: prepares the zeta/X body lists for DCA and writes the state
//derivative for this timestep into Y (first n entries: velocities copied
//from state; entries n.. : generalized accelerations read out of AF).
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, double bZs[])
{
//Create the list that will hold all acceleration and force values for all bodies
double *AF = (double*) malloc(sizeof(double)*n*4*6);
//double A[6][n*2]; //Create the matrix where only the accelerations will be stored
//Zeta values: 26 vectors of length 6 per body; X matrices: 5x5 per body.
double *Zs=(double*)malloc(sizeof(double)*n*6*26);
double *Xs=(double*)malloc(sizeof(double)*n*5*5);
//Refresh the zeta values in Zs from the current state and body properties.
Update_Properties(bZs,Zs,n,state,m,l,I);
// NOTE(review): this for-loop has no braces, so its body is the single
// statement "Y[n]=AF[8*n];" below (executed 6 times, idempotently). The
// intended body (CudaInitialize/RecDCA) is commented out, which also
// leaves AF uninitialized when it is read here — confirm this debug
// state is intentional before relying on Y.
for(int r =0; r<6; r++)
//CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values
//Pass the list of bodies to DCA and return the accelerations
//and forces of both handles of every body in the list
//RecDCA(Zs, n, 0, AF, cut_off,Xs);
Y[n]=AF[8*n]; //For a pendulum, the first acceleration value is in A[2][0]
for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
{
Y[i]=AF[2*4*n+2*j]-AF[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
}
for(int i = 0; i<n; i++) //Loop through the state vector
{
Y[i]=state[i+n]; //Save the velocities
}
//Free memory
free(AF);
free(Zs);
free(Xs);
}
//RecDCA:
// Function used to solve for the velocity and acceleration of the list of bodies at
// the current timestep. This is a recursive function that continues to call itself
// until there is a single body left. Once at this point the accelerations and forces
// are found using the boundary conditions of a pendulum. These values are then returned
// to the previous level of recursion which then finds the new accelerations and forces
// for the disassembled bodies. This continues until all bodies are disassembled, ultimately
// returning the forces and accelerations at both handles of every body in the system. These
// results are interpreted by DCAhelp (above) to obtain the actual generalized accelerations.
// bodies is the list of bodies
// n is the number of bodies
// i is the level of recursion
// AF is the array in which the accelerations and forces at the handles of the bodies
// will be stored.
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[],int gpu, int data)
{
if (n==1) //Base case: a single body
{
// Nothing to do at this level: the boundary-condition solve for the
// single remaining body happens elsewhere, and its handle
// accelerations/forces are already available through AF.
}
else //If there is more than 1 body
{
// Number of bodies after pairwise assembly: ceil(n/2).
// odd records whether one body was left unpaired.
int newlen = (n + 1) / 2; // equals n/2 when n is even
int odd = n % 2;          // 1 for an odd-length list, 0 otherwise
// Scratch buffers describing the assembled (coarser) system:
// 26 zeta vectors of length 6 per body, one 5x5 X matrix per body,
// and 4 handle acceleration/force vectors of length 6 per body.
double *nZs=(double*)malloc(sizeof(double)*newlen*26*6);
double *nXs=(double*) malloc(sizeof(double)*(newlen)*5*5);
double *AFo=(double*)malloc(sizeof(double)*6*newlen*4);
//Call the DCA function again to return the accelerations and forces of the new bodies
RecDCA(nZs,newlen,i+1 ,AF,cut_off,nXs,gpu, data);
if(gpu) //Disassemble on the GPU
{
cudaDisassemble(AFo, Zs,Xs , nZs,nXs, odd, n, newlen, AF,data);
}
else //Disassemble on the CPU
{
Disassemble(nZs,nXs,Zs,Xs,AFo, AF, newlen,odd);
}
//Free all per-level scratch. AFo was previously leaked on every
//recursion level; release it together with the other buffers.
free(nZs);
free(nXs);
free(AFo);
}
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interperate the results.
//Included Files
#include <hip/hip_runtime.h>
#include <iostream>
//Function Prototypes
// Functions found in this file
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[]);
// Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
// Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen, int data);
// Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], int data);
// Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
// Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
// Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);
//DCAhelp:
// Function that prepares the list of bodies for DCA and finds the final state vector
// state is the state of the system at that timestep
// bs is a list of bodies used for initialization
// js is a list of joints
// n is the number of bodies
// Y is the array where the final velocities and accelerations are stored
//DCAhelp: prepares the zeta/X body lists for DCA and writes the state
//derivative for this timestep into Y (first n entries: velocities copied
//from state; entries n.. : generalized accelerations read out of AF).
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, double bZs[])
{
//Create the list that will hold all acceleration and force values for all bodies
double *AF = (double*) malloc(sizeof(double)*n*4*6);
//double A[6][n*2]; //Create the matrix where only the accelerations will be stored
//Zeta values: 26 vectors of length 6 per body; X matrices: 5x5 per body.
double *Zs=(double*)malloc(sizeof(double)*n*6*26);
double *Xs=(double*)malloc(sizeof(double)*n*5*5);
//Refresh the zeta values in Zs from the current state and body properties.
Update_Properties(bZs,Zs,n,state,m,l,I);
// NOTE(review): this for-loop has no braces, so its body is the single
// statement "Y[n]=AF[8*n];" below (executed 6 times, idempotently). The
// intended body (CudaInitialize/RecDCA) is commented out, which also
// leaves AF uninitialized when it is read here — confirm this debug
// state is intentional before relying on Y.
for(int r =0; r<6; r++)
//CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values
//Pass the list of bodies to DCA and return the accelerations
//and forces of both handles of every body in the list
//RecDCA(Zs, n, 0, AF, cut_off,Xs);
Y[n]=AF[8*n]; //For a pendulum, the first acceleration value is in A[2][0]
for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
{
Y[i]=AF[2*4*n+2*j]-AF[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
}
for(int i = 0; i<n; i++) //Loop through the state vector
{
Y[i]=state[i+n]; //Save the velocities
}
//Free memory
free(AF);
free(Zs);
free(Xs);
}
//RecDCA:
// Function used to solve for the velocity and acceleration of the list of bodies at
// the current timestep. This is a recursive function that continues to call itself
// until there is a single body left. Once at this point the accelerations and forces
// are found using the boundary conditions of a pendulum. These values are then returned
// to the previous level of recursion which then finds the new accelerations and forces
// for the disassembled bodies. This continues until all bodies are disassembled, ultimately
// returning the forces and accelerations at both handles of every body in the system. These
// results are interpreted by DCAhelp (above) to obtain the actual generalized accelerations.
// bodies is the list of bodies
// n is the number of bodies
// i is the level of recursion
// AF is the array in which the accelerations and forces at the handles of the bodies
// will be stored.
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[],int gpu, int data)
{
if (n==1) //Base case: a single body
{
// Nothing to do at this level: the boundary-condition solve for the
// single remaining body happens elsewhere, and its handle
// accelerations/forces are already available through AF.
}
else //If there is more than 1 body
{
// Number of bodies after pairwise assembly: ceil(n/2).
// odd records whether one body was left unpaired.
int newlen = (n + 1) / 2; // equals n/2 when n is even
int odd = n % 2;          // 1 for an odd-length list, 0 otherwise
// Scratch buffers describing the assembled (coarser) system:
// 26 zeta vectors of length 6 per body, one 5x5 X matrix per body,
// and 4 handle acceleration/force vectors of length 6 per body.
double *nZs=(double*)malloc(sizeof(double)*newlen*26*6);
double *nXs=(double*) malloc(sizeof(double)*(newlen)*5*5);
double *AFo=(double*)malloc(sizeof(double)*6*newlen*4);
//Call the DCA function again to return the accelerations and forces of the new bodies
RecDCA(nZs,newlen,i+1 ,AF,cut_off,nXs,gpu, data);
if(gpu) //Disassemble on the GPU
{
cudaDisassemble(AFo, Zs,Xs , nZs,nXs, odd, n, newlen, AF,data);
}
else //Disassemble on the CPU
{
Disassemble(nZs,nXs,Zs,Xs,AFo, AF, newlen,odd);
}
//Free all per-level scratch. AFo was previously leaked on every
//recursion level; release it together with the other buffers.
free(nZs);
free(nXs);
free(AFo);
}
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interperate the results.
//Included Files
#include <hip/hip_runtime.h>
#include <iostream>
//Function Prototypes
// Functions found in this file
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[]);
// Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
// Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen, int data);
// Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], int data);
// Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
// Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
// Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);
//DCAhelp:
// Function that prepares the list of bodies for DCA and finds the final state vector
// state is the state of the system at that timestep
// bs is a list of bodies used for initialization
// js is a list of joints
// n is the number of bodies
// Y is the array where the final velocities and accelerations are stored
//DCAhelp: prepares the zeta/X body lists for DCA and writes the state
//derivative for this timestep into Y (first n entries: velocities copied
//from state; entries n.. : generalized accelerations read out of AF).
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, double bZs[])
{
//Create the list that will hold all acceleration and force values for all bodies
double *AF = (double*) malloc(sizeof(double)*n*4*6);
//double A[6][n*2]; //Create the matrix where only the accelerations will be stored
//Zeta values: 26 vectors of length 6 per body; X matrices: 5x5 per body.
double *Zs=(double*)malloc(sizeof(double)*n*6*26);
double *Xs=(double*)malloc(sizeof(double)*n*5*5);
//Refresh the zeta values in Zs from the current state and body properties.
Update_Properties(bZs,Zs,n,state,m,l,I);
// NOTE(review): this for-loop has no braces, so its body is the single
// statement "Y[n]=AF[8*n];" below (executed 6 times, idempotently). The
// intended body (CudaInitialize/RecDCA) is commented out, which also
// leaves AF uninitialized when it is read here — confirm this debug
// state is intentional before relying on Y.
for(int r =0; r<6; r++)
//CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values
//Pass the list of bodies to DCA and return the accelerations
//and forces of both handles of every body in the list
//RecDCA(Zs, n, 0, AF, cut_off,Xs);
Y[n]=AF[8*n]; //For a pendulum, the first acceleration value is in A[2][0]
for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
{
Y[i]=AF[2*4*n+2*j]-AF[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
}
for(int i = 0; i<n; i++) //Loop through the state vector
{
Y[i]=state[i+n]; //Save the velocities
}
//Free memory
free(AF);
free(Zs);
free(Xs);
}
//RecDCA:
// Function used to solve for the velocity and acceleration of the list of bodies at
// the current timestep. This is a recursive function that continues to call itself
// until there is a single body left. Once at this point the accelerations and forces
// are found using the boundary conditions of a pendulum. These values are then returned
// to the previous level of recursion which then finds the new accelerations and forces
// for the disassembled bodies. This continues until all bodies are disassembled, ultimately
// returning the forces and accelerations at both handles of every body in the system. These
// results are interpreted by DCAhelp (above) to obtain the actual generalized accelerations.
// bodies is the list of bodies
// n is the number of bodies
// i is the level of recursion
// AF is the array in which the accelerations and forces at the handles of the bodies
// will be stored.
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[],int gpu, int data)
{
if (n==1) //Base case: a single body
{
// Nothing to do at this level: the boundary-condition solve for the
// single remaining body happens elsewhere, and its handle
// accelerations/forces are already available through AF.
}
else //If there is more than 1 body
{
// Number of bodies after pairwise assembly: ceil(n/2).
// odd records whether one body was left unpaired.
int newlen = (n + 1) / 2; // equals n/2 when n is even
int odd = n % 2;          // 1 for an odd-length list, 0 otherwise
// Scratch buffers describing the assembled (coarser) system:
// 26 zeta vectors of length 6 per body, one 5x5 X matrix per body,
// and 4 handle acceleration/force vectors of length 6 per body.
double *nZs=(double*)malloc(sizeof(double)*newlen*26*6);
double *nXs=(double*) malloc(sizeof(double)*(newlen)*5*5);
double *AFo=(double*)malloc(sizeof(double)*6*newlen*4);
//Call the DCA function again to return the accelerations and forces of the new bodies
RecDCA(nZs,newlen,i+1 ,AF,cut_off,nXs,gpu, data);
if(gpu) //Disassemble on the GPU
{
cudaDisassemble(AFo, Zs,Xs , nZs,nXs, odd, n, newlen, AF,data);
}
else //Disassemble on the CPU
{
Disassemble(nZs,nXs,Zs,Xs,AFo, AF, newlen,odd);
}
//Free all per-level scratch. AFo was previously leaked on every
//recursion level; release it together with the other buffers.
free(nZs);
free(nXs);
free(AFo);
}
} | .text
.file "cudaDCA.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
# Host-side DCAhelp(state, m, l, I, n, Y, cut_off, bZs) as emitted by
# clang for the HIP build. Allocates AF (192*n) and Zs (1248*n), calls
# Update_Properties, fills Y, then frees AF and tail-calls free on Zs.
# The unused Xs allocation from the C source appears to have been elided.
.globl _Z7DCAhelpPdS_S_S_iS_iS_ # -- Begin function _Z7DCAhelpPdS_S_S_iS_iS_
.p2align 4, 0x90
.type _Z7DCAhelpPdS_S_S_iS_iS_,@function
_Z7DCAhelpPdS_S_S_iS_iS_: # @_Z7DCAhelpPdS_S_S_iS_iS_
.cfi_startproc
# %bb.0: # %.preheader43
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# rbx=Y, r14d=n, r13=state, rbp=(long)n, r15=bZs (stack arg).
movq %r9, %rbx
movl %r8d, %r14d
movq %rcx, 16(%rsp) # 8-byte Spill
movq %rdx, 32(%rsp) # 8-byte Spill
movq %rsi, 24(%rsp) # 8-byte Spill
movq %rdi, %r13
movq 104(%rsp), %r15
movslq %r8d, %rbp
# r12 = malloc(192*n) = AF.
movq %rbp, %rax
shlq $6, %rax
leaq (%rax,%rax,2), %rdi
callq malloc
movq %rax, %r12
# malloc(1248*n) = Zs, kept in r15 after the shuffle below.
imulq $1248, %rbp, %rdi # imm = 0x4E0
callq malloc
movq 16(%rsp), %rcx # 8-byte Reload
movq %rcx, (%rsp)
movq %r15, %rdi
movq %rax, %r15
movq %rax, %rsi
movl %ebp, %edx
movq %r13, %rcx
movq 24(%rsp), %r8 # 8-byte Reload
movq 32(%rsp), %r9 # 8-byte Reload
callq _Z17Update_PropertiesPdS_iS_S_S_S_
# Y[n] = AF[8*n].
leal (,%rbp,8), %eax
cltq
movsd (%r12,%rax,8), %xmm0 # xmm0 = mem[0],zero
movsd %xmm0, (%rbx,%rbp,8)
leal (,%rbp,2), %edx
leal 1(%rbp), %ecx
cmpl %edx, %ecx
jge .LBB0_3
# %bb.1: # %.lr.ph
movslq %ecx, %rcx
leaq (%r12,%rax,8), %rax
addq $32, %rax
leaq (%rbx,%rcx,8), %rcx
leal -1(%r14), %edx
xorl %esi, %esi
.p2align 4, 0x90
# Acceleration loop: Y[i] = AF[...] - AF[...-2] (stride-32 walk of AF).
.LBB0_2: # =>This Inner Loop Header: Depth=1
movsd (%rax), %xmm0 # xmm0 = mem[0],zero
subsd -16(%rax), %xmm0
movsd %xmm0, (%rcx,%rsi,8)
addq $32, %rax
incq %rsi
cmpl %esi, %edx
jne .LBB0_2
.LBB0_3: # %.preheader
testl %r14d, %r14d
jle .LBB0_6
# %bb.4: # %.lr.ph48.preheader
movl %r14d, %eax
leaq (,%rax,8), %rcx
addq %r13, %rcx
xorl %edx, %edx
.p2align 4, 0x90
# Velocity loop: Y[i] = state[i+n] for i in [0, n).
.LBB0_5: # %.lr.ph48
# =>This Inner Loop Header: Depth=1
movsd (%rcx,%rdx,8), %xmm0 # xmm0 = mem[0],zero
movsd %xmm0, (%rbx,%rdx,8)
incq %rdx
cmpq %rdx, %rax
jne .LBB0_5
.LBB0_6: # %._crit_edge
# free(AF); then tail-call free(Zs) after restoring registers.
movq %r12, %rdi
callq free
movq %r15, %rdi
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp free # TAILCALL
.Lfunc_end0:
.size _Z7DCAhelpPdS_S_S_iS_iS_, .Lfunc_end0-_Z7DCAhelpPdS_S_S_iS_iS_
.cfi_endproc
# -- End function
# Host-side RecDCA(Zs, n, i, AF, cut_off, Xs, gpu, data) as emitted by
# clang for the HIP build. Early-returns for n==1; otherwise allocates
# nZs/nXs/AFo, recurses, then calls cudaDisassemble (gpu != 0) or
# Disassemble. Only nZs and nXs are freed — the AFo buffer spilled at
# 8(%rsp) (192*newlen bytes) is never released, matching the C-source leak.
.globl _Z6RecDCAPdiiS_iS_ii # -- Begin function _Z6RecDCAPdiiS_iS_ii
.p2align 4, 0x90
.type _Z6RecDCAPdiiS_iS_ii,@function
_Z6RecDCAPdiiS_iS_ii: # @_Z6RecDCAPdiiS_iS_ii
.cfi_startproc
# %bb.0:
# Base case: n == 1 -> return immediately.
cmpl $1, %esi
jne .LBB1_1
# %bb.5:
retq
.LBB1_1:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $72, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %ebx
movl %esi, %r15d
movq %rdi, 16(%rsp) # 8-byte Spill
movq %r9, 24(%rsp) # 8-byte Spill
movl 136(%rsp), %eax
movq %rax, 40(%rsp) # 8-byte Spill
movl 128(%rsp), %eax
movq %rax, 56(%rsp) # 8-byte Spill
# ebp = odd = n & 1; r12d = newlen = (n + odd) >> 1.
movl %esi, %ebp
andl $1, %ebp
leal (%r15,%rbp), %r12d
sarl %r12d
movq %rcx, 64(%rsp) # 8-byte Spill
movslq %r12d, %r14
# r13 = malloc(1248*newlen) = nZs.
imulq $1248, %r14, %rdi # imm = 0x4E0
movl %r8d, 4(%rsp) # 4-byte Spill
callq malloc
movq %rax, %r13
# 48(%rsp) = malloc(200*newlen) = nXs.
imulq $200, %r14, %rdi
callq malloc
movq %rax, 48(%rsp) # 8-byte Spill
# 8(%rsp) = malloc(192*newlen) = AFo — never freed below (leak).
movq %r14, %rax
shlq $6, %rax
leaq (%rax,%rax,2), %rdi
callq malloc
movq %rax, 8(%rsp) # 8-byte Spill
# Recursive call: RecDCA(nZs, newlen, i+1, AF, cut_off, nXs, gpu, data).
incl %ebx
movq %r13, 32(%rsp) # 8-byte Spill
movq %r13, %rdi
movq 48(%rsp), %r13 # 8-byte Reload
movl %r14d, %esi
movq 64(%rsp), %r14 # 8-byte Reload
movl %ebx, %edx
movq %r14, %rcx
movl 4(%rsp), %r8d # 4-byte Reload
movq %r13, %r9
pushq 40(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
movq 64(%rsp), %rbx # 8-byte Reload
pushq %rbx
.cfi_adjust_cfa_offset 8
callq _Z6RecDCAPdiiS_iS_ii
addq $16, %rsp
.cfi_adjust_cfa_offset -16
# gpu flag selects the GPU or CPU disassembly path.
testl %ebx, %ebx
je .LBB1_3
# %bb.2:
movq 8(%rsp), %rdi # 8-byte Reload
movq 16(%rsp), %rsi # 8-byte Reload
movq 24(%rsp), %rdx # 8-byte Reload
movq 32(%rsp), %rbx # 8-byte Reload
movq %rbx, %rcx
movq %r13, %r8
movl %ebp, %r9d
pushq 40(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq %r14
.cfi_adjust_cfa_offset 8
pushq %r12
.cfi_adjust_cfa_offset 8
pushq %r15
.cfi_adjust_cfa_offset 8
callq _Z15cudaDisassemblePdS_S_S_S_iiiS_i
addq $32, %rsp
.cfi_adjust_cfa_offset -32
jmp .LBB1_4
.LBB1_3:
# CPU path: Disassemble(nZs, nXs, Zs, Xs, AFo, AF, newlen, odd).
movq 32(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdi
movq %r13, %rsi
movq 16(%rsp), %rdx # 8-byte Reload
movq 24(%rsp), %rcx # 8-byte Reload
movq 8(%rsp), %r8 # 8-byte Reload
movq %r14, %r9
pushq %rbp
.cfi_adjust_cfa_offset 8
pushq %r12
.cfi_adjust_cfa_offset 8
callq _Z11DisassemblePdS_S_S_S_S_ii
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
# free(nZs); then tail-call free(nXs). AFo is not released.
movq %rbx, %rdi
callq free
movq %r13, %rdi
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp free # TAILCALL
.Lfunc_end1:
.size _Z6RecDCAPdiiS_iS_ii, .Lfunc_end1-_Z6RecDCAPdiiS_iS_ii
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00064b10_00000000-6_cudaDCA.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
# Process-exit teardown: unregisters this translation unit's CUDA fatbinary.
# Installed via atexit() by _ZL24__sti____cudaRegisterAllv further down this file.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp # keep %rsp 16-byte aligned across the call (SysV ABI)
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg 1 = handle saved at registration time
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7DCAhelpPdS_S_S_iS_iS_
.type _Z7DCAhelpPdS_S_S_iS_iS_, @function
_Z7DCAhelpPdS_S_S_iS_iS_:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %rbx
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movl %r8d, %r12d
movq %r9, %rbp
movslq %r8d, %r14
leaq (%r14,%r14,2), %rdi
salq $6, %rdi
call malloc@PLT
movq %rax, %r13
imulq $1248, %r14, %rdi
call malloc@PLT
movq %rax, %r15
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq 32(%rsp)
.cfi_def_cfa_offset 112
movq 32(%rsp), %r9
movq 24(%rsp), %r8
movq %rbx, %rcx
movl %r12d, %edx
movq %rax, %rsi
movq 120(%rsp), %rdi
call _Z17Update_PropertiesPdS_iS_S_S_S_@PLT
leal 0(,%r12,8), %ecx
movslq %ecx, %rcx
movsd 0(%r13,%rcx,8), %xmm0
movsd %xmm0, 0(%rbp,%rcx)
leal 1(%r12), %edx
leal (%r12,%r12), %eax
addq $16, %rsp
.cfi_def_cfa_offset 96
cmpl %eax, %edx
jge .L4
leal 4(,%r12,8), %eax
cltq
leaq 0(%r13,%rax,8), %rdx
leaq 8(%rbp,%rcx), %rax
leal -2(%r12), %esi
addq %r14, %rsi
leaq 16(%rbp,%rsi,8), %rsi
.L5:
movsd (%rdx), %xmm0
subsd -16(%rdx), %xmm0
movsd %xmm0, (%rax)
addq $32, %rdx
addq $8, %rax
cmpq %rsi, %rax
jne .L5
.L4:
testl %r12d, %r12d
jle .L6
addq %rcx, %rbx
movl $0, %eax
.L7:
movsd (%rbx,%rax), %xmm0
movsd %xmm0, 0(%rbp,%rax)
addq $8, %rax
cmpq %rax, %rcx
jne .L7
.L6:
movq %r13, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z7DCAhelpPdS_S_S_iS_iS_, .-_Z7DCAhelpPdS_S_S_iS_iS_
.globl _Z6RecDCAPdiiS_iS_ii
.type _Z6RecDCAPdiiS_iS_ii, @function
_Z6RecDCAPdiiS_iS_ii:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, (%rsp)
movl %edx, 12(%rsp)
movl %r8d, 24(%rsp)
movq %r9, 16(%rsp)
cmpl $1, %esi
je .L11
movl %esi, %ebx
movq %rcx, %r12
movl %esi, %eax
andl $1, %eax
movl %eax, 28(%rsp)
jne .L13
movl %esi, %ebp
shrl $31, %ebp
addl %esi, %ebp
sarl %ebp
.L14:
movslq %ebp, %r14
imulq $1248, %r14, %rdi
call malloc@PLT
movq %rax, %r13
leaq (%r14,%r14,4), %rax
leaq (%rax,%rax,4), %rdi
salq $3, %rdi
call malloc@PLT
movq %rax, %r15
leaq (%r14,%r14,2), %rdi
salq $6, %rdi
call malloc@PLT
movq %rax, %r14
movl 12(%rsp), %edx
addl $1, %edx
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 112
movq %r15, %r9
movl 40(%rsp), %r8d
movq %r12, %rcx
movl %ebp, %esi
movq %r13, %rdi
call _Z6RecDCAPdiiS_iS_ii
addq $16, %rsp
.cfi_def_cfa_offset 96
cmpl $0, 96(%rsp)
je .L15
movl 104(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
pushq %r12
.cfi_def_cfa_offset 112
pushq %rbp
.cfi_def_cfa_offset 120
pushq %rbx
.cfi_def_cfa_offset 128
movl 60(%rsp), %r9d
movq %r15, %r8
movq %r13, %rcx
movq 48(%rsp), %rdx
movq 32(%rsp), %rsi
movq %r14, %rdi
call _Z15cudaDisassemblePdS_S_S_S_iiiS_i@PLT
addq $32, %rsp
.cfi_def_cfa_offset 96
.L16:
movq %r13, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
.L11:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
leal 1(%rsi), %eax
movl %eax, %ebp
shrl $31, %ebp
addl %eax, %ebp
sarl %ebp
jmp .L14
.L15:
movl 28(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
pushq %rbp
.cfi_def_cfa_offset 112
movq %r12, %r9
movq %r14, %r8
movq 32(%rsp), %rcx
movq 16(%rsp), %rdx
movq %r15, %rsi
movq %r13, %rdi
call _Z11DisassemblePdS_S_S_S_S_ii@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L16
.cfi_endproc
.LFE3670:
.size _Z6RecDCAPdiiS_iS_ii, .-_Z6RecDCAPdiiS_iS_ii
# Static initializer (referenced from .init_array below): registers this
# translation unit's embedded fatbinary with the CUDA runtime at startup,
# saves the returned handle, and installs the matching unregister hook.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp # keep %rsp 16-byte aligned across the calls (SysV ABI)
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi # arg 1 = fatbin descriptor record
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi # returned handle feeds the *End call...
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # ...and is saved for later unregistration
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT # arrange fatbin unregistration at process exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudaDCA.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z7DCAhelpPdS_S_S_iS_iS_ # -- Begin function _Z7DCAhelpPdS_S_S_iS_iS_
.p2align 4, 0x90
.type _Z7DCAhelpPdS_S_S_iS_iS_,@function
_Z7DCAhelpPdS_S_S_iS_iS_: # @_Z7DCAhelpPdS_S_S_iS_iS_
.cfi_startproc
# %bb.0: # %.preheader43
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, %rbx
movl %r8d, %r14d
movq %rcx, 16(%rsp) # 8-byte Spill
movq %rdx, 32(%rsp) # 8-byte Spill
movq %rsi, 24(%rsp) # 8-byte Spill
movq %rdi, %r13
movq 104(%rsp), %r15
movslq %r8d, %rbp
movq %rbp, %rax
shlq $6, %rax
leaq (%rax,%rax,2), %rdi
callq malloc
movq %rax, %r12
imulq $1248, %rbp, %rdi # imm = 0x4E0
callq malloc
movq 16(%rsp), %rcx # 8-byte Reload
movq %rcx, (%rsp)
movq %r15, %rdi
movq %rax, %r15
movq %rax, %rsi
movl %ebp, %edx
movq %r13, %rcx
movq 24(%rsp), %r8 # 8-byte Reload
movq 32(%rsp), %r9 # 8-byte Reload
callq _Z17Update_PropertiesPdS_iS_S_S_S_
leal (,%rbp,8), %eax
cltq
movsd (%r12,%rax,8), %xmm0 # xmm0 = mem[0],zero
movsd %xmm0, (%rbx,%rbp,8)
leal (,%rbp,2), %edx
leal 1(%rbp), %ecx
cmpl %edx, %ecx
jge .LBB0_3
# %bb.1: # %.lr.ph
movslq %ecx, %rcx
leaq (%r12,%rax,8), %rax
addq $32, %rax
leaq (%rbx,%rcx,8), %rcx
leal -1(%r14), %edx
xorl %esi, %esi
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movsd (%rax), %xmm0 # xmm0 = mem[0],zero
subsd -16(%rax), %xmm0
movsd %xmm0, (%rcx,%rsi,8)
addq $32, %rax
incq %rsi
cmpl %esi, %edx
jne .LBB0_2
.LBB0_3: # %.preheader
testl %r14d, %r14d
jle .LBB0_6
# %bb.4: # %.lr.ph48.preheader
movl %r14d, %eax
leaq (,%rax,8), %rcx
addq %r13, %rcx
xorl %edx, %edx
.p2align 4, 0x90
.LBB0_5: # %.lr.ph48
# =>This Inner Loop Header: Depth=1
movsd (%rcx,%rdx,8), %xmm0 # xmm0 = mem[0],zero
movsd %xmm0, (%rbx,%rdx,8)
incq %rdx
cmpq %rdx, %rax
jne .LBB0_5
.LBB0_6: # %._crit_edge
movq %r12, %rdi
callq free
movq %r15, %rdi
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp free # TAILCALL
.Lfunc_end0:
.size _Z7DCAhelpPdS_S_S_iS_iS_, .Lfunc_end0-_Z7DCAhelpPdS_S_S_iS_iS_
.cfi_endproc
# -- End function
.globl _Z6RecDCAPdiiS_iS_ii # -- Begin function _Z6RecDCAPdiiS_iS_ii
.p2align 4, 0x90
.type _Z6RecDCAPdiiS_iS_ii,@function
_Z6RecDCAPdiiS_iS_ii: # @_Z6RecDCAPdiiS_iS_ii
.cfi_startproc
# %bb.0:
cmpl $1, %esi
jne .LBB1_1
# %bb.5:
retq
.LBB1_1:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $72, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, %ebx
movl %esi, %r15d
movq %rdi, 16(%rsp) # 8-byte Spill
movq %r9, 24(%rsp) # 8-byte Spill
movl 136(%rsp), %eax
movq %rax, 40(%rsp) # 8-byte Spill
movl 128(%rsp), %eax
movq %rax, 56(%rsp) # 8-byte Spill
movl %esi, %ebp
andl $1, %ebp
leal (%r15,%rbp), %r12d
sarl %r12d
movq %rcx, 64(%rsp) # 8-byte Spill
movslq %r12d, %r14
imulq $1248, %r14, %rdi # imm = 0x4E0
movl %r8d, 4(%rsp) # 4-byte Spill
callq malloc
movq %rax, %r13
imulq $200, %r14, %rdi
callq malloc
movq %rax, 48(%rsp) # 8-byte Spill
movq %r14, %rax
shlq $6, %rax
leaq (%rax,%rax,2), %rdi
callq malloc
movq %rax, 8(%rsp) # 8-byte Spill
incl %ebx
movq %r13, 32(%rsp) # 8-byte Spill
movq %r13, %rdi
movq 48(%rsp), %r13 # 8-byte Reload
movl %r14d, %esi
movq 64(%rsp), %r14 # 8-byte Reload
movl %ebx, %edx
movq %r14, %rcx
movl 4(%rsp), %r8d # 4-byte Reload
movq %r13, %r9
pushq 40(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
movq 64(%rsp), %rbx # 8-byte Reload
pushq %rbx
.cfi_adjust_cfa_offset 8
callq _Z6RecDCAPdiiS_iS_ii
addq $16, %rsp
.cfi_adjust_cfa_offset -16
testl %ebx, %ebx
je .LBB1_3
# %bb.2:
movq 8(%rsp), %rdi # 8-byte Reload
movq 16(%rsp), %rsi # 8-byte Reload
movq 24(%rsp), %rdx # 8-byte Reload
movq 32(%rsp), %rbx # 8-byte Reload
movq %rbx, %rcx
movq %r13, %r8
movl %ebp, %r9d
pushq 40(%rsp) # 8-byte Folded Reload
.cfi_adjust_cfa_offset 8
pushq %r14
.cfi_adjust_cfa_offset 8
pushq %r12
.cfi_adjust_cfa_offset 8
pushq %r15
.cfi_adjust_cfa_offset 8
callq _Z15cudaDisassemblePdS_S_S_S_iiiS_i
addq $32, %rsp
.cfi_adjust_cfa_offset -32
jmp .LBB1_4
.LBB1_3:
movq 32(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdi
movq %r13, %rsi
movq 16(%rsp), %rdx # 8-byte Reload
movq 24(%rsp), %rcx # 8-byte Reload
movq 8(%rsp), %r8 # 8-byte Reload
movq %r14, %r9
pushq %rbp
.cfi_adjust_cfa_offset 8
pushq %r12
.cfi_adjust_cfa_offset 8
callq _Z11DisassemblePdS_S_S_S_S_ii
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq %rbx, %rdi
callq free
movq %r13, %rdi
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp free # TAILCALL
.Lfunc_end1:
.size _Z6RecDCAPdiiS_iS_ii, .Lfunc_end1-_Z6RecDCAPdiiS_iS_ii
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), top[depth_-1]->mutable_gpu_data());
for(int i = 0; i < depth_-1; ++i)
caffe_gpu_set(top[i]->count(), (Dtype)0., top[i]->mutable_gpu_data());
for(int m = 0; m < M_; ++m) {
for( int i = depth_-2; i >= 0; --i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
Dtype * top_data = &tops->mutable_gpu_data()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
const Dtype * bottom_data = &bottoms->gpu_data()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1.),bottom_data,top_data);
}
caffe_gpu_scal(H_*W_,(Dtype)(1./children->size()),top_data);
}
}
}
}
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if( propagate_down[0] == false )
return;
for(int m = 0; m < M_; ++m) {
for( int i = 0; i < depth_-1; ++i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
const Dtype * top_diff = &tops->gpu_diff()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
Dtype * bottom_diff = &bottoms->mutable_gpu_diff()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1./children->size()),top_diff,bottom_diff);
}
}
}
}
caffe_copy(bottom[0]->count(), top[depth_-1]->gpu_diff(), bottom[0]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(SuperCategoryFMLayer);
} // namespace caffe
*/ | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), top[depth_-1]->mutable_gpu_data());
for(int i = 0; i < depth_-1; ++i)
caffe_gpu_set(top[i]->count(), (Dtype)0., top[i]->mutable_gpu_data());
for(int m = 0; m < M_; ++m) {
for( int i = depth_-2; i >= 0; --i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
Dtype * top_data = &tops->mutable_gpu_data()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
const Dtype * bottom_data = &bottoms->gpu_data()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1.),bottom_data,top_data);
}
caffe_gpu_scal(H_*W_,(Dtype)(1./children->size()),top_data);
}
}
}
}
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if( propagate_down[0] == false )
return;
for(int m = 0; m < M_; ++m) {
for( int i = 0; i < depth_-1; ++i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
const Dtype * top_diff = &tops->gpu_diff()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
Dtype * bottom_diff = &bottoms->mutable_gpu_diff()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1./children->size()),top_diff,bottom_diff);
}
}
}
}
caffe_copy(bottom[0]->count(), top[depth_-1]->gpu_diff(), bottom[0]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(SuperCategoryFMLayer);
} // namespace caffe
*/ | .file "tmpxft_001588f5_00000000-6_super_category_fm_layer.cudafe1.cpp"
.text
#APP
#NO_APP
# Process-exit teardown: unregisters this translation unit's CUDA fatbinary.
# Installed via atexit() by _ZL24__sti____cudaRegisterAllv further down this file.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp # keep %rsp 16-byte aligned across the call (SysV ABI)
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg 1 = handle saved at registration time
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Static initializer (referenced from .init_array below): registers this
# translation unit's embedded fatbinary with the CUDA runtime at startup,
# saves the returned handle, and installs the matching unregister hook.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp # keep %rsp 16-byte aligned across the calls (SysV ABI)
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi # arg 1 = fatbin descriptor record
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi # returned handle feeds the *End call...
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # ...and is saved for later unregistration
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT # arrange fatbin unregistration at process exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), top[depth_-1]->mutable_gpu_data());
for(int i = 0; i < depth_-1; ++i)
caffe_gpu_set(top[i]->count(), (Dtype)0., top[i]->mutable_gpu_data());
for(int m = 0; m < M_; ++m) {
for( int i = depth_-2; i >= 0; --i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
Dtype * top_data = &tops->mutable_gpu_data()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
const Dtype * bottom_data = &bottoms->gpu_data()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1.),bottom_data,top_data);
}
caffe_gpu_scal(H_*W_,(Dtype)(1./children->size()),top_data);
}
}
}
}
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if( propagate_down[0] == false )
return;
for(int m = 0; m < M_; ++m) {
for( int i = 0; i < depth_-1; ++i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
const Dtype * top_diff = &tops->gpu_diff()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
Dtype * bottom_diff = &bottoms->mutable_gpu_diff()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1./children->size()),top_diff,bottom_diff);
}
}
}
}
caffe_copy(bottom[0]->count(), top[depth_-1]->gpu_diff(), bottom[0]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(SuperCategoryFMLayer);
} // namespace caffe
*/ | #include <hip/hip_runtime.h>
/*
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), top[depth_-1]->mutable_gpu_data());
for(int i = 0; i < depth_-1; ++i)
caffe_gpu_set(top[i]->count(), (Dtype)0., top[i]->mutable_gpu_data());
for(int m = 0; m < M_; ++m) {
for( int i = depth_-2; i >= 0; --i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
Dtype * top_data = &tops->mutable_gpu_data()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
const Dtype * bottom_data = &bottoms->gpu_data()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1.),bottom_data,top_data);
}
caffe_gpu_scal(H_*W_,(Dtype)(1./children->size()),top_data);
}
}
}
}
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if( propagate_down[0] == false )
return;
for(int m = 0; m < M_; ++m) {
for( int i = 0; i < depth_-1; ++i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
const Dtype * top_diff = &tops->gpu_diff()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
Dtype * bottom_diff = &bottoms->mutable_gpu_diff()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1./children->size()),top_diff,bottom_diff);
}
}
}
}
caffe_copy(bottom[0]->count(), top[depth_-1]->gpu_diff(), bottom[0]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(SuperCategoryFMLayer);
} // namespace caffe
*/ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
/*
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), top[depth_-1]->mutable_gpu_data());
for(int i = 0; i < depth_-1; ++i)
caffe_gpu_set(top[i]->count(), (Dtype)0., top[i]->mutable_gpu_data());
for(int m = 0; m < M_; ++m) {
for( int i = depth_-2; i >= 0; --i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
Dtype * top_data = &tops->mutable_gpu_data()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
const Dtype * bottom_data = &bottoms->gpu_data()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1.),bottom_data,top_data);
}
caffe_gpu_scal(H_*W_,(Dtype)(1./children->size()),top_data);
}
}
}
}
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if( propagate_down[0] == false )
return;
for(int m = 0; m < M_; ++m) {
for( int i = 0; i < depth_-1; ++i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
const Dtype * top_diff = &tops->gpu_diff()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
Dtype * bottom_diff = &bottoms->mutable_gpu_diff()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1./children->size()),top_diff,bottom_diff);
}
}
}
}
caffe_copy(bottom[0]->count(), top[depth_-1]->gpu_diff(), bottom[0]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(SuperCategoryFMLayer);
} // namespace caffe
*/ | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/*
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), top[depth_-1]->mutable_gpu_data());
for(int i = 0; i < depth_-1; ++i)
caffe_gpu_set(top[i]->count(), (Dtype)0., top[i]->mutable_gpu_data());
for(int m = 0; m < M_; ++m) {
for( int i = depth_-2; i >= 0; --i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
Dtype * top_data = &tops->mutable_gpu_data()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
const Dtype * bottom_data = &bottoms->gpu_data()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1.),bottom_data,top_data);
}
caffe_gpu_scal(H_*W_,(Dtype)(1./children->size()),top_data);
}
}
}
}
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if( propagate_down[0] == false )
return;
for(int m = 0; m < M_; ++m) {
for( int i = 0; i < depth_-1; ++i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
const Dtype * top_diff = &tops->gpu_diff()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
Dtype * bottom_diff = &bottoms->mutable_gpu_diff()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1./children->size()),top_diff,bottom_diff);
}
}
}
}
caffe_copy(bottom[0]->count(), top[depth_-1]->gpu_diff(), bottom[0]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(SuperCategoryFMLayer);
} // namespace caffe
*/ | .text
.file "super_category_fm_layer.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001588f5_00000000-6_super_category_fm_layer.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "super_category_fm_layer.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //The max possible number of roots
#define DEV_MAX_ROOTS_NUM 3
//Number of threads per block.
#define NT 1024
//The array index in global memory.
__device__ int devArrayIndex;
//The answer array stored roots in global memory
__device__ unsigned long long int devAnswer[DEV_MAX_ROOTS_NUM];
/**
* Device kernel to find (a) cube root(s) for a given integer c with a modular n.
* <P>
* Called with a one-dimensional grid of one-dimensional blocks.
*
* @param c The mod cube (input).
* @param n The module (input).
*
* @author Junan Zhao
* @version 26-Nov-2018
*/
extern "C" __global__ void modCubeRoot( int c, int n)
{
//Determine number of threads and this thread's m (test number).
unsigned long long m = blockIdx.x*NT + threadIdx.x;
unsigned long long size = gridDim.x*NT;
for(; m<n; m+=size) //use loop to cover all range of n if n is a pretty large integer cannot covered by one round
{
unsigned long long temp = m*m;
temp = temp%n;
temp = temp*m;
temp = temp%n;
if(c==(int)temp) //once found a root
{
int oldIndex = atomicAdd(&devArrayIndex,1);
devAnswer[oldIndex] = m;
}
}
} | code for sm_80
Function : modCubeRoot
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x164] ; /* 0x0000590000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ USHF.R.S32.HI UR4, URZ, 0x1f, UR4 ; /* 0x0000001f3f047899 */
/* 0x000fe20008011404 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, 0x400, R3 ; /* 0x0000040000007824 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x000fc80003f06070 */
/*0070*/ ISETP.GE.U32.AND.EX P0, PT, RZ, UR4, PT, P0 ; /* 0x00000004ff007c0c */
/* 0x000fda000bf06100 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */
/* 0x000fe200000001ff */
/*00a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff057624 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd00000000a00 */
/*00c0*/ IMAD R2, R3, R0.reuse, RZ ; /* 0x0000000003027224 */
/* 0x080fe200078e02ff */
/*00d0*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*00e0*/ IMAD.WIDE.U32 R6, R0.reuse, R0, RZ ; /* 0x0000000000067225 */
/* 0x040fe200078e00ff */
/*00f0*/ BSSY B0, 0x300 ; /* 0x0000020000007945 */
/* 0x000fe60003800000 */
/*0100*/ IMAD R9, R0, R3, R2 ; /* 0x0000000300097224 */
/* 0x000fc800078e0202 */
/*0110*/ IMAD.IADD R8, R7, 0x1, R9 ; /* 0x0000000107087824 */
/* 0x000fca00078e0209 */
/*0120*/ LOP3.LUT R2, R8, UR4, RZ, 0xfc, !PT ; /* 0x0000000408027c12 */
/* 0x000fc8000f8efcff */
/*0130*/ ISETP.NE.U32.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05070 */
/*0140*/ @!P0 BRA 0x1c0 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*0150*/ MOV R2, R6 ; /* 0x0000000600027202 */
/* 0x000fe20000000f00 */
/*0160*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff067624 */
/* 0x000fe200078e00ff */
/*0170*/ MOV R4, 0x1a0 ; /* 0x000001a000047802 */
/* 0x000fe20000000f00 */
/*0180*/ IMAD.U32 R7, RZ, RZ, UR4 ; /* 0x00000004ff077e24 */
/* 0x000fe4000f8e00ff */
/*0190*/ CALL.REL.NOINC 0x6b0 ; /* 0x0000051000007944 */
/* 0x000fea0003c00000 */
/*01a0*/ MOV R8, R2 ; /* 0x0000000200087202 */
/* 0x000fe20000000f00 */
/*01b0*/ BRA 0x2f0 ; /* 0x0000013000007947 */
/* 0x000fea0003800000 */
/*01c0*/ I2F.U32.RP R2, c[0x0][0x164] ; /* 0x0000590000027b06 */
/* 0x000e220000209000 */
/*01d0*/ ISETP.NE.U32.AND P1, PT, RZ, c[0x0][0x164], PT ; /* 0x00005900ff007a0c */
/* 0x000fce0003f25070 */
/*01e0*/ MUFU.RCP R2, R2 ; /* 0x0000000200027308 */
/* 0x001e240000001000 */
/*01f0*/ IADD3 R8, R2, 0xffffffe, RZ ; /* 0x0ffffffe02087810 */
/* 0x001fcc0007ffe0ff */
/*0200*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*0210*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x001fe400078e00ff */
/*0220*/ IMAD.MOV R7, RZ, RZ, -R9 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a09 */
/*0230*/ IMAD R7, R7, c[0x0][0x164], RZ ; /* 0x0000590007077a24 */
/* 0x000fc800078e02ff */
/*0240*/ IMAD.HI.U32 R9, R9, R7, R8 ; /* 0x0000000709097227 */
/* 0x000fcc00078e0008 */
/*0250*/ IMAD.HI.U32 R9, R9, R6, RZ ; /* 0x0000000609097227 */
/* 0x000fca00078e00ff */
/*0260*/ IADD3 R9, -R9, RZ, RZ ; /* 0x000000ff09097210 */
/* 0x000fca0007ffe1ff */
/*0270*/ IMAD R6, R9, c[0x0][0x164], R6 ; /* 0x0000590009067a24 */
/* 0x000fe400078e0206 */
/*0280*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fc600078e00ff */
/*0290*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x164], PT ; /* 0x0000590006007a0c */
/* 0x000fda0003f06070 */
/*02a0*/ @P0 IADD3 R6, R6, -c[0x0][0x164], RZ ; /* 0x8000590006060a10 */
/* 0x000fc80007ffe0ff */
/*02b0*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x164], PT ; /* 0x0000590006007a0c */
/* 0x000fda0003f06070 */
/*02c0*/ @P0 IADD3 R6, R6, -c[0x0][0x164], RZ ; /* 0x8000590006060a10 */
/* 0x000fe40007ffe0ff */
/*02d0*/ @!P1 LOP3.LUT R6, RZ, c[0x0][0x164], RZ, 0x33, !PT ; /* 0x00005900ff069a12 */
/* 0x000fca00078e33ff */
/*02e0*/ IMAD.MOV.U32 R8, RZ, RZ, R6 ; /* 0x000000ffff087224 */
/* 0x000fe400078e0006 */
/*02f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0300*/ IMAD R2, R9, R0.reuse, RZ ; /* 0x0000000009027224 */
/* 0x080fe200078e02ff */
/*0310*/ BSSY B0, 0x500 ; /* 0x000001e000007945 */
/* 0x000fe20003800000 */
/*0320*/ IMAD.WIDE.U32 R6, R8, R0, RZ ; /* 0x0000000008067225 */
/* 0x000fc800078e00ff */
/*0330*/ IMAD R9, R3, R8, R2 ; /* 0x0000000803097224 */
/* 0x000fca00078e0202 */
/*0340*/ IADD3 R8, R7, R9, RZ ; /* 0x0000000907087210 */
/* 0x000fc80007ffe0ff */
/*0350*/ LOP3.LUT R2, R8, UR4, RZ, 0xfc, !PT ; /* 0x0000000408027c12 */
/* 0x000fc8000f8efcff */
/*0360*/ ISETP.NE.U32.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05070 */
/*0370*/ @!P0 BRA 0x3e0 ; /* 0x0000006000008947 */
/* 0x000fea0003800000 */
/*0380*/ IMAD.MOV.U32 R2, RZ, RZ, R6 ; /* 0x000000ffff027224 */
/* 0x000fe200078e0006 */
/*0390*/ MOV R7, UR4 ; /* 0x0000000400077c02 */
/* 0x000fe20008000f00 */
/*03a0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff067624 */
/* 0x000fe200078e00ff */
/*03b0*/ MOV R4, 0x3d0 ; /* 0x000003d000047802 */
/* 0x000fe40000000f00 */
/*03c0*/ CALL.REL.NOINC 0x6b0 ; /* 0x000002e000007944 */
/* 0x000fea0003c00000 */
/*03d0*/ BRA 0x4f0 ; /* 0x0000011000007947 */
/* 0x000fea0003800000 */
/*03e0*/ I2F.U32.RP R4, c[0x0][0x164] ; /* 0x0000590000047b06 */
/* 0x000e220000209000 */
/*03f0*/ ISETP.NE.U32.AND P1, PT, RZ, c[0x0][0x164], PT ; /* 0x00005900ff007a0c */
/* 0x000fce0003f25070 */
/*0400*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*0410*/ IADD3 R8, R4, 0xffffffe, RZ ; /* 0x0ffffffe04087810 */
/* 0x001fcc0007ffe0ff */
/*0420*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*0430*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x001fe400078e00ff */
/*0440*/ IMAD.MOV R7, RZ, RZ, -R9 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a09 */
/*0450*/ IMAD R7, R7, c[0x0][0x164], RZ ; /* 0x0000590007077a24 */
/* 0x000fc800078e02ff */
/*0460*/ IMAD.HI.U32 R7, R9, R7, R8 ; /* 0x0000000709077227 */
/* 0x000fcc00078e0008 */
/*0470*/ IMAD.HI.U32 R2, R7, R6, RZ ; /* 0x0000000607027227 */
/* 0x000fca00078e00ff */
/*0480*/ IADD3 R7, -R2, RZ, RZ ; /* 0x000000ff02077210 */
/* 0x000fca0007ffe1ff */
/*0490*/ IMAD R2, R7, c[0x0][0x164], R6 ; /* 0x0000590007027a24 */
/* 0x000fca00078e0206 */
/*04a0*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x164], PT ; /* 0x0000590002007a0c */
/* 0x000fda0003f06070 */
/*04b0*/ @P0 IADD3 R2, R2, -c[0x0][0x164], RZ ; /* 0x8000590002020a10 */
/* 0x000fc80007ffe0ff */
/*04c0*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x164], PT ; /* 0x0000590002007a0c */
/* 0x000fda0003f06070 */
/*04d0*/ @P0 IADD3 R2, R2, -c[0x0][0x164], RZ ; /* 0x8000590002020a10 */
/* 0x000fe40007ffe0ff */
/*04e0*/ @!P1 LOP3.LUT R2, RZ, c[0x0][0x164], RZ, 0x33, !PT ; /* 0x00005900ff029a12 */
/* 0x000fe400078e33ff */
/*04f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0500*/ ISETP.NE.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */
/* 0x000fe20003f05270 */
/*0510*/ BSSY B0, 0x650 ; /* 0x0000013000007945 */
/* 0x000fd80003800000 */
/*0520*/ @P0 BRA 0x640 ; /* 0x0000011000000947 */
/* 0x000fea0003800000 */
/*0530*/ S2R R7, SR_LANEID ; /* 0x0000000000077919 */
/* 0x000e220000000000 */
/*0540*/ VOTEU.ANY UR5, UPT, PT ; /* 0x0000000000057886 */
/* 0x000fe200038e0100 */
/*0550*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x4][0x0] ; /* 0x01000000ff067624 */
/* 0x000fe200078e00ff */
/*0560*/ FLO.U32 R2, UR5 ; /* 0x0000000500027d00 */
/* 0x000e3000080e0000 */
/*0570*/ POPC R11, UR5 ; /* 0x00000005000b7d09 */
/* 0x000e620008000000 */
/*0580*/ ISETP.EQ.U32.AND P0, PT, R2, R7, PT ; /* 0x000000070200720c */
/* 0x001fe20003f02070 */
/*0590*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x4][0x4] ; /* 0x01000100ff077624 */
/* 0x000fd800078e00ff */
/*05a0*/ @P0 ATOMG.E.ADD.STRONG.GPU PT, R7, [R6.64], R11 ; /* 0x0000000b060709a8 */
/* 0x002ea200081ee1c6 */
/*05b0*/ HFMA2.MMA R13, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0d7435 */
/* 0x000fc600000001ff */
/*05c0*/ S2R R4, SR_LTMASK ; /* 0x0000000000047919 */
/* 0x000e240000003900 */
/*05d0*/ LOP3.LUT R4, R4, UR5, RZ, 0xc0, !PT ; /* 0x0000000504047c12 */
/* 0x001fc8000f8ec0ff */
/*05e0*/ POPC R9, R4 ; /* 0x0000000400097309 */
/* 0x000e220000000000 */
/*05f0*/ SHFL.IDX PT, R8, R7, R2, 0x1f ; /* 0x00001f0207087589 */
/* 0x00422400000e0000 */
/*0600*/ IMAD.MOV.U32 R2, RZ, RZ, R0 ; /* 0x000000ffff027224 */
/* 0x002fe400078e0000 */
/*0610*/ IMAD.IADD R8, R8, 0x1, R9 ; /* 0x0000000108087824 */
/* 0x001fc800078e0209 */
/*0620*/ IMAD.WIDE R8, R8, R13, c[0x4][0x8] ; /* 0x0100020008087625 */
/* 0x000fca00078e020d */
/*0630*/ STG.E.64 [R8.64], R2 ; /* 0x0000000208007986 */
/* 0x0001e4000c101b06 */
/*0640*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0650*/ LEA R0, P0, R5, R0, 0xa ; /* 0x0000000005007211 */
/* 0x000fc800078050ff */
/*0660*/ IADD3.X R3, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff037210 */
/* 0x001fe400007fe4ff */
/*0670*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x000fc80003f06070 */
/*0680*/ ISETP.GE.U32.AND.EX P0, PT, R3, UR4, PT, P0 ; /* 0x0000000403007c0c */
/* 0x000fda000bf06100 */
/*0690*/ @!P0 BRA 0xc0 ; /* 0xfffffa2000008947 */
/* 0x000fea000383ffff */
/*06a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*06b0*/ I2F.U64.RP R9, R6 ; /* 0x0000000600097312 */
/* 0x000e300000309000 */
/*06c0*/ MUFU.RCP R9, R9 ; /* 0x0000000900097308 */
/* 0x001e240000001000 */
/*06d0*/ IADD3 R10, R9, 0x1ffffffe, RZ ; /* 0x1ffffffe090a7810 */
/* 0x001fcc0007ffe0ff */
/*06e0*/ F2I.U64.TRUNC R10, R10 ; /* 0x0000000a000a7311 */
/* 0x000e24000020d800 */
/*06f0*/ IMAD.WIDE.U32 R12, R10, R6, RZ ; /* 0x000000060a0c7225 */
/* 0x001fc800078e00ff */
/*0700*/ IMAD R13, R10, R7, R13 ; /* 0x000000070a0d7224 */
/* 0x000fe200078e020d */
/*0710*/ IADD3 R15, P0, RZ, -R12, RZ ; /* 0x8000000cff0f7210 */
/* 0x000fc60007f1e0ff */
/*0720*/ IMAD R13, R11, R6, R13 ; /* 0x000000060b0d7224 */
/* 0x000fe400078e020d */
/*0730*/ IMAD.HI.U32 R12, R10, R15, RZ ; /* 0x0000000f0a0c7227 */
/* 0x000fc800078e00ff */
/*0740*/ IMAD.X R17, RZ, RZ, ~R13, P0 ; /* 0x000000ffff117224 */
/* 0x000fe400000e0e0d */
/*0750*/ IMAD.MOV.U32 R13, RZ, RZ, R10 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e000a */
/*0760*/ IMAD R19, R11, R17.reuse, RZ ; /* 0x000000110b137224 */
/* 0x080fe400078e02ff */
/*0770*/ IMAD.WIDE.U32 R12, P0, R10, R17, R12 ; /* 0x000000110a0c7225 */
/* 0x000fc8000780000c */
/*0780*/ IMAD.HI.U32 R9, R11, R17, RZ ; /* 0x000000110b097227 */
/* 0x000fc800078e00ff */
/*0790*/ IMAD.HI.U32 R12, P1, R11, R15, R12 ; /* 0x0000000f0b0c7227 */
/* 0x000fe2000782000c */
/*07a0*/ IADD3.X R9, R9, R11, RZ, P0, !PT ; /* 0x0000000b09097210 */
/* 0x000fc800007fe4ff */
/*07b0*/ IADD3 R13, P2, R19, R12, RZ ; /* 0x0000000c130d7210 */
/* 0x000fc80007f5e0ff */
/*07c0*/ IADD3.X R9, RZ, RZ, R9, P2, P1 ; /* 0x000000ffff097210 */
/* 0x000fe200017e2409 */
/*07d0*/ IMAD.WIDE.U32 R10, R13, R6, RZ ; /* 0x000000060d0a7225 */
/* 0x000fc800078e00ff */
/*07e0*/ IMAD R11, R13, R7, R11 ; /* 0x000000070d0b7224 */
/* 0x000fe200078e020b */
/*07f0*/ IADD3 R15, P0, RZ, -R10, RZ ; /* 0x8000000aff0f7210 */
/* 0x000fc60007f1e0ff */
/*0800*/ IMAD R11, R9, R6, R11 ; /* 0x00000006090b7224 */
/* 0x000fe400078e020b */
/*0810*/ IMAD.HI.U32 R12, R13, R15, RZ ; /* 0x0000000f0d0c7227 */
/* 0x000fc800078e00ff */
/*0820*/ IMAD.X R10, RZ, RZ, ~R11, P0 ; /* 0x000000ffff0a7224 */
/* 0x000fe400000e0e0b */
/*0830*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e00ff */
/*0840*/ IMAD.WIDE.U32 R12, P0, R13, R10, R12 ; /* 0x0000000a0d0c7225 */
/* 0x000fc8000780000c */
/*0850*/ IMAD R14, R9.reuse, R10, RZ ; /* 0x0000000a090e7224 */
/* 0x040fe400078e02ff */
/*0860*/ IMAD.HI.U32 R13, P1, R9, R15, R12 ; /* 0x0000000f090d7227 */
/* 0x000fc8000782000c */
/*0870*/ IMAD.HI.U32 R10, R9, R10, RZ ; /* 0x0000000a090a7227 */
/* 0x000fe200078e00ff */
/*0880*/ IADD3 R13, P2, R14, R13, RZ ; /* 0x0000000d0e0d7210 */
/* 0x000fc80007f5e0ff */
/*0890*/ IADD3.X R9, R10, R9, RZ, P0, !PT ; /* 0x000000090a097210 */
/* 0x000fe200007fe4ff */
/*08a0*/ IMAD.HI.U32 R10, R13, R2, RZ ; /* 0x000000020d0a7227 */
/* 0x000fc600078e00ff */
/*08b0*/ IADD3.X R9, RZ, RZ, R9, P2, P1 ; /* 0x000000ffff097210 */
/* 0x000fc600017e2409 */
/*08c0*/ IMAD.WIDE.U32 R10, R13, R8, R10 ; /* 0x000000080d0a7225 */
/* 0x000fc800078e000a */
/*08d0*/ IMAD R13, R9.reuse, R8, RZ ; /* 0x00000008090d7224 */
/* 0x040fe400078e02ff */
/*08e0*/ IMAD.HI.U32 R10, P0, R9, R2, R10 ; /* 0x00000002090a7227 */
/* 0x000fc8000780000a */
/*08f0*/ IMAD.HI.U32 R9, R9, R8, RZ ; /* 0x0000000809097227 */
/* 0x000fe200078e00ff */
/*0900*/ IADD3 R13, P1, R13, R10, RZ ; /* 0x0000000a0d0d7210 */
/* 0x000fc80007f3e0ff */
/*0910*/ IADD3.X R9, R9, RZ, RZ, P0, !PT ; /* 0x000000ff09097210 */
/* 0x000fe200007fe4ff */
/*0920*/ IMAD.WIDE.U32 R10, R13, R6, RZ ; /* 0x000000060d0a7225 */
/* 0x000fc800078e00ff */
/*0930*/ IMAD.X R9, RZ, RZ, R9, P1 ; /* 0x000000ffff097224 */
/* 0x000fe200008e0609 */
/*0940*/ IADD3 R15, P1, -R10, R2, RZ ; /* 0x000000020a0f7210 */
/* 0x000fe20007f3e1ff */
/*0950*/ IMAD R13, R13, R7, R11 ; /* 0x000000070d0d7224 */
/* 0x000fc600078e020b */
/*0960*/ ISETP.GE.U32.AND P0, PT, R15, R6.reuse, PT ; /* 0x000000060f00720c */
/* 0x080fe20003f06070 */
/*0970*/ IMAD R9, R9, R6, R13 ; /* 0x0000000609097224 */
/* 0x000fca00078e020d */
/*0980*/ IADD3.X R2, ~R9, R8, RZ, P1, !PT ; /* 0x0000000809027210 */
/* 0x000fe40000ffe5ff */
/*0990*/ IADD3 R11, P1, R15, -R6, RZ ; /* 0x800000060f0b7210 */
/* 0x000fe40007f3e0ff */
/*09a0*/ ISETP.GE.U32.AND.EX P0, PT, R2, R7, PT, P0 ; /* 0x000000070200720c */
/* 0x000fc60003f06100 */
/*09b0*/ IMAD.X R8, R2, 0x1, ~R7, P1 ; /* 0x0000000102087824 */
/* 0x000fe200008e0e07 */
/*09c0*/ SEL R11, R11, R15, P0 ; /* 0x0000000f0b0b7207 */
/* 0x000fe40000000000 */
/*09d0*/ ISETP.NE.U32.AND P1, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f25070 */
/*09e0*/ SEL R8, R8, R2, P0 ; /* 0x0000000208087207 */
/* 0x000fe40000000000 */
/*09f0*/ ISETP.GE.U32.AND P0, PT, R11.reuse, R6.reuse, PT ; /* 0x000000060b00720c */
/* 0x0c0fe40003f06070 */
/*0a00*/ IADD3 R2, P2, R11, -R6, RZ ; /* 0x800000060b027210 */
/* 0x000fe20007f5e0ff */
/*0a10*/ IMAD.MOV.U32 R6, RZ, RZ, R4 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0004 */
/*0a20*/ ISETP.GE.U32.AND.EX P0, PT, R8, R7, PT, P0 ; /* 0x000000070800720c */
/* 0x000fc40003f06100 */
/*0a30*/ IADD3.X R9, ~R7.reuse, R8.reuse, RZ, P2, !PT ; /* 0x0000000807097210 */
/* 0x0c0fe400017fe5ff */
/*0a40*/ ISETP.NE.AND.EX P1, PT, R7, RZ, PT, P1 ; /* 0x000000ff0700720c */
/* 0x000fe40003f25310 */
/*0a50*/ MOV R7, 0x0 ; /* 0x0000000000077802 */
/* 0x000fe40000000f00 */
/*0a60*/ SEL R2, R2, R11, P0 ; /* 0x0000000b02027207 */
/* 0x000fe40000000000 */
/*0a70*/ SEL R9, R9, R8, P0 ; /* 0x0000000809097207 */
/* 0x000fe40000000000 */
/*0a80*/ SEL R2, R2, 0xffffffff, P1 ; /* 0xffffffff02027807 */
/* 0x000fc40000800000 */
/*0a90*/ SEL R9, R9, 0xffffffff, P1 ; /* 0xffffffff09097807 */
/* 0x000fe20000800000 */
/*0aa0*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff55006007950 */
/* 0x000fec0003c3ffff */
/*0ab0*/ BRA 0xab0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //The max possible number of roots
#define DEV_MAX_ROOTS_NUM 3
//Number of threads per block.
#define NT 1024
//The array index in global memory.
__device__ int devArrayIndex;
//The answer array stored roots in global memory
__device__ unsigned long long int devAnswer[DEV_MAX_ROOTS_NUM];
/**
* Device kernel to find (a) cube root(s) for a given integer c with a modular n.
* <P>
* Called with a one-dimensional grid of one-dimensional blocks.
*
* @param c The mod cube (input).
* @param n The module (input).
*
* @author Junan Zhao
* @version 26-Nov-2018
*/
extern "C" __global__ void modCubeRoot( int c, int n)
{
//Determine number of threads and this thread's m (test number).
unsigned long long m = blockIdx.x*NT + threadIdx.x;
unsigned long long size = gridDim.x*NT;
for(; m<n; m+=size) //use loop to cover all range of n if n is a pretty large integer cannot covered by one round
{
unsigned long long temp = m*m;
temp = temp%n;
temp = temp*m;
temp = temp%n;
if(c==(int)temp) //once found a root
{
int oldIndex = atomicAdd(&devArrayIndex,1);
devAnswer[oldIndex] = m;
}
}
} | .file "tmpxft_00091e3e_00000000-6_ModCubeRoot.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z11modCubeRootiiii
.type _Z31__device_stub__Z11modCubeRootiiii, @function
_Z31__device_stub__Z11modCubeRootiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq modCubeRoot(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z11modCubeRootiiii, .-_Z31__device_stub__Z11modCubeRootiiii
.globl modCubeRoot
.type modCubeRoot, @function
modCubeRoot:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z11modCubeRootiiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size modCubeRoot, .-modCubeRoot
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "modCubeRoot"
.LC1:
.string "devArrayIndex"
.LC2:
.string "devAnswer"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq modCubeRoot(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13devArrayIndex(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $24, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9devAnswer(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL9devAnswer
.comm _ZL9devAnswer,24,16
.local _ZL13devArrayIndex
.comm _ZL13devArrayIndex,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //The max possible number of roots
#define DEV_MAX_ROOTS_NUM 3
//Number of threads per block.
#define NT 1024
//The array index in global memory.
__device__ int devArrayIndex;
//The answer array stored roots in global memory
__device__ unsigned long long int devAnswer[DEV_MAX_ROOTS_NUM];
/**
* Device kernel to find (a) cube root(s) for a given integer c with a modular n.
* <P>
* Called with a one-dimensional grid of one-dimensional blocks.
*
* @param c The mod cube (input).
* @param n The module (input).
*
* @author Junan Zhao
* @version 26-Nov-2018
*/
extern "C" __global__ void modCubeRoot( int c, int n)
{
//Determine number of threads and this thread's m (test number).
unsigned long long m = blockIdx.x*NT + threadIdx.x;
unsigned long long size = gridDim.x*NT;
for(; m<n; m+=size) //use loop to cover all range of n if n is a pretty large integer cannot covered by one round
{
unsigned long long temp = m*m;
temp = temp%n;
temp = temp*m;
temp = temp%n;
if(c==(int)temp) //once found a root
{
int oldIndex = atomicAdd(&devArrayIndex,1);
devAnswer[oldIndex] = m;
}
}
} | #include <hip/hip_runtime.h>
//The max possible number of roots
#define DEV_MAX_ROOTS_NUM 3
//Number of threads per block.
#define NT 1024
//The array index in global memory.
__device__ int devArrayIndex;
//The answer array stored roots in global memory
__device__ unsigned long long int devAnswer[DEV_MAX_ROOTS_NUM];
/**
* Device kernel to find (a) cube root(s) for a given integer c with a modular n.
* <P>
* Called with a one-dimensional grid of one-dimensional blocks.
*
* @param c The mod cube (input).
* @param n The module (input).
*
* @author Junan Zhao
* @version 26-Nov-2018
*/
extern "C" __global__ void modCubeRoot( int c, int n)
{
//Determine number of threads and this thread's m (test number).
unsigned long long m = blockIdx.x*NT + threadIdx.x;
unsigned long long size = gridDim.x*NT;
for(; m<n; m+=size) //use loop to cover all range of n if n is a pretty large integer cannot covered by one round
{
unsigned long long temp = m*m;
temp = temp%n;
temp = temp*m;
temp = temp%n;
if(c==(int)temp) //once found a root
{
int oldIndex = atomicAdd(&devArrayIndex,1);
devAnswer[oldIndex] = m;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
//The max possible number of roots
#define DEV_MAX_ROOTS_NUM 3
//Number of threads per block.
#define NT 1024
//The array index in global memory.
__device__ int devArrayIndex;
//The answer array stored roots in global memory
__device__ unsigned long long int devAnswer[DEV_MAX_ROOTS_NUM];
/**
* Device kernel to find (a) cube root(s) for a given integer c with a modular n.
* <P>
* Called with a one-dimensional grid of one-dimensional blocks.
*
* @param c The mod cube (input).
* @param n The module (input).
*
* @author Junan Zhao
* @version 26-Nov-2018
*/
extern "C" __global__ void modCubeRoot( int c, int n)
   {
      //Determine number of threads and this thread's m (test number).
      //NOTE(review): blockIdx.x*NT + threadIdx.x is evaluated in 32-bit unsigned
      //arithmetic before widening to unsigned long long, so it wraps if
      //gridDim.x*NT reaches 2^32 -- confirm launch sizes stay below that.
      unsigned long long m = blockIdx.x*NT + threadIdx.x;
      //Total threads in the grid (stride for the loop below); assumes the kernel
      //is launched with exactly NT threads per block.
      unsigned long long size = gridDim.x*NT;
      //NOTE(review): n is an int compared against an unsigned long long, so it is
      //converted (sign-extended); a negative n would make both the loop bound and
      //the % operations below wrong -- assumes n > 0.
      for(; m<n; m+=size) //use loop to cover all range of n if n is a pretty large integer cannot covered by one round
         {
         //Compute m^3 mod n in two steps (m^2 mod n, then * m mod n) so the
         //intermediate stays below 2^62 and cannot overflow 64 bits.
         unsigned long long temp = m*m;
         temp = temp%n;
         temp = temp*m;
         temp = temp%n;
         if(c==(int)temp) //once found a root
            {
            //Reserve a unique output slot via a global atomic counter.
            //NOTE(review): oldIndex is not bounds-checked against
            //DEV_MAX_ROOTS_NUM (3); more matches than that would write past the
            //end of devAnswer -- verify the math guarantees at most 3 roots.
            int oldIndex = atomicAdd(&devArrayIndex,1);
            devAnswer[oldIndex] = m;
            }
         }
   }
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected modCubeRoot
.globl modCubeRoot
.p2align 8
.type modCubeRoot,@function
modCubeRoot:
s_load_b32 s2, s[0:1], 0x4
v_mov_b32_e32 v1, 0
v_lshl_or_b32 v3, s15, 10, v0
s_mov_b32 s4, exec_lo
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v4, v1
s_waitcnt lgkmcnt(0)
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmpx_gt_u64_e64 s[2:3], v[3:4]
s_cbranch_execz .LBB0_15
v_cvt_f32_u32_e32 v0, s2
v_cvt_f32_u32_e32 v2, s3
s_clause 0x1
s_load_b32 s4, s[0:1], 0x8
s_load_b32 s1, s[0:1], 0x0
s_mov_b32 s5, 0
s_sub_i32 s6, 0, s2
v_fmamk_f32 v0, v2, 0x4f800000, v0
s_mov_b32 s7, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x5f7ffffc, v0
s_waitcnt lgkmcnt(0)
s_lshl_b32 s4, s4, 10
v_mul_f32_e32 v2, 0x2f800000, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_trunc_f32_e32 v2, v2
v_fmamk_f32 v0, v2, 0xcf800000, v0
v_cvt_u32_f32_e32 v9, v2
s_delay_alu instid0(VALU_DEP_2)
v_cvt_u32_f32_e32 v0, v0
s_branch .LBB0_4
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, s8, v2
s_getpc_b64 s[8:9]
s_add_u32 s8, s8, devAnswer@rel32@lo+4
s_addc_u32 s9, s9, devAnswer@rel32@hi+12
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 3, v[5:6]
v_add_co_u32 v5, vcc_lo, v5, s8
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s9, v6, vcc_lo
global_store_b64 v[5:6], v[3:4], off
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s0
v_add_co_u32 v3, vcc_lo, v3, s4
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_u64_e32 vcc_lo, s[2:3], v[3:4]
s_or_b32 s7, vcc_lo, s7
s_and_not1_b32 exec_lo, exec_lo, s7
s_cbranch_execz .LBB0_15
.LBB0_4:
v_mul_lo_u32 v2, v3, v4
v_mad_u64_u32 v[5:6], null, v3, v3, 0
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v6, v6, v2, v2
v_or_b32_e32 v2, s3, v6
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ne_u64_e32 0, v[1:2]
s_xor_b32 s8, exec_lo, s0
s_cbranch_execz .LBB0_6
s_sub_u32 s0, 0, s2
s_subb_u32 s9, 0, s3
v_mul_hi_u32 v2, s0, v0
v_mul_lo_u32 v7, s0, v9
v_mul_lo_u32 v8, s9, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v2, v7
v_mul_lo_u32 v7, s0, v0
v_add_nc_u32_e32 v2, v2, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v8, v0, v7
v_mul_lo_u32 v10, v0, v2
v_mul_hi_u32 v11, v0, v2
v_mul_hi_u32 v12, v9, v7
v_mul_lo_u32 v7, v9, v7
v_mul_hi_u32 v13, v9, v2
v_mul_lo_u32 v2, v9, v2
v_add_co_u32 v8, vcc_lo, v8, v10
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v7, vcc_lo, v8, v7
v_add_co_ci_u32_e32 v7, vcc_lo, v10, v12, vcc_lo
v_add_co_ci_u32_e32 v8, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v7, v2
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, v9, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v8, s0, v2
v_mul_lo_u32 v11, s9, v2
v_mul_lo_u32 v10, s0, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, v8, v10
v_mul_lo_u32 v10, s0, v2
v_add_nc_u32_e32 v8, v8, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v11, v2, v10
v_mul_lo_u32 v12, v2, v8
v_mul_hi_u32 v13, v2, v8
v_mul_hi_u32 v14, v7, v10
v_mul_lo_u32 v10, v7, v10
v_mul_hi_u32 v15, v7, v8
v_mul_lo_u32 v8, v7, v8
v_add_co_u32 v11, vcc_lo, v11, v12
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v11, v10
v_add_co_ci_u32_e32 v10, vcc_lo, v12, v14, vcc_lo
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v15, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v10, v8
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v8
v_add_co_ci_u32_e32 v14, vcc_lo, v7, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v15, v5, v2
v_mad_u64_u32 v[10:11], null, v6, v2, 0
v_mad_u64_u32 v[7:8], null, v5, v14, 0
v_mad_u64_u32 v[12:13], null, v6, v14, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, v15, v7
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v10
v_add_co_ci_u32_e32 v2, vcc_lo, v7, v11, vcc_lo
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v12
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_lo_u32 v11, s3, v2
v_mad_u64_u32 v[7:8], null, s2, v2, 0
v_mul_lo_u32 v2, s2, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_co_u32 v5, vcc_lo, v5, v7
v_add3_u32 v2, v8, v2, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v8, v6, v2
v_subrev_co_ci_u32_e64 v7, s0, s3, v8, vcc_lo
v_sub_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
v_sub_co_u32 v6, vcc_lo, v5, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_subrev_co_ci_u32_e64 v8, s0, 0, v7, vcc_lo
v_cmp_le_u32_e64 s0, s2, v5
v_subrev_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s3, v2
v_cndmask_b32_e64 v10, 0, -1, s0
v_cmp_le_u32_e64 s0, s2, v6
v_cndmask_b32_e64 v13, 0, -1, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, s3, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v11, 0, -1, s0
v_cmp_le_u32_e64 s0, s3, v8
v_cndmask_b32_e64 v12, 0, -1, s0
v_cmp_eq_u32_e64 s0, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v11, v12, v11, vcc_lo
v_sub_co_u32 v12, vcc_lo, v6, s2
v_subrev_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
v_cmp_ne_u32_e32 vcc_lo, 0, v11
v_cndmask_b32_e64 v10, v13, v10, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_cndmask_b32 v7, v8, v7 :: v_dual_cndmask_b32 v6, v6, v12
v_cmp_ne_u32_e32 vcc_lo, 0, v10
s_delay_alu instid0(VALU_DEP_2)
v_dual_cndmask_b32 v8, v2, v7 :: v_dual_cndmask_b32 v7, v5, v6
.LBB0_6:
s_or_saveexec_b32 s0, s8
v_cvt_f32_u32_e32 v10, s2
s_xor_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v10
v_mov_b32_e32 v8, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v6, s6, v2
v_mul_hi_u32 v6, v2, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v6
v_mul_hi_u32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, v2, s2
v_sub_nc_u32_e32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_cndmask_b32_e32 v7, v2, v5, vcc_lo
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s0
v_mul_lo_u32 v2, v8, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mul_lo_u32 v8, v7, v4
v_mad_u64_u32 v[5:6], null, v7, v3, 0
s_mov_b32 s0, exec_lo
v_add3_u32 v6, v6, v8, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_or_b32_e32 v2, s3, v6
v_cmpx_ne_u64_e32 0, v[1:2]
s_xor_b32 s8, exec_lo, s0
s_cbranch_execz .LBB0_10
s_sub_u32 s0, 0, s2
s_subb_u32 s9, 0, s3
v_mul_hi_u32 v2, s0, v0
v_mul_lo_u32 v7, s0, v9
v_mul_lo_u32 v8, s9, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v2, v7
v_mul_lo_u32 v7, s0, v0
v_add_nc_u32_e32 v2, v2, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v8, v0, v7
v_mul_lo_u32 v10, v0, v2
v_mul_hi_u32 v11, v0, v2
v_mul_hi_u32 v12, v9, v7
v_mul_lo_u32 v7, v9, v7
v_mul_hi_u32 v13, v9, v2
v_mul_lo_u32 v2, v9, v2
v_add_co_u32 v8, vcc_lo, v8, v10
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v7, vcc_lo, v8, v7
v_add_co_ci_u32_e32 v7, vcc_lo, v10, v12, vcc_lo
v_add_co_ci_u32_e32 v8, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v7, v2
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, v9, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v8, s0, v2
v_mul_lo_u32 v11, s9, v2
v_mul_lo_u32 v10, s0, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, v8, v10
v_mul_lo_u32 v10, s0, v2
v_add_nc_u32_e32 v8, v8, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v11, v2, v10
v_mul_lo_u32 v12, v2, v8
v_mul_hi_u32 v13, v2, v8
v_mul_hi_u32 v14, v7, v10
v_mul_lo_u32 v10, v7, v10
v_mul_hi_u32 v15, v7, v8
v_mul_lo_u32 v8, v7, v8
v_add_co_u32 v11, vcc_lo, v11, v12
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v11, v10
v_add_co_ci_u32_e32 v10, vcc_lo, v12, v14, vcc_lo
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v15, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v10, v8
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v8
v_add_co_ci_u32_e32 v14, vcc_lo, v7, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v15, v5, v2
v_mad_u64_u32 v[10:11], null, v6, v2, 0
v_mad_u64_u32 v[7:8], null, v5, v14, 0
v_mad_u64_u32 v[12:13], null, v6, v14, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, v15, v7
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v10
v_add_co_ci_u32_e32 v2, vcc_lo, v7, v11, vcc_lo
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v12
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_lo_u32 v11, s3, v2
v_mad_u64_u32 v[7:8], null, s2, v2, 0
v_mul_lo_u32 v2, s2, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_co_u32 v5, vcc_lo, v5, v7
v_add3_u32 v2, v8, v2, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v8, v6, v2
v_subrev_co_ci_u32_e64 v7, s0, s3, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_sub_co_u32 v8, s0, v5, s2
v_sub_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
v_subrev_co_ci_u32_e64 v7, s0, 0, v7, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s2, v8
v_cndmask_b32_e64 v6, 0, -1, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s3, v7
v_cndmask_b32_e64 v10, 0, -1, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s2, v5
v_cndmask_b32_e64 v11, 0, -1, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s3, v2
v_cndmask_b32_e64 v12, 0, -1, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, s3, v7
v_cndmask_b32_e32 v6, v10, v6, vcc_lo
v_sub_co_u32 v7, vcc_lo, v8, s2
v_cmp_eq_u32_e32 vcc_lo, s3, v2
v_cndmask_b32_e32 v2, v12, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cmp_ne_u32_e32 vcc_lo, 0, v6
v_cndmask_b32_e32 v6, v8, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ne_u32_e32 vcc_lo, 0, v2
v_cndmask_b32_e32 v7, v5, v6, vcc_lo
.LBB0_10:
s_and_not1_saveexec_b32 s0, s8
s_cbranch_execz .LBB0_12
v_rcp_iflag_f32_e32 v2, v10
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v2, v2
v_mul_lo_u32 v6, s6, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v6, v2, v6
v_add_nc_u32_e32 v2, v2, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v2, v5, v2
v_mul_lo_u32 v2, v2, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v2, v5, v2
v_subrev_nc_u32_e32 v5, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
v_subrev_nc_u32_e32 v5, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_cndmask_b32_e32 v7, v2, v5, vcc_lo
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_mov_b32 s0, exec_lo
v_cmpx_eq_u32_e64 s1, v7
s_cbranch_execz .LBB0_3
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v2, s9, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_2
s_bcnt1_i32_b32 s9, s9
s_getpc_b64 s[10:11]
s_add_u32 s10, s10, devArrayIndex@rel32@lo+4
s_addc_u32 s11, s11, devArrayIndex@rel32@hi+12
v_mov_b32_e32 v5, s9
global_atomic_add_u32 v5, v1, v5, s[10:11] glc
s_branch .LBB0_2
.LBB0_15:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel modCubeRoot
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size modCubeRoot, .Lfunc_end0-modCubeRoot
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected devArrayIndex
.type devArrayIndex,@object
.section .bss,"aw",@nobits
.globl devArrayIndex
.p2align 2, 0x0
devArrayIndex:
.long 0
.size devArrayIndex, 4
.protected devAnswer
.type devAnswer,@object
.globl devAnswer
.p2align 4, 0x0
devAnswer:
.zero 24
.size devAnswer, 24
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym devArrayIndex
.addrsig_sym devAnswer
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: modCubeRoot
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: modCubeRoot.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
//The max possible number of roots
#define DEV_MAX_ROOTS_NUM 3
//Number of threads per block.
#define NT 1024
//The array index in global memory.
__device__ int devArrayIndex;
//The answer array stored roots in global memory
__device__ unsigned long long int devAnswer[DEV_MAX_ROOTS_NUM];
/**
* Device kernel to find (a) cube root(s) for a given integer c with a modular n.
* <P>
* Called with a one-dimensional grid of one-dimensional blocks.
*
* @param c The mod cube (input).
* @param n The module (input).
*
* @author Junan Zhao
* @version 26-Nov-2018
*/
extern "C" __global__ void modCubeRoot( int c, int n)
   {
      //Determine number of threads and this thread's m (test number).
      //NOTE(review): blockIdx.x*NT + threadIdx.x is evaluated in 32-bit unsigned
      //arithmetic before widening to unsigned long long, so it wraps if
      //gridDim.x*NT reaches 2^32 -- confirm launch sizes stay below that.
      unsigned long long m = blockIdx.x*NT + threadIdx.x;
      //Total threads in the grid (stride for the loop below); assumes the kernel
      //is launched with exactly NT threads per block.
      unsigned long long size = gridDim.x*NT;
      //NOTE(review): n is an int compared against an unsigned long long, so it is
      //converted (sign-extended); a negative n would make both the loop bound and
      //the % operations below wrong -- assumes n > 0.
      for(; m<n; m+=size) //use loop to cover all range of n if n is a pretty large integer cannot covered by one round
         {
         //Compute m^3 mod n in two steps (m^2 mod n, then * m mod n) so the
         //intermediate stays below 2^62 and cannot overflow 64 bits.
         unsigned long long temp = m*m;
         temp = temp%n;
         temp = temp*m;
         temp = temp%n;
         if(c==(int)temp) //once found a root
            {
            //Reserve a unique output slot via a global atomic counter.
            //NOTE(review): oldIndex is not bounds-checked against
            //DEV_MAX_ROOTS_NUM (3); more matches than that would write past the
            //end of devAnswer -- verify the math guarantees at most 3 roots.
            int oldIndex = atomicAdd(&devArrayIndex,1);
            devAnswer[oldIndex] = m;
            }
         }
   }
.file "ModCubeRoot.hip"
.globl __device_stub__modCubeRoot # -- Begin function __device_stub__modCubeRoot
.p2align 4, 0x90
.type __device_stub__modCubeRoot,@function
__device_stub__modCubeRoot: # @__device_stub__modCubeRoot
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
leaq 12(%rsp), %rax
movq %rax, 64(%rsp)
leaq 8(%rsp), %rax
movq %rax, 72(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 64(%rsp), %r9
movl $modCubeRoot, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size __device_stub__modCubeRoot, .Lfunc_end0-__device_stub__modCubeRoot
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $modCubeRoot, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $devArrayIndex, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $devAnswer, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $24, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type devArrayIndex,@object # @devArrayIndex
.local devArrayIndex
.comm devArrayIndex,4,4
.type devAnswer,@object # @devAnswer
.local devAnswer
.comm devAnswer,24,16
.type modCubeRoot,@object # @modCubeRoot
.section .rodata,"a",@progbits
.globl modCubeRoot
.p2align 3, 0x0
modCubeRoot:
.quad __device_stub__modCubeRoot
.size modCubeRoot, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "modCubeRoot"
.size .L__unnamed_1, 12
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "devArrayIndex"
.size .L__unnamed_2, 14
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "devAnswer"
.size .L__unnamed_3, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__modCubeRoot
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym devArrayIndex
.addrsig_sym devAnswer
.addrsig_sym modCubeRoot
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : modCubeRoot
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x164] ; /* 0x0000590000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ USHF.R.S32.HI UR4, URZ, 0x1f, UR4 ; /* 0x0000001f3f047899 */
/* 0x000fe20008011404 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, 0x400, R3 ; /* 0x0000040000007824 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x000fc80003f06070 */
/*0070*/ ISETP.GE.U32.AND.EX P0, PT, RZ, UR4, PT, P0 ; /* 0x00000004ff007c0c */
/* 0x000fda000bf06100 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */
/* 0x000fe200000001ff */
/*00a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff057624 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd00000000a00 */
/*00c0*/ IMAD R2, R3, R0.reuse, RZ ; /* 0x0000000003027224 */
/* 0x080fe200078e02ff */
/*00d0*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*00e0*/ IMAD.WIDE.U32 R6, R0.reuse, R0, RZ ; /* 0x0000000000067225 */
/* 0x040fe200078e00ff */
/*00f0*/ BSSY B0, 0x300 ; /* 0x0000020000007945 */
/* 0x000fe60003800000 */
/*0100*/ IMAD R9, R0, R3, R2 ; /* 0x0000000300097224 */
/* 0x000fc800078e0202 */
/*0110*/ IMAD.IADD R8, R7, 0x1, R9 ; /* 0x0000000107087824 */
/* 0x000fca00078e0209 */
/*0120*/ LOP3.LUT R2, R8, UR4, RZ, 0xfc, !PT ; /* 0x0000000408027c12 */
/* 0x000fc8000f8efcff */
/*0130*/ ISETP.NE.U32.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05070 */
/*0140*/ @!P0 BRA 0x1c0 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*0150*/ MOV R2, R6 ; /* 0x0000000600027202 */
/* 0x000fe20000000f00 */
/*0160*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff067624 */
/* 0x000fe200078e00ff */
/*0170*/ MOV R4, 0x1a0 ; /* 0x000001a000047802 */
/* 0x000fe20000000f00 */
/*0180*/ IMAD.U32 R7, RZ, RZ, UR4 ; /* 0x00000004ff077e24 */
/* 0x000fe4000f8e00ff */
/*0190*/ CALL.REL.NOINC 0x6b0 ; /* 0x0000051000007944 */
/* 0x000fea0003c00000 */
/*01a0*/ MOV R8, R2 ; /* 0x0000000200087202 */
/* 0x000fe20000000f00 */
/*01b0*/ BRA 0x2f0 ; /* 0x0000013000007947 */
/* 0x000fea0003800000 */
/*01c0*/ I2F.U32.RP R2, c[0x0][0x164] ; /* 0x0000590000027b06 */
/* 0x000e220000209000 */
/*01d0*/ ISETP.NE.U32.AND P1, PT, RZ, c[0x0][0x164], PT ; /* 0x00005900ff007a0c */
/* 0x000fce0003f25070 */
/*01e0*/ MUFU.RCP R2, R2 ; /* 0x0000000200027308 */
/* 0x001e240000001000 */
/*01f0*/ IADD3 R8, R2, 0xffffffe, RZ ; /* 0x0ffffffe02087810 */
/* 0x001fcc0007ffe0ff */
/*0200*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*0210*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x001fe400078e00ff */
/*0220*/ IMAD.MOV R7, RZ, RZ, -R9 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a09 */
/*0230*/ IMAD R7, R7, c[0x0][0x164], RZ ; /* 0x0000590007077a24 */
/* 0x000fc800078e02ff */
/*0240*/ IMAD.HI.U32 R9, R9, R7, R8 ; /* 0x0000000709097227 */
/* 0x000fcc00078e0008 */
/*0250*/ IMAD.HI.U32 R9, R9, R6, RZ ; /* 0x0000000609097227 */
/* 0x000fca00078e00ff */
/*0260*/ IADD3 R9, -R9, RZ, RZ ; /* 0x000000ff09097210 */
/* 0x000fca0007ffe1ff */
/*0270*/ IMAD R6, R9, c[0x0][0x164], R6 ; /* 0x0000590009067a24 */
/* 0x000fe400078e0206 */
/*0280*/ IMAD.MOV.U32 R9, RZ, RZ, RZ ; /* 0x000000ffff097224 */
/* 0x000fc600078e00ff */
/*0290*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x164], PT ; /* 0x0000590006007a0c */
/* 0x000fda0003f06070 */
/*02a0*/ @P0 IADD3 R6, R6, -c[0x0][0x164], RZ ; /* 0x8000590006060a10 */
/* 0x000fc80007ffe0ff */
/*02b0*/ ISETP.GE.U32.AND P0, PT, R6, c[0x0][0x164], PT ; /* 0x0000590006007a0c */
/* 0x000fda0003f06070 */
/*02c0*/ @P0 IADD3 R6, R6, -c[0x0][0x164], RZ ; /* 0x8000590006060a10 */
/* 0x000fe40007ffe0ff */
/*02d0*/ @!P1 LOP3.LUT R6, RZ, c[0x0][0x164], RZ, 0x33, !PT ; /* 0x00005900ff069a12 */
/* 0x000fca00078e33ff */
/*02e0*/ IMAD.MOV.U32 R8, RZ, RZ, R6 ; /* 0x000000ffff087224 */
/* 0x000fe400078e0006 */
/*02f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0300*/ IMAD R2, R9, R0.reuse, RZ ; /* 0x0000000009027224 */
/* 0x080fe200078e02ff */
/*0310*/ BSSY B0, 0x500 ; /* 0x000001e000007945 */
/* 0x000fe20003800000 */
/*0320*/ IMAD.WIDE.U32 R6, R8, R0, RZ ; /* 0x0000000008067225 */
/* 0x000fc800078e00ff */
/*0330*/ IMAD R9, R3, R8, R2 ; /* 0x0000000803097224 */
/* 0x000fca00078e0202 */
/*0340*/ IADD3 R8, R7, R9, RZ ; /* 0x0000000907087210 */
/* 0x000fc80007ffe0ff */
/*0350*/ LOP3.LUT R2, R8, UR4, RZ, 0xfc, !PT ; /* 0x0000000408027c12 */
/* 0x000fc8000f8efcff */
/*0360*/ ISETP.NE.U32.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fda0003f05070 */
/*0370*/ @!P0 BRA 0x3e0 ; /* 0x0000006000008947 */
/* 0x000fea0003800000 */
/*0380*/ IMAD.MOV.U32 R2, RZ, RZ, R6 ; /* 0x000000ffff027224 */
/* 0x000fe200078e0006 */
/*0390*/ MOV R7, UR4 ; /* 0x0000000400077c02 */
/* 0x000fe20008000f00 */
/*03a0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff067624 */
/* 0x000fe200078e00ff */
/*03b0*/ MOV R4, 0x3d0 ; /* 0x000003d000047802 */
/* 0x000fe40000000f00 */
/*03c0*/ CALL.REL.NOINC 0x6b0 ; /* 0x000002e000007944 */
/* 0x000fea0003c00000 */
/*03d0*/ BRA 0x4f0 ; /* 0x0000011000007947 */
/* 0x000fea0003800000 */
/*03e0*/ I2F.U32.RP R4, c[0x0][0x164] ; /* 0x0000590000047b06 */
/* 0x000e220000209000 */
/*03f0*/ ISETP.NE.U32.AND P1, PT, RZ, c[0x0][0x164], PT ; /* 0x00005900ff007a0c */
/* 0x000fce0003f25070 */
/*0400*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*0410*/ IADD3 R8, R4, 0xffffffe, RZ ; /* 0x0ffffffe04087810 */
/* 0x001fcc0007ffe0ff */
/*0420*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*0430*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x001fe400078e00ff */
/*0440*/ IMAD.MOV R7, RZ, RZ, -R9 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a09 */
/*0450*/ IMAD R7, R7, c[0x0][0x164], RZ ; /* 0x0000590007077a24 */
/* 0x000fc800078e02ff */
/*0460*/ IMAD.HI.U32 R7, R9, R7, R8 ; /* 0x0000000709077227 */
/* 0x000fcc00078e0008 */
/*0470*/ IMAD.HI.U32 R2, R7, R6, RZ ; /* 0x0000000607027227 */
/* 0x000fca00078e00ff */
/*0480*/ IADD3 R7, -R2, RZ, RZ ; /* 0x000000ff02077210 */
/* 0x000fca0007ffe1ff */
/*0490*/ IMAD R2, R7, c[0x0][0x164], R6 ; /* 0x0000590007027a24 */
/* 0x000fca00078e0206 */
/*04a0*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x164], PT ; /* 0x0000590002007a0c */
/* 0x000fda0003f06070 */
/*04b0*/ @P0 IADD3 R2, R2, -c[0x0][0x164], RZ ; /* 0x8000590002020a10 */
/* 0x000fc80007ffe0ff */
/*04c0*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x164], PT ; /* 0x0000590002007a0c */
/* 0x000fda0003f06070 */
/*04d0*/ @P0 IADD3 R2, R2, -c[0x0][0x164], RZ ; /* 0x8000590002020a10 */
/* 0x000fe40007ffe0ff */
/*04e0*/ @!P1 LOP3.LUT R2, RZ, c[0x0][0x164], RZ, 0x33, !PT ; /* 0x00005900ff029a12 */
/* 0x000fe400078e33ff */
/*04f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0500*/ ISETP.NE.AND P0, PT, R2, c[0x0][0x160], PT ; /* 0x0000580002007a0c */
/* 0x000fe20003f05270 */
/*0510*/ BSSY B0, 0x650 ; /* 0x0000013000007945 */
/* 0x000fd80003800000 */
/*0520*/ @P0 BRA 0x640 ; /* 0x0000011000000947 */
/* 0x000fea0003800000 */
/*0530*/ S2R R7, SR_LANEID ; /* 0x0000000000077919 */
/* 0x000e220000000000 */
/*0540*/ VOTEU.ANY UR5, UPT, PT ; /* 0x0000000000057886 */
/* 0x000fe200038e0100 */
/*0550*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x4][0x0] ; /* 0x01000000ff067624 */
/* 0x000fe200078e00ff */
/*0560*/ FLO.U32 R2, UR5 ; /* 0x0000000500027d00 */
/* 0x000e3000080e0000 */
/*0570*/ POPC R11, UR5 ; /* 0x00000005000b7d09 */
/* 0x000e620008000000 */
/*0580*/ ISETP.EQ.U32.AND P0, PT, R2, R7, PT ; /* 0x000000070200720c */
/* 0x001fe20003f02070 */
/*0590*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x4][0x4] ; /* 0x01000100ff077624 */
/* 0x000fd800078e00ff */
/*05a0*/ @P0 ATOMG.E.ADD.STRONG.GPU PT, R7, [R6.64], R11 ; /* 0x0000000b060709a8 */
/* 0x002ea200081ee1c6 */
/*05b0*/ HFMA2.MMA R13, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0d7435 */
/* 0x000fc600000001ff */
/*05c0*/ S2R R4, SR_LTMASK ; /* 0x0000000000047919 */
/* 0x000e240000003900 */
/*05d0*/ LOP3.LUT R4, R4, UR5, RZ, 0xc0, !PT ; /* 0x0000000504047c12 */
/* 0x001fc8000f8ec0ff */
/*05e0*/ POPC R9, R4 ; /* 0x0000000400097309 */
/* 0x000e220000000000 */
/*05f0*/ SHFL.IDX PT, R8, R7, R2, 0x1f ; /* 0x00001f0207087589 */
/* 0x00422400000e0000 */
/*0600*/ IMAD.MOV.U32 R2, RZ, RZ, R0 ; /* 0x000000ffff027224 */
/* 0x002fe400078e0000 */
/*0610*/ IMAD.IADD R8, R8, 0x1, R9 ; /* 0x0000000108087824 */
/* 0x001fc800078e0209 */
/*0620*/ IMAD.WIDE R8, R8, R13, c[0x4][0x8] ; /* 0x0100020008087625 */
/* 0x000fca00078e020d */
/*0630*/ STG.E.64 [R8.64], R2 ; /* 0x0000000208007986 */
/* 0x0001e4000c101b06 */
/*0640*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0650*/ LEA R0, P0, R5, R0, 0xa ; /* 0x0000000005007211 */
/* 0x000fc800078050ff */
/*0660*/ IADD3.X R3, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff037210 */
/* 0x001fe400007fe4ff */
/*0670*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x164], PT ; /* 0x0000590000007a0c */
/* 0x000fc80003f06070 */
/*0680*/ ISETP.GE.U32.AND.EX P0, PT, R3, UR4, PT, P0 ; /* 0x0000000403007c0c */
/* 0x000fda000bf06100 */
/*0690*/ @!P0 BRA 0xc0 ; /* 0xfffffa2000008947 */
/* 0x000fea000383ffff */
/*06a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*06b0*/ I2F.U64.RP R9, R6 ; /* 0x0000000600097312 */
/* 0x000e300000309000 */
/*06c0*/ MUFU.RCP R9, R9 ; /* 0x0000000900097308 */
/* 0x001e240000001000 */
/*06d0*/ IADD3 R10, R9, 0x1ffffffe, RZ ; /* 0x1ffffffe090a7810 */
/* 0x001fcc0007ffe0ff */
/*06e0*/ F2I.U64.TRUNC R10, R10 ; /* 0x0000000a000a7311 */
/* 0x000e24000020d800 */
/*06f0*/ IMAD.WIDE.U32 R12, R10, R6, RZ ; /* 0x000000060a0c7225 */
/* 0x001fc800078e00ff */
/*0700*/ IMAD R13, R10, R7, R13 ; /* 0x000000070a0d7224 */
/* 0x000fe200078e020d */
/*0710*/ IADD3 R15, P0, RZ, -R12, RZ ; /* 0x8000000cff0f7210 */
/* 0x000fc60007f1e0ff */
/*0720*/ IMAD R13, R11, R6, R13 ; /* 0x000000060b0d7224 */
/* 0x000fe400078e020d */
/*0730*/ IMAD.HI.U32 R12, R10, R15, RZ ; /* 0x0000000f0a0c7227 */
/* 0x000fc800078e00ff */
/*0740*/ IMAD.X R17, RZ, RZ, ~R13, P0 ; /* 0x000000ffff117224 */
/* 0x000fe400000e0e0d */
/*0750*/ IMAD.MOV.U32 R13, RZ, RZ, R10 ; /* 0x000000ffff0d7224 */
/* 0x000fe400078e000a */
/*0760*/ IMAD R19, R11, R17.reuse, RZ ; /* 0x000000110b137224 */
/* 0x080fe400078e02ff */
/*0770*/ IMAD.WIDE.U32 R12, P0, R10, R17, R12 ; /* 0x000000110a0c7225 */
/* 0x000fc8000780000c */
/*0780*/ IMAD.HI.U32 R9, R11, R17, RZ ; /* 0x000000110b097227 */
/* 0x000fc800078e00ff */
/*0790*/ IMAD.HI.U32 R12, P1, R11, R15, R12 ; /* 0x0000000f0b0c7227 */
/* 0x000fe2000782000c */
/*07a0*/ IADD3.X R9, R9, R11, RZ, P0, !PT ; /* 0x0000000b09097210 */
/* 0x000fc800007fe4ff */
/*07b0*/ IADD3 R13, P2, R19, R12, RZ ; /* 0x0000000c130d7210 */
/* 0x000fc80007f5e0ff */
/*07c0*/ IADD3.X R9, RZ, RZ, R9, P2, P1 ; /* 0x000000ffff097210 */
/* 0x000fe200017e2409 */
/*07d0*/ IMAD.WIDE.U32 R10, R13, R6, RZ ; /* 0x000000060d0a7225 */
/* 0x000fc800078e00ff */
/*07e0*/ IMAD R11, R13, R7, R11 ; /* 0x000000070d0b7224 */
/* 0x000fe200078e020b */
/*07f0*/ IADD3 R15, P0, RZ, -R10, RZ ; /* 0x8000000aff0f7210 */
/* 0x000fc60007f1e0ff */
/*0800*/ IMAD R11, R9, R6, R11 ; /* 0x00000006090b7224 */
/* 0x000fe400078e020b */
/*0810*/ IMAD.HI.U32 R12, R13, R15, RZ ; /* 0x0000000f0d0c7227 */
/* 0x000fc800078e00ff */
/*0820*/ IMAD.X R10, RZ, RZ, ~R11, P0 ; /* 0x000000ffff0a7224 */
/* 0x000fe400000e0e0b */
/*0830*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e00ff */
/*0840*/ IMAD.WIDE.U32 R12, P0, R13, R10, R12 ; /* 0x0000000a0d0c7225 */
/* 0x000fc8000780000c */
/*0850*/ IMAD R14, R9.reuse, R10, RZ ; /* 0x0000000a090e7224 */
/* 0x040fe400078e02ff */
/*0860*/ IMAD.HI.U32 R13, P1, R9, R15, R12 ; /* 0x0000000f090d7227 */
/* 0x000fc8000782000c */
/*0870*/ IMAD.HI.U32 R10, R9, R10, RZ ; /* 0x0000000a090a7227 */
/* 0x000fe200078e00ff */
/*0880*/ IADD3 R13, P2, R14, R13, RZ ; /* 0x0000000d0e0d7210 */
/* 0x000fc80007f5e0ff */
/*0890*/ IADD3.X R9, R10, R9, RZ, P0, !PT ; /* 0x000000090a097210 */
/* 0x000fe200007fe4ff */
/*08a0*/ IMAD.HI.U32 R10, R13, R2, RZ ; /* 0x000000020d0a7227 */
/* 0x000fc600078e00ff */
/*08b0*/ IADD3.X R9, RZ, RZ, R9, P2, P1 ; /* 0x000000ffff097210 */
/* 0x000fc600017e2409 */
/*08c0*/ IMAD.WIDE.U32 R10, R13, R8, R10 ; /* 0x000000080d0a7225 */
/* 0x000fc800078e000a */
/*08d0*/ IMAD R13, R9.reuse, R8, RZ ; /* 0x00000008090d7224 */
/* 0x040fe400078e02ff */
/*08e0*/ IMAD.HI.U32 R10, P0, R9, R2, R10 ; /* 0x00000002090a7227 */
/* 0x000fc8000780000a */
/*08f0*/ IMAD.HI.U32 R9, R9, R8, RZ ; /* 0x0000000809097227 */
/* 0x000fe200078e00ff */
/*0900*/ IADD3 R13, P1, R13, R10, RZ ; /* 0x0000000a0d0d7210 */
/* 0x000fc80007f3e0ff */
/*0910*/ IADD3.X R9, R9, RZ, RZ, P0, !PT ; /* 0x000000ff09097210 */
/* 0x000fe200007fe4ff */
/*0920*/ IMAD.WIDE.U32 R10, R13, R6, RZ ; /* 0x000000060d0a7225 */
/* 0x000fc800078e00ff */
/*0930*/ IMAD.X R9, RZ, RZ, R9, P1 ; /* 0x000000ffff097224 */
/* 0x000fe200008e0609 */
/*0940*/ IADD3 R15, P1, -R10, R2, RZ ; /* 0x000000020a0f7210 */
/* 0x000fe20007f3e1ff */
/*0950*/ IMAD R13, R13, R7, R11 ; /* 0x000000070d0d7224 */
/* 0x000fc600078e020b */
/*0960*/ ISETP.GE.U32.AND P0, PT, R15, R6.reuse, PT ; /* 0x000000060f00720c */
/* 0x080fe20003f06070 */
/*0970*/ IMAD R9, R9, R6, R13 ; /* 0x0000000609097224 */
/* 0x000fca00078e020d */
/*0980*/ IADD3.X R2, ~R9, R8, RZ, P1, !PT ; /* 0x0000000809027210 */
/* 0x000fe40000ffe5ff */
/*0990*/ IADD3 R11, P1, R15, -R6, RZ ; /* 0x800000060f0b7210 */
/* 0x000fe40007f3e0ff */
/*09a0*/ ISETP.GE.U32.AND.EX P0, PT, R2, R7, PT, P0 ; /* 0x000000070200720c */
/* 0x000fc60003f06100 */
/*09b0*/ IMAD.X R8, R2, 0x1, ~R7, P1 ; /* 0x0000000102087824 */
/* 0x000fe200008e0e07 */
/*09c0*/ SEL R11, R11, R15, P0 ; /* 0x0000000f0b0b7207 */
/* 0x000fe40000000000 */
/*09d0*/ ISETP.NE.U32.AND P1, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f25070 */
/*09e0*/ SEL R8, R8, R2, P0 ; /* 0x0000000208087207 */
/* 0x000fe40000000000 */
/*09f0*/ ISETP.GE.U32.AND P0, PT, R11.reuse, R6.reuse, PT ; /* 0x000000060b00720c */
/* 0x0c0fe40003f06070 */
/*0a00*/ IADD3 R2, P2, R11, -R6, RZ ; /* 0x800000060b027210 */
/* 0x000fe20007f5e0ff */
/*0a10*/ IMAD.MOV.U32 R6, RZ, RZ, R4 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0004 */
/*0a20*/ ISETP.GE.U32.AND.EX P0, PT, R8, R7, PT, P0 ; /* 0x000000070800720c */
/* 0x000fc40003f06100 */
/*0a30*/ IADD3.X R9, ~R7.reuse, R8.reuse, RZ, P2, !PT ; /* 0x0000000807097210 */
/* 0x0c0fe400017fe5ff */
/*0a40*/ ISETP.NE.AND.EX P1, PT, R7, RZ, PT, P1 ; /* 0x000000ff0700720c */
/* 0x000fe40003f25310 */
/*0a50*/ MOV R7, 0x0 ; /* 0x0000000000077802 */
/* 0x000fe40000000f00 */
/*0a60*/ SEL R2, R2, R11, P0 ; /* 0x0000000b02027207 */
/* 0x000fe40000000000 */
/*0a70*/ SEL R9, R9, R8, P0 ; /* 0x0000000809097207 */
/* 0x000fe40000000000 */
/*0a80*/ SEL R2, R2, 0xffffffff, P1 ; /* 0xffffffff02027807 */
/* 0x000fc40000800000 */
/*0a90*/ SEL R9, R9, 0xffffffff, P1 ; /* 0xffffffff09097807 */
/* 0x000fe20000800000 */
/*0aa0*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff55006007950 */
/* 0x000fec0003c3ffff */
/*0ab0*/ BRA 0xab0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected modCubeRoot
.globl modCubeRoot
.p2align 8
.type modCubeRoot,@function
modCubeRoot:
s_load_b32 s2, s[0:1], 0x4
v_mov_b32_e32 v1, 0
v_lshl_or_b32 v3, s15, 10, v0
s_mov_b32 s4, exec_lo
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v4, v1
s_waitcnt lgkmcnt(0)
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmpx_gt_u64_e64 s[2:3], v[3:4]
s_cbranch_execz .LBB0_15
v_cvt_f32_u32_e32 v0, s2
v_cvt_f32_u32_e32 v2, s3
s_clause 0x1
s_load_b32 s4, s[0:1], 0x8
s_load_b32 s1, s[0:1], 0x0
s_mov_b32 s5, 0
s_sub_i32 s6, 0, s2
v_fmamk_f32 v0, v2, 0x4f800000, v0
s_mov_b32 s7, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x5f7ffffc, v0
s_waitcnt lgkmcnt(0)
s_lshl_b32 s4, s4, 10
v_mul_f32_e32 v2, 0x2f800000, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_trunc_f32_e32 v2, v2
v_fmamk_f32 v0, v2, 0xcf800000, v0
v_cvt_u32_f32_e32 v9, v2
s_delay_alu instid0(VALU_DEP_2)
v_cvt_u32_f32_e32 v0, v0
s_branch .LBB0_4
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, s8, v2
s_getpc_b64 s[8:9]
s_add_u32 s8, s8, devAnswer@rel32@lo+4
s_addc_u32 s9, s9, devAnswer@rel32@hi+12
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 3, v[5:6]
v_add_co_u32 v5, vcc_lo, v5, s8
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s9, v6, vcc_lo
global_store_b64 v[5:6], v[3:4], off
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s0
v_add_co_u32 v3, vcc_lo, v3, s4
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_u64_e32 vcc_lo, s[2:3], v[3:4]
s_or_b32 s7, vcc_lo, s7
s_and_not1_b32 exec_lo, exec_lo, s7
s_cbranch_execz .LBB0_15
.LBB0_4:
v_mul_lo_u32 v2, v3, v4
v_mad_u64_u32 v[5:6], null, v3, v3, 0
s_mov_b32 s0, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v6, v6, v2, v2
v_or_b32_e32 v2, s3, v6
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ne_u64_e32 0, v[1:2]
s_xor_b32 s8, exec_lo, s0
s_cbranch_execz .LBB0_6
s_sub_u32 s0, 0, s2
s_subb_u32 s9, 0, s3
v_mul_hi_u32 v2, s0, v0
v_mul_lo_u32 v7, s0, v9
v_mul_lo_u32 v8, s9, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v2, v7
v_mul_lo_u32 v7, s0, v0
v_add_nc_u32_e32 v2, v2, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v8, v0, v7
v_mul_lo_u32 v10, v0, v2
v_mul_hi_u32 v11, v0, v2
v_mul_hi_u32 v12, v9, v7
v_mul_lo_u32 v7, v9, v7
v_mul_hi_u32 v13, v9, v2
v_mul_lo_u32 v2, v9, v2
v_add_co_u32 v8, vcc_lo, v8, v10
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v7, vcc_lo, v8, v7
v_add_co_ci_u32_e32 v7, vcc_lo, v10, v12, vcc_lo
v_add_co_ci_u32_e32 v8, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v7, v2
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, v9, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v8, s0, v2
v_mul_lo_u32 v11, s9, v2
v_mul_lo_u32 v10, s0, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, v8, v10
v_mul_lo_u32 v10, s0, v2
v_add_nc_u32_e32 v8, v8, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v11, v2, v10
v_mul_lo_u32 v12, v2, v8
v_mul_hi_u32 v13, v2, v8
v_mul_hi_u32 v14, v7, v10
v_mul_lo_u32 v10, v7, v10
v_mul_hi_u32 v15, v7, v8
v_mul_lo_u32 v8, v7, v8
v_add_co_u32 v11, vcc_lo, v11, v12
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v11, v10
v_add_co_ci_u32_e32 v10, vcc_lo, v12, v14, vcc_lo
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v15, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v10, v8
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v8
v_add_co_ci_u32_e32 v14, vcc_lo, v7, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v15, v5, v2
v_mad_u64_u32 v[10:11], null, v6, v2, 0
v_mad_u64_u32 v[7:8], null, v5, v14, 0
v_mad_u64_u32 v[12:13], null, v6, v14, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, v15, v7
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v10
v_add_co_ci_u32_e32 v2, vcc_lo, v7, v11, vcc_lo
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v12
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_lo_u32 v11, s3, v2
v_mad_u64_u32 v[7:8], null, s2, v2, 0
v_mul_lo_u32 v2, s2, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_co_u32 v5, vcc_lo, v5, v7
v_add3_u32 v2, v8, v2, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v8, v6, v2
v_subrev_co_ci_u32_e64 v7, s0, s3, v8, vcc_lo
v_sub_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
v_sub_co_u32 v6, vcc_lo, v5, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_subrev_co_ci_u32_e64 v8, s0, 0, v7, vcc_lo
v_cmp_le_u32_e64 s0, s2, v5
v_subrev_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s3, v2
v_cndmask_b32_e64 v10, 0, -1, s0
v_cmp_le_u32_e64 s0, s2, v6
v_cndmask_b32_e64 v13, 0, -1, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, s3, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v11, 0, -1, s0
v_cmp_le_u32_e64 s0, s3, v8
v_cndmask_b32_e64 v12, 0, -1, s0
v_cmp_eq_u32_e64 s0, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v11, v12, v11, vcc_lo
v_sub_co_u32 v12, vcc_lo, v6, s2
v_subrev_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
v_cmp_ne_u32_e32 vcc_lo, 0, v11
v_cndmask_b32_e64 v10, v13, v10, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_cndmask_b32 v7, v8, v7 :: v_dual_cndmask_b32 v6, v6, v12
v_cmp_ne_u32_e32 vcc_lo, 0, v10
s_delay_alu instid0(VALU_DEP_2)
v_dual_cndmask_b32 v8, v2, v7 :: v_dual_cndmask_b32 v7, v5, v6
.LBB0_6:
s_or_saveexec_b32 s0, s8
v_cvt_f32_u32_e32 v10, s2
s_xor_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v10
v_mov_b32_e32 v8, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v6, s6, v2
v_mul_hi_u32 v6, v2, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v6
v_mul_hi_u32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, v2, s2
v_sub_nc_u32_e32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_cndmask_b32_e32 v7, v2, v5, vcc_lo
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s0
v_mul_lo_u32 v2, v8, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mul_lo_u32 v8, v7, v4
v_mad_u64_u32 v[5:6], null, v7, v3, 0
s_mov_b32 s0, exec_lo
v_add3_u32 v6, v6, v8, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_or_b32_e32 v2, s3, v6
v_cmpx_ne_u64_e32 0, v[1:2]
s_xor_b32 s8, exec_lo, s0
s_cbranch_execz .LBB0_10
s_sub_u32 s0, 0, s2
s_subb_u32 s9, 0, s3
v_mul_hi_u32 v2, s0, v0
v_mul_lo_u32 v7, s0, v9
v_mul_lo_u32 v8, s9, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v2, v7
v_mul_lo_u32 v7, s0, v0
v_add_nc_u32_e32 v2, v2, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v8, v0, v7
v_mul_lo_u32 v10, v0, v2
v_mul_hi_u32 v11, v0, v2
v_mul_hi_u32 v12, v9, v7
v_mul_lo_u32 v7, v9, v7
v_mul_hi_u32 v13, v9, v2
v_mul_lo_u32 v2, v9, v2
v_add_co_u32 v8, vcc_lo, v8, v10
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v7, vcc_lo, v8, v7
v_add_co_ci_u32_e32 v7, vcc_lo, v10, v12, vcc_lo
v_add_co_ci_u32_e32 v8, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v7, v2
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, v9, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v8, s0, v2
v_mul_lo_u32 v11, s9, v2
v_mul_lo_u32 v10, s0, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, v8, v10
v_mul_lo_u32 v10, s0, v2
v_add_nc_u32_e32 v8, v8, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v11, v2, v10
v_mul_lo_u32 v12, v2, v8
v_mul_hi_u32 v13, v2, v8
v_mul_hi_u32 v14, v7, v10
v_mul_lo_u32 v10, v7, v10
v_mul_hi_u32 v15, v7, v8
v_mul_lo_u32 v8, v7, v8
v_add_co_u32 v11, vcc_lo, v11, v12
v_add_co_ci_u32_e32 v12, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v10, vcc_lo, v11, v10
v_add_co_ci_u32_e32 v10, vcc_lo, v12, v14, vcc_lo
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v15, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v10, v8
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v8
v_add_co_ci_u32_e32 v14, vcc_lo, v7, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_hi_u32 v15, v5, v2
v_mad_u64_u32 v[10:11], null, v6, v2, 0
v_mad_u64_u32 v[7:8], null, v5, v14, 0
v_mad_u64_u32 v[12:13], null, v6, v14, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, v15, v7
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v10
v_add_co_ci_u32_e32 v2, vcc_lo, v7, v11, vcc_lo
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v13, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v2, v12
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_lo_u32 v11, s3, v2
v_mad_u64_u32 v[7:8], null, s2, v2, 0
v_mul_lo_u32 v2, s2, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_co_u32 v5, vcc_lo, v5, v7
v_add3_u32 v2, v8, v2, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v8, v6, v2
v_subrev_co_ci_u32_e64 v7, s0, s3, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_sub_co_u32 v8, s0, v5, s2
v_sub_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
v_subrev_co_ci_u32_e64 v7, s0, 0, v7, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s2, v8
v_cndmask_b32_e64 v6, 0, -1, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s3, v7
v_cndmask_b32_e64 v10, 0, -1, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s2, v5
v_cndmask_b32_e64 v11, 0, -1, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s3, v2
v_cndmask_b32_e64 v12, 0, -1, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, s3, v7
v_cndmask_b32_e32 v6, v10, v6, vcc_lo
v_sub_co_u32 v7, vcc_lo, v8, s2
v_cmp_eq_u32_e32 vcc_lo, s3, v2
v_cndmask_b32_e32 v2, v12, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cmp_ne_u32_e32 vcc_lo, 0, v6
v_cndmask_b32_e32 v6, v8, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ne_u32_e32 vcc_lo, 0, v2
v_cndmask_b32_e32 v7, v5, v6, vcc_lo
.LBB0_10:
s_and_not1_saveexec_b32 s0, s8
s_cbranch_execz .LBB0_12
v_rcp_iflag_f32_e32 v2, v10
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v2, v2
v_mul_lo_u32 v6, s6, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v6, v2, v6
v_add_nc_u32_e32 v2, v2, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v2, v5, v2
v_mul_lo_u32 v2, v2, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v2, v5, v2
v_subrev_nc_u32_e32 v5, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
v_subrev_nc_u32_e32 v5, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_cndmask_b32_e32 v7, v2, v5, vcc_lo
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_mov_b32 s0, exec_lo
v_cmpx_eq_u32_e64 s1, v7
s_cbranch_execz .LBB0_3
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v2, s9, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_2
s_bcnt1_i32_b32 s9, s9
s_getpc_b64 s[10:11]
s_add_u32 s10, s10, devArrayIndex@rel32@lo+4
s_addc_u32 s11, s11, devArrayIndex@rel32@hi+12
v_mov_b32_e32 v5, s9
global_atomic_add_u32 v5, v1, v5, s[10:11] glc
s_branch .LBB0_2
.LBB0_15:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel modCubeRoot
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size modCubeRoot, .Lfunc_end0-modCubeRoot
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected devArrayIndex
.type devArrayIndex,@object
.section .bss,"aw",@nobits
.globl devArrayIndex
.p2align 2, 0x0
devArrayIndex:
.long 0
.size devArrayIndex, 4
.protected devAnswer
.type devAnswer,@object
.globl devAnswer
.p2align 4, 0x0
devAnswer:
.zero 24
.size devAnswer, 24
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym devArrayIndex
.addrsig_sym devAnswer
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: modCubeRoot
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: modCubeRoot.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00091e3e_00000000-6_ModCubeRoot.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z11modCubeRootiiii
.type _Z31__device_stub__Z11modCubeRootiiii, @function
_Z31__device_stub__Z11modCubeRootiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq modCubeRoot(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z11modCubeRootiiii, .-_Z31__device_stub__Z11modCubeRootiiii
.globl modCubeRoot
.type modCubeRoot, @function
modCubeRoot:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z11modCubeRootiiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size modCubeRoot, .-modCubeRoot
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "modCubeRoot"
.LC1:
.string "devArrayIndex"
.LC2:
.string "devAnswer"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq modCubeRoot(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13devArrayIndex(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $24, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9devAnswer(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL9devAnswer
.comm _ZL9devAnswer,24,16
.local _ZL13devArrayIndex
.comm _ZL13devArrayIndex,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "ModCubeRoot.hip"
.globl __device_stub__modCubeRoot # -- Begin function __device_stub__modCubeRoot
.p2align 4, 0x90
.type __device_stub__modCubeRoot,@function
__device_stub__modCubeRoot: # @__device_stub__modCubeRoot
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
leaq 12(%rsp), %rax
movq %rax, 64(%rsp)
leaq 8(%rsp), %rax
movq %rax, 72(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 64(%rsp), %r9
movl $modCubeRoot, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size __device_stub__modCubeRoot, .Lfunc_end0-__device_stub__modCubeRoot
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $modCubeRoot, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $devArrayIndex, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $devAnswer, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $24, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type devArrayIndex,@object # @devArrayIndex
.local devArrayIndex
.comm devArrayIndex,4,4
.type devAnswer,@object # @devAnswer
.local devAnswer
.comm devAnswer,24,16
.type modCubeRoot,@object # @modCubeRoot
.section .rodata,"a",@progbits
.globl modCubeRoot
.p2align 3, 0x0
modCubeRoot:
.quad __device_stub__modCubeRoot
.size modCubeRoot, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "modCubeRoot"
.size .L__unnamed_1, 12
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "devArrayIndex"
.size .L__unnamed_2, 14
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "devAnswer"
.size .L__unnamed_3, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__modCubeRoot
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym devArrayIndex
.addrsig_sym devAnswer
.addrsig_sym modCubeRoot
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cstdio>
#include <stdio.h>
int main(void)
{
int a[100000];
int b[100000];
int *ary1;
for(int i=0;i<100000;i++)
{
a[i] = i;
}
cudaMalloc((void**)&ary1 , 100000*sizeof(int));
cudaMemcpy(ary1, a, 100000*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(b, ary1 ,100000*sizeof(int),cudaMemcpyDeviceToHost);
for(int i =0; i < 100000; i++)
{
if(i/500 ==0)
{
printf("%d ",i);
}
}
cudaFree(ary1);
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cstdio>
#include <stdio.h>
int main(void)
{
int a[100000];
int b[100000];
int *ary1;
for(int i=0;i<100000;i++)
{
a[i] = i;
}
cudaMalloc((void**)&ary1 , 100000*sizeof(int));
cudaMemcpy(ary1, a, 100000*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(b, ary1 ,100000*sizeof(int),cudaMemcpyDeviceToHost);
for(int i =0; i < 100000; i++)
{
if(i/500 ==0)
{
printf("%d ",i);
}
}
cudaFree(ary1);
return 0;
} | .file "tmpxft_00131a50_00000000-6_mem.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq -798720(%rsp), %r11
.cfi_def_cfa 11, 798736
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $1312, %rsp
.cfi_def_cfa_offset 800048
movq %fs:40, %rax
movq %rax, 800024(%rsp)
xorl %eax, %eax
.L4:
movl %eax, 16(%rsp,%rax,4)
addq $1, %rax
cmpq $100000, %rax
jne .L4
leaq 8(%rsp), %rdi
movl $400000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rsi
movl $1, %ecx
movl $400000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 400016(%rsp), %rdi
movl $2, %ecx
movl $400000, %edx
movq 8(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %ebx
.L8:
leal 499(%rbx), %eax
cmpl $998, %eax
jbe .L12
addl $1, %ebx
cmpl $100000, %ebx
jne .L8
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 800024(%rsp), %rax
subq %fs:40, %rax
jne .L13
movl $0, %eax
addq $800032, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
movl %ebx, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
jmp .L8
.L13:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cstdio>
#include <stdio.h>
int main(void)
{
int a[100000];
int b[100000];
int *ary1;
for(int i=0;i<100000;i++)
{
a[i] = i;
}
cudaMalloc((void**)&ary1 , 100000*sizeof(int));
cudaMemcpy(ary1, a, 100000*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(b, ary1 ,100000*sizeof(int),cudaMemcpyDeviceToHost);
for(int i =0; i < 100000; i++)
{
if(i/500 ==0)
{
printf("%d ",i);
}
}
cudaFree(ary1);
return 0;
} | #include <hip/hip_runtime.h>
#include <cstdio>
#include <stdio.h>
int main(void)
{
int a[100000];
int b[100000];
int *ary1;
for(int i=0;i<100000;i++)
{
a[i] = i;
}
hipMalloc((void**)&ary1 , 100000*sizeof(int));
hipMemcpy(ary1, a, 100000*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(b, ary1 ,100000*sizeof(int),hipMemcpyDeviceToHost);
for(int i =0; i < 100000; i++)
{
if(i/500 ==0)
{
printf("%d ",i);
}
}
hipFree(ary1);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <cstdio>
#include <stdio.h>
int main(void)
{
int a[100000];
int b[100000];
int *ary1;
for(int i=0;i<100000;i++)
{
a[i] = i;
}
hipMalloc((void**)&ary1 , 100000*sizeof(int));
hipMemcpy(ary1, a, 100000*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(b, ary1 ,100000*sizeof(int),hipMemcpyDeviceToHost);
for(int i =0; i < 100000; i++)
{
if(i/500 ==0)
{
printf("%d ",i);
}
}
hipFree(ary1);
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <cstdio>
#include <stdio.h>
int main(void)
{
int a[100000];
int b[100000];
int *ary1;
for(int i=0;i<100000;i++)
{
a[i] = i;
}
hipMalloc((void**)&ary1 , 100000*sizeof(int));
hipMemcpy(ary1, a, 100000*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(b, ary1 ,100000*sizeof(int),hipMemcpyDeviceToHost);
for(int i =0; i < 100000; i++)
{
if(i/500 ==0)
{
printf("%d ",i);
}
}
hipFree(ary1);
return 0;
} | .text
.file "mem.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $800016, %rsp # imm = 0xC3510
.cfi_def_cfa_offset 800032
.cfi_offset %rbx, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movl %eax, 16(%rsp,%rax,4)
incq %rax
cmpq $100000, %rax # imm = 0x186A0
jne .LBB0_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $400000, %esi # imm = 0x61A80
callq hipMalloc
movq 8(%rsp), %rdi
leaq 16(%rsp), %rsi
movl $400000, %edx # imm = 0x61A80
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
leaq 400016(%rsp), %rdi
movl $400000, %edx # imm = 0x61A80
movl $2, %ecx
callq hipMemcpy
xorl %ebx, %ebx
jmp .LBB0_3
.p2align 4, 0x90
.LBB0_5: # in Loop: Header=BB0_3 Depth=1
incl %ebx
cmpl $100000, %ebx # imm = 0x186A0
je .LBB0_6
.LBB0_3: # =>This Inner Loop Header: Depth=1
cmpl $499, %ebx # imm = 0x1F3
ja .LBB0_5
# %bb.4: # in Loop: Header=BB0_3 Depth=1
movl $.L.str, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
jmp .LBB0_5
.LBB0_6:
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $800016, %rsp # imm = 0xC3510
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d "
.size .L.str, 4
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00131a50_00000000-6_mem.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq -798720(%rsp), %r11
.cfi_def_cfa 11, 798736
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $1312, %rsp
.cfi_def_cfa_offset 800048
movq %fs:40, %rax
movq %rax, 800024(%rsp)
xorl %eax, %eax
.L4:
movl %eax, 16(%rsp,%rax,4)
addq $1, %rax
cmpq $100000, %rax
jne .L4
leaq 8(%rsp), %rdi
movl $400000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rsi
movl $1, %ecx
movl $400000, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 400016(%rsp), %rdi
movl $2, %ecx
movl $400000, %edx
movq 8(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %ebx
.L8:
leal 499(%rbx), %eax
cmpl $998, %eax
jbe .L12
addl $1, %ebx
cmpl $100000, %ebx
jne .L8
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 800024(%rsp), %rax
subq %fs:40, %rax
jne .L13
movl $0, %eax
addq $800032, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
movl %ebx, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
jmp .L8
.L13:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "mem.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $800016, %rsp # imm = 0xC3510
.cfi_def_cfa_offset 800032
.cfi_offset %rbx, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movl %eax, 16(%rsp,%rax,4)
incq %rax
cmpq $100000, %rax # imm = 0x186A0
jne .LBB0_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $400000, %esi # imm = 0x61A80
callq hipMalloc
movq 8(%rsp), %rdi
leaq 16(%rsp), %rsi
movl $400000, %edx # imm = 0x61A80
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
leaq 400016(%rsp), %rdi
movl $400000, %edx # imm = 0x61A80
movl $2, %ecx
callq hipMemcpy
xorl %ebx, %ebx
jmp .LBB0_3
.p2align 4, 0x90
.LBB0_5: # in Loop: Header=BB0_3 Depth=1
incl %ebx
cmpl $100000, %ebx # imm = 0x186A0
je .LBB0_6
.LBB0_3: # =>This Inner Loop Header: Depth=1
cmpl $499, %ebx # imm = 0x1F3
ja .LBB0_5
# %bb.4: # in Loop: Header=BB0_3 Depth=1
movl $.L.str, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
jmp .LBB0_5
.LBB0_6:
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $800016, %rsp # imm = 0xC3510
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d "
.size .L.str, 4
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
//134217728
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
__global__ void ecuacion_kernel_outplace_p1(double *d_matA,double *d_matAT,double *d_matB,double *d_matBT, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
//transpuesta out-place A y B
if( (distA<n*n) && (distB<n*n) ){
d_matAT [distB*n + distA] = d_matA[distA*n + distB];
d_matBT [distB*n + distA] = d_matB[distA*n + distB];
}
}
__global__ void ecuacion_kernel_outplace_p2(double *d_matA,double *d_matB,double *d_matC,double *d_matAT,double *d_matBT, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
int k;
if (distA*n+distB <= (n*n - 1)){
//multiplicacion
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matBT[distB+k*n];
}
//suma
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matAT[distA*n+distB];
}
}
__global__ void ecuacion_kernel_inplace_suma (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
int k;
//multiplicacion
if (distA*n+distB < (n*n - 1)){
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matA[distA+distB*n];
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB*n+k];
}
}
}
__global__ void kernel_sum_Matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
//suma
if (distA*n+distB < (n*n)){
d_matC[distA*n+distB] += d_matA[distA*n+distB] + d_matB[distA+distB*n];
}
}
__global__ void kernel_transpuesta(double *m, int N){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int i = int((1 + sqrtf(1 + 8*tid)) / 2);
int j = tid - (i*(i-1)/2); int aux;
if ( (i<N) && (j<N) ){
aux = m[i*N + j] ;
m[i*N + j] = m[j*N + i];
m[j*N + i] = aux;
}
}
__global__ void kernel_mult_sum_matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
int k;
//multiplicacion
if (distA*n+distB < (n*n)){
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB*n+k];
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N, CUDABLK\n");
return 0;
}
//declaracion de variables
cudaError_t error;
unsigned int N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi (argv[2]), gridBlock;
unsigned long numBytes = sizeof(double)*N*N;
double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,*d_matAT,*d_matBT,timetick;
unsigned int i,j,k;
//inicializa variables para cpu
matA = (double *)malloc(numBytes);
matB = (double *)malloc(numBytes);
matC = (double *)malloc(numBytes);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
//inicializa variables para gpu
cudaMalloc((void **) &d_matA, numBytes);
cudaMalloc((void **) &d_matAT, numBytes);
cudaMalloc((void **) &d_matB, numBytes);
cudaMalloc((void **) &d_matBT, numBytes);
cudaMalloc((void **) &d_matC, numBytes);
gridBlock = (unsigned int)sqrt(N*N/CUDA_BLK/CUDA_BLK);
dim3 dimBlock(CUDA_BLK,CUDA_BLK); // Bloque bidimencional de hilos (*cb* hilos)
dim3 dimGrid(gridBlock,gridBlock); // Grid bidimencional (*ceil(n/cb)* bloques)
//--------------------------------cpu comienza ------------------------------------
//secuencial
timetick = dwalltime();
//multiplicacion
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
for(k = 0; k < N ;k++){
matC[i*N+j] += matA[i*N+k] * matB[j*N+k]; //multiplica a matB por fila, eso simula la matB transpuesta
}
}
}
//suma
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
matC[i*N+j] += matB[i*N+j] + matA[i+j*N];
}
}
printf("Tiempo para la ecuacion CPU: %f\n\n",dwalltime() - timetick);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//--------------------------------cpu termina ------------------------------------
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
//--------------------------------gpu out-place comienza ------------------------------------
timetick = dwalltime();
ecuacion_kernel_outplace_p1<<<dimGrid, dimBlock>>>(d_matA, d_matAT,d_matB,d_matBT, N);
cudaThreadSynchronize();
ecuacion_kernel_outplace_p2<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC,d_matAT,d_matBT, N);
cudaThreadSynchronize();
printf("Tiempo para la ecuacion out-place GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n\n",error);
cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//--------------------------------gpu out-place termina ------------------------------------
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
cudaFree(d_matAT);
cudaFree(d_matBT);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
cudaMalloc((void **) &d_matA, numBytes);
cudaMalloc((void **) &d_matB, numBytes);
cudaMalloc((void **) &d_matC, numBytes);
cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
//--------------------------------gpu in-place comienza ------------------------------------
timetick = dwalltime();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
cudaThreadSynchronize();
kernel_sum_Matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
cudaThreadSynchronize();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
cudaThreadSynchronize();
kernel_mult_sum_matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
cudaThreadSynchronize();
printf("Tiempo para la ecuacion in-place GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n\n",error);
cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
//--------------------------------gpu in-place termina ------------------------------------
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
/*
//imprime la matriz matC
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
free(matA);
free(matB);
free(matC);
return 0;
} | .file "tmpxft_000d35e5_00000000-6_ejercicio1_sin_tiempo_copia.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9dwalltimev
.type _Z9dwalltimev, @function
_Z9dwalltimev:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
divsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z9dwalltimev, .-_Z9dwalltimev
.globl _Z54__device_stub__Z27ecuacion_kernel_outplace_p1PdS_S_S_jPdS_S_S_j
.type _Z54__device_stub__Z27ecuacion_kernel_outplace_p1PdS_S_S_jPdS_S_S_j, @function
_Z54__device_stub__Z27ecuacion_kernel_outplace_p1PdS_S_S_jPdS_S_S_j:
.LFB2083:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z27ecuacion_kernel_outplace_p1PdS_S_S_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z54__device_stub__Z27ecuacion_kernel_outplace_p1PdS_S_S_jPdS_S_S_j, .-_Z54__device_stub__Z27ecuacion_kernel_outplace_p1PdS_S_S_jPdS_S_S_j
.globl _Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.type _Z27ecuacion_kernel_outplace_p1PdS_S_S_j, @function
_Z27ecuacion_kernel_outplace_p1PdS_S_S_j:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z27ecuacion_kernel_outplace_p1PdS_S_S_jPdS_S_S_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z27ecuacion_kernel_outplace_p1PdS_S_S_j, .-_Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.globl _Z56__device_stub__Z27ecuacion_kernel_outplace_p2PdS_S_S_S_jPdS_S_S_S_j
.type _Z56__device_stub__Z27ecuacion_kernel_outplace_p2PdS_S_S_S_jPdS_S_S_S_j, @function
_Z56__device_stub__Z27ecuacion_kernel_outplace_p2PdS_S_S_S_jPdS_S_S_S_j:
.LFB2085:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
leaq 4(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z56__device_stub__Z27ecuacion_kernel_outplace_p2PdS_S_S_S_jPdS_S_S_S_j, .-_Z56__device_stub__Z27ecuacion_kernel_outplace_p2PdS_S_S_S_jPdS_S_S_S_j
.globl _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.type _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j, @function
_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z56__device_stub__Z27ecuacion_kernel_outplace_p2PdS_S_S_S_jPdS_S_S_S_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j, .-_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.globl _Z53__device_stub__Z28ecuacion_kernel_inplace_sumaPdS_S_jPdS_S_j
.type _Z53__device_stub__Z28ecuacion_kernel_inplace_sumaPdS_S_jPdS_S_j, @function
_Z53__device_stub__Z28ecuacion_kernel_inplace_sumaPdS_S_jPdS_S_j:
.LFB2087:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L27
.L23:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L28
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z28ecuacion_kernel_inplace_sumaPdS_S_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L23
.L28:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z53__device_stub__Z28ecuacion_kernel_inplace_sumaPdS_S_jPdS_S_j, .-_Z53__device_stub__Z28ecuacion_kernel_inplace_sumaPdS_S_jPdS_S_j
.globl _Z28ecuacion_kernel_inplace_sumaPdS_S_j
.type _Z28ecuacion_kernel_inplace_sumaPdS_S_j, @function
_Z28ecuacion_kernel_inplace_sumaPdS_S_j:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z28ecuacion_kernel_inplace_sumaPdS_S_jPdS_S_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z28ecuacion_kernel_inplace_sumaPdS_S_j, .-_Z28ecuacion_kernel_inplace_sumaPdS_S_j
.globl _Z42__device_stub__Z17kernel_sum_MatrizPdS_S_jPdS_S_j
.type _Z42__device_stub__Z17kernel_sum_MatrizPdS_S_jPdS_S_j, @function
_Z42__device_stub__Z17kernel_sum_MatrizPdS_S_jPdS_S_j:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L35
.L31:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L36
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L35:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17kernel_sum_MatrizPdS_S_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L31
.L36:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z42__device_stub__Z17kernel_sum_MatrizPdS_S_jPdS_S_j, .-_Z42__device_stub__Z17kernel_sum_MatrizPdS_S_jPdS_S_j
.globl _Z17kernel_sum_MatrizPdS_S_j
.type _Z17kernel_sum_MatrizPdS_S_j, @function
_Z17kernel_sum_MatrizPdS_S_j:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z17kernel_sum_MatrizPdS_S_jPdS_S_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z17kernel_sum_MatrizPdS_S_j, .-_Z17kernel_sum_MatrizPdS_S_j
.globl _Z39__device_stub__Z18kernel_transpuestaPdiPdi
.type _Z39__device_stub__Z18kernel_transpuestaPdiPdi, @function
_Z39__device_stub__Z18kernel_transpuestaPdiPdi:
.LFB2091:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L43
.L39:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L44
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z18kernel_transpuestaPdi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L39
.L44:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2091:
.size _Z39__device_stub__Z18kernel_transpuestaPdiPdi, .-_Z39__device_stub__Z18kernel_transpuestaPdiPdi
.globl _Z18kernel_transpuestaPdi
.type _Z18kernel_transpuestaPdi, @function
_Z18kernel_transpuestaPdi:
.LFB2092:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z18kernel_transpuestaPdiPdi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _Z18kernel_transpuestaPdi, .-_Z18kernel_transpuestaPdi
.globl _Z47__device_stub__Z22kernel_mult_sum_matrizPdS_S_jPdS_S_j
.type _Z47__device_stub__Z22kernel_mult_sum_matrizPdS_S_jPdS_S_j, @function
_Z47__device_stub__Z22kernel_mult_sum_matrizPdS_S_jPdS_S_j:
.LFB2093:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L51
.L47:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L52
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L51:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z22kernel_mult_sum_matrizPdS_S_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L47
.L52:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2093:
.size _Z47__device_stub__Z22kernel_mult_sum_matrizPdS_S_jPdS_S_j, .-_Z47__device_stub__Z22kernel_mult_sum_matrizPdS_S_jPdS_S_j
.globl _Z22kernel_mult_sum_matrizPdS_S_j
.type _Z22kernel_mult_sum_matrizPdS_S_j, @function
_Z22kernel_mult_sum_matrizPdS_S_j:
.LFB2094:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z22kernel_mult_sum_matrizPdS_S_jPdS_S_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2094:
.size _Z22kernel_mult_sum_matrizPdS_S_j, .-_Z22kernel_mult_sum_matrizPdS_S_j
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Falta argumento: N, CUDABLK\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "Tiempo para la ecuacion CPU: %f\n\n"
.align 8
.LC4:
.string "Tiempo para la ecuacion out-place GPU: %f\n"
.section .rodata.str1.1
.LC5:
.string "error: %d\n\n"
.section .rodata.str1.8
.align 8
.LC6:
.string "Tiempo para la ecuacion in-place GPU: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
cmpl $3, %edi
je .L56
leaq .LC1(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
.L57:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L95
movl $0, %eax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L56:
.cfi_restore_state
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movq %rax, (%rsp)
movl %eax, 20(%rsp)
movl %eax, %r13d
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
cltq
movq %rax, 8(%rsp)
movl %r14d, %eax
imulq %rax, %rax
leaq 0(,%rax,8), %r15
movq %r15, %rdi
call malloc@PLT
movq %rax, %rbp
movq %r15, %rdi
call malloc@PLT
movq %rax, %rbx
movq %r15, %rdi
call malloc@PLT
movq %rax, %r12
movl %r14d, %eax
imull %r14d, %eax
movl %eax, 16(%rsp)
testl %eax, %eax
je .L58
movl %eax, %ecx
movl $0, %eax
.L61:
movl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
movsd %xmm0, 0(%rbp,%rax,8)
movsd %xmm0, (%rbx,%rax,8)
movq $0x000000000, (%r12,%rax,8)
addq $1, %rax
cmpq %rcx, %rax
jne .L61
.L58:
leaq 40(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl 16(%rsp), %r14d
movq %r14, %rax
movq 8(%rsp), %rsi
movl $0, %edx
divq %rsi
movl $0, %edx
divq %rsi
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
call sqrt@PLT
cvttsd2siq %xmm0, %rax
movq 8(%rsp), %rsi
movl %esi, 80(%rsp)
movl %esi, 84(%rsp)
movl $1, 88(%rsp)
movl %eax, 92(%rsp)
movl %eax, 96(%rsp)
movl $1, 100(%rsp)
call _Z9dwalltimev
movsd %xmm0, 8(%rsp)
cmpl $0, (%rsp)
je .L65
movl (%rsp), %edi
movl $0, %edx
movl $0, %r10d
movl $0, %r8d
movq %r14, 24(%rsp)
.L64:
movl %edx, %r9d
movl %r10d, %esi
.L69:
movl %esi, %eax
leaq (%r12,%rax,8), %r11
movsd (%r11), %xmm1
movl %r10d, %eax
.L66:
leal (%rax,%r9), %r14d
movl %eax, %ecx
movsd (%rbx,%r14,8), %xmm0
mulsd 0(%rbp,%rcx,8), %xmm0
addsd %xmm0, %xmm1
addl $1, %eax
cmpl %edi, %eax
jne .L66
movsd %xmm1, (%r11)
addl $1, %esi
addl %r13d, %r9d
cmpl %edi, %esi
jne .L69
leal 1(%r8), %r9d
addl %r13d, %edi
addl %r13d, %r10d
subl %r13d, %edx
cmpl %r9d, %r13d
je .L87
movl %r9d, %r8d
jmp .L64
.L87:
movl %r9d, %r10d
movl $0, %edi
movl $0, %esi
.L68:
movl %esi, %edx
movl %edi, %eax
.L70:
movl %eax, %r11d
leaq (%r12,%r11,8), %rcx
movl %edx, %r14d
movsd 0(%rbp,%r14,8), %xmm0
addsd (%rbx,%r11,8), %xmm0
addsd (%rcx), %xmm0
movsd %xmm0, (%rcx)
addl $1, %eax
addl %r9d, %edx
cmpl %eax, %r10d
jne .L70
leal 1(%rsi), %eax
addl %r9d, %edi
addl %r9d, %r10d
cmpl %esi, %r8d
je .L93
movl %eax, %esi
jmp .L68
.L93:
movq 24(%rsp), %r14
.L65:
call _Z9dwalltimev
subsd 8(%rsp), %xmm0
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
cmpl $0, 16(%rsp)
je .L71
movl $0, %eax
.L74:
movl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
movsd %xmm0, 0(%rbp,%rax,8)
movsd %xmm0, (%rbx,%rax,8)
movq $0x000000000, (%r12,%rax,8)
addq $1, %rax
cmpq %rax, %r14
jne .L74
.L71:
movl $1, %ecx
movq %r15, %rdx
movq %rbp, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r15, %rdx
movq %rbx, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r15, %rdx
movq %r12, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
call _Z9dwalltimev
movsd %xmm0, 8(%rsp)
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl 100(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L96
.L75:
call cudaThreadSynchronize@PLT
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl 100(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L97
.L76:
call cudaThreadSynchronize@PLT
call _Z9dwalltimev
subsd 8(%rsp), %xmm0
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
call cudaGetLastError@PLT
movl %eax, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, %ecx
movq %r15, %rdx
movq 56(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
cmpl $0, 16(%rsp)
je .L77
movl $0, %eax
.L80:
movl %eax, %edx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
movsd %xmm0, 0(%rbp,%rax,8)
movsd %xmm0, (%rbx,%rax,8)
movq $0x000000000, (%r12,%rax,8)
addq $1, %rax
cmpq %rax, %r14
jne .L80
.L77:
leaq 40(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r15, %rdx
movq %rbp, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r15, %rdx
movq %rbx, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r15, %rdx
movq %r12, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
call _Z9dwalltimev
movsd %xmm0, (%rsp)
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl 100(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L98
.L81:
call cudaThreadSynchronize@PLT
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl 100(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L99
.L82:
call cudaThreadSynchronize@PLT
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl 100(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L100
.L83:
call cudaThreadSynchronize@PLT
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl 100(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L101
.L84:
call cudaThreadSynchronize@PLT
call _Z9dwalltimev
subsd (%rsp), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
call cudaGetLastError@PLT
movl %eax, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, %ecx
movq %r15, %rdx
movq 56(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
jmp .L57
.L96:
movl (%rsp), %r8d
movq 72(%rsp), %rcx
movq 48(%rsp), %rdx
movq 64(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z54__device_stub__Z27ecuacion_kernel_outplace_p1PdS_S_S_jPdS_S_S_j
jmp .L75
.L97:
movl (%rsp), %r9d
movq 72(%rsp), %r8
movq 64(%rsp), %rcx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z56__device_stub__Z27ecuacion_kernel_outplace_p2PdS_S_S_S_jPdS_S_S_S_j
jmp .L76
.L98:
movl 20(%rsp), %esi
movq 40(%rsp), %rdi
call _Z39__device_stub__Z18kernel_transpuestaPdiPdi
jmp .L81
.L99:
movl %r13d, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z42__device_stub__Z17kernel_sum_MatrizPdS_S_jPdS_S_j
jmp .L82
.L100:
movl 20(%rsp), %esi
movq 40(%rsp), %rdi
call _Z39__device_stub__Z18kernel_transpuestaPdiPdi
jmp .L83
.L101:
movl %r13d, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z47__device_stub__Z22kernel_mult_sum_matrizPdS_S_jPdS_S_j
jmp .L84
.L95:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC7:
.string "_Z22kernel_mult_sum_matrizPdS_S_j"
.section .rodata.str1.1
.LC8:
.string "_Z18kernel_transpuestaPdi"
.LC9:
.string "_Z17kernel_sum_MatrizPdS_S_j"
.section .rodata.str1.8
.align 8
.LC10:
.string "_Z28ecuacion_kernel_inplace_sumaPdS_S_j"
.align 8
.LC11:
.string "_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j"
.align 8
.LC12:
.string "_Z27ecuacion_kernel_outplace_p1PdS_S_S_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2096:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z22kernel_mult_sum_matrizPdS_S_j(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z18kernel_transpuestaPdi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z17kernel_sum_MatrizPdS_S_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z28ecuacion_kernel_inplace_sumaPdS_S_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z27ecuacion_kernel_outplace_p1PdS_S_S_j(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
//134217728
// Returns the current wall-clock time in seconds (microsecond resolution),
// suitable for computing elapsed intervals by subtraction.
double dwalltime(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + now.tv_usec / 1000000.0;
}
// Out-of-place transpose of A and B: AT[j][i] = A[i][j] and BT[j][i] = B[i][j].
// Expects a 2D launch whose grid covers an n x n index space.
__global__ void ecuacion_kernel_outplace_p1(double *d_matA,double *d_matAT,double *d_matB,double *d_matBT, unsigned int n){
    int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
    int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
    // FIX: the guard used to be (distA<n*n && distB<n*n), which lets a row or
    // column index grow up to n*n-1, so d_matA[distA*n + distB] could read up
    // to ~n^3 elements past the buffer whenever the grid overshoots n.
    // Row and column must each be bounded by n.
    if( ((unsigned int)distA < n) && ((unsigned int)distB < n) ){
        d_matAT [distB*n + distA] = d_matA[distA*n + distB];
        d_matBT [distB*n + distA] = d_matB[distA*n + distB];
    }
}
// Second out-of-place step: C[i][j] += sum_k A[i][k] * BT[j + k*n], then
// C[i][j] += B[i][j] + AT[i][j]. Expects a 2D launch covering n x n.
__global__ void ecuacion_kernel_outplace_p2(double *d_matA,double *d_matB,double *d_matC,double *d_matAT,double *d_matBT, unsigned int n){
    int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
    int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
    unsigned int k;
    // FIX: the guard was (distA*n+distB <= n*n - 1). For n == 0 the unsigned
    // expression n*n - 1 underflows to UINT_MAX, letting every thread read out
    // of bounds; the flat guard also let distB >= n wrap into the next row.
    if ( ((unsigned int)distA < n) && ((unsigned int)distB < n) ){
        unsigned int idx = distA*n + distB;
        // Accumulate in a register: one global read + one write instead of a
        // read-modify-write of d_matC per k iteration. Same final value.
        double acc = d_matC[idx];
        for(k = 0; k < n; k++){
            acc += d_matA[distA*n+k] * d_matBT[distB + k*n];
        }
        acc += d_matB[idx] + d_matAT[idx];
        d_matC[idx] = acc;
    }
}
// Fused in-place step: C[i][j] += B[i][j] + A[j][i], then
// C[i][j] += sum_k A[i][k] * B[j][k] (B indexed by rows, i.e. treated as
// already transposed). Not launched from main() in this file.
__global__ void ecuacion_kernel_inplace_suma (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
int k;
// NOTE(review): the guard is strictly '< n*n - 1', so the last element
// (flat index n*n-1) is never processed — looks like an off-by-one versus the
// sibling kernels that use '< n*n'. Also, for n == 0 the unsigned n*n - 1
// underflows to UINT_MAX and every thread passes. TODO confirm intent.
// multiplication + addition
if (distA*n+distB < (n*n - 1)){
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matA[distA+distB*n];
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB*n+k];
}
}
}
// Element-wise sum into C: C[i][j] += A[i][j] + B[j][i] (B read transposed).
__global__ void kernel_sum_Matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = row * n + col;
    if (idx >= n * n) return; // guard the grid tail
    d_matC[idx] += d_matA[idx] + d_matB[row + col * n];
}
// In-place transpose: each thread swaps one lower-triangle element (i,j) with
// its mirror (j,i). tid enumerates the triangle; (i,j) is recovered via the
// triangular-number inverse.
__global__ void kernel_transpuesta(double *m, int N){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    // NOTE(review): sqrtf loses integer precision once 1+8*tid exceeds 2^24,
    // which can misassign (i,j) for very large matrices — TODO confirm range.
    int i = int((1 + sqrtf(1 + 8*tid)) / 2);
    int j = tid - (i*(i-1)/2);
    // FIX: 'aux' was declared int, silently truncating the double being swapped
    // (e.g. 2.5 written back as 2.0). It must hold a double.
    double aux;
    if ( (i<N) && (j<N) ){
        aux = m[i*N + j];
        m[i*N + j] = m[j*N + i];
        m[j*N + i] = aux;
    }
}
// Accumulating product: C[i][j] += sum_k A[i][k] * B[j][k]
// (B is indexed row-wise, i.e. treated as already transposed).
__global__ void kernel_mult_sum_matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = row * n + col;
    if (idx >= n * n) return; // guard the grid tail
    for (unsigned int k = 0; k < n; ++k){
        d_matC[idx] += d_matA[row * n + k] * d_matB[col * n + k];
    }
}
// Driver. argv = {prog, N, CUDA_BLK}. Computes the same matrix equation three
// ways — sequential CPU, GPU out-of-place (explicit transposes), GPU in-place —
// timing and printing each variant plus the last CUDA error code.
int main(int argc, char *argv[]){
    if (argc != 3){
        printf("Falta argumento: N, CUDABLK\n");
        return 0;
    }
    // variable declarations
    cudaError_t error;
    unsigned int N = atoi (argv[1]);
    unsigned long CUDA_BLK = atoi (argv[2]), gridBlock;
    unsigned long numBytes = sizeof(double)*N*N;
    double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,*d_matAT,*d_matBT,timetick;
    unsigned int i,j,k;
    // initialize host (CPU) buffers
    matA = (double *)malloc(numBytes);
    matB = (double *)malloc(numBytes);
    matC = (double *)malloc(numBytes);
    for (i = 0; i < N*N; i++){
        matA[i] = i;
        matB[i] = i;
        matC[i] = 0;
    }
    // initialize device (GPU) buffers (return codes unchecked, as before)
    cudaMalloc((void **) &d_matA, numBytes);
    cudaMalloc((void **) &d_matAT, numBytes);
    cudaMalloc((void **) &d_matB, numBytes);
    cudaMalloc((void **) &d_matBT, numBytes);
    cudaMalloc((void **) &d_matC, numBytes);
    // NOTE(review): sqrt + integer truncation — if N is not a multiple of
    // CUDA_BLK the grid under-covers the matrix. TODO confirm expected inputs.
    gridBlock = (unsigned int)sqrt(N*N/CUDA_BLK/CUDA_BLK);
    dim3 dimBlock(CUDA_BLK,CUDA_BLK); // two-dimensional thread block
    dim3 dimGrid(gridBlock,gridBlock); // two-dimensional grid
    //-------------------------------- CPU starts ------------------------------------
    // sequential reference
    timetick = dwalltime();
    // multiplication
    for(i = 0; i < N; i++){
        for(j = 0; j < N; j++){
            for(k = 0; k < N ;k++){
                matC[i*N+j] += matA[i*N+k] * matB[j*N+k]; // B read by rows, simulating B transposed
            }
        }
    }
    // addition
    for(i = 0; i < N; i++){
        for(j = 0; j < N; j++){
            matC[i*N+j] += matB[i*N+j] + matA[i+j*N];
        }
    }
    printf("Tiempo para la ecuacion CPU: %f\n\n",dwalltime() - timetick);
    //-------------------------------- CPU ends ------------------------------------
    // re-seed inputs so the GPU run starts from the same data
    for (i = 0; i < N*N; i++){
        matA[i] = i;
        matB[i] = i;
        matC[i] = 0;
    }
    cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    //-------------------------------- GPU out-of-place starts ------------------------------------
    timetick = dwalltime();
    ecuacion_kernel_outplace_p1<<<dimGrid, dimBlock>>>(d_matA, d_matAT,d_matB,d_matBT, N);
    // FIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12);
    // cudaDeviceSynchronize() is the drop-in replacement (the HIP port of this
    // file already uses hipDeviceSynchronize).
    cudaDeviceSynchronize();
    ecuacion_kernel_outplace_p2<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC,d_matAT,d_matBT, N);
    cudaDeviceSynchronize();
    printf("Tiempo para la ecuacion out-place GPU: %f\n",dwalltime() - timetick);
    error = cudaGetLastError();
    printf("error: %d\n\n",error);
    cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
    //-------------------------------- GPU out-of-place ends ------------------------------------
    cudaFree(d_matA);
    cudaFree(d_matB);
    cudaFree(d_matC);
    cudaFree(d_matAT);
    cudaFree(d_matBT);
    for (i = 0; i < N*N; i++){
        matA[i] = i;
        matB[i] = i;
        matC[i] = 0;
    }
    cudaMalloc((void **) &d_matA, numBytes);
    cudaMalloc((void **) &d_matB, numBytes);
    cudaMalloc((void **) &d_matC, numBytes);
    cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    //-------------------------------- GPU in-place starts ------------------------------------
    timetick = dwalltime();
    kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
    cudaDeviceSynchronize();
    kernel_sum_Matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
    cudaDeviceSynchronize();
    kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
    cudaDeviceSynchronize();
    kernel_mult_sum_matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
    cudaDeviceSynchronize();
    printf("Tiempo para la ecuacion in-place GPU: %f\n",dwalltime() - timetick);
    error = cudaGetLastError();
    printf("error: %d\n\n",error);
    cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
    //-------------------------------- GPU in-place ends ------------------------------------
    cudaFree(d_matA);
    cudaFree(d_matB);
    cudaFree(d_matC);
    free(matA);
    free(matB);
    free(matC);
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
//134217728
// Returns the current wall-clock time in seconds (microsecond resolution),
// suitable for computing elapsed intervals by subtraction.
double dwalltime(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + now.tv_usec / 1000000.0;
}
// Out-of-place transpose of A and B: AT[j][i] = A[i][j] and BT[j][i] = B[i][j].
// Expects a 2D launch whose grid covers an n x n index space.
__global__ void ecuacion_kernel_outplace_p1(double *d_matA,double *d_matAT,double *d_matB,double *d_matBT, unsigned int n){
    int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
    int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
    // FIX: the guard used to be (distA<n*n && distB<n*n), which lets a row or
    // column index grow up to n*n-1, so d_matA[distA*n + distB] could read up
    // to ~n^3 elements past the buffer whenever the grid overshoots n.
    // Row and column must each be bounded by n.
    if( ((unsigned int)distA < n) && ((unsigned int)distB < n) ){
        d_matAT [distB*n + distA] = d_matA[distA*n + distB];
        d_matBT [distB*n + distA] = d_matB[distA*n + distB];
    }
}
// Second out-of-place step: C[i][j] += sum_k A[i][k] * BT[j + k*n], then
// C[i][j] += B[i][j] + AT[i][j]. Expects a 2D launch covering n x n.
__global__ void ecuacion_kernel_outplace_p2(double *d_matA,double *d_matB,double *d_matC,double *d_matAT,double *d_matBT, unsigned int n){
    int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
    int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
    unsigned int k;
    // FIX: the guard was (distA*n+distB <= n*n - 1). For n == 0 the unsigned
    // expression n*n - 1 underflows to UINT_MAX, letting every thread read out
    // of bounds; the flat guard also let distB >= n wrap into the next row.
    if ( ((unsigned int)distA < n) && ((unsigned int)distB < n) ){
        unsigned int idx = distA*n + distB;
        // Accumulate in a register: one global read + one write instead of a
        // read-modify-write of d_matC per k iteration. Same final value.
        double acc = d_matC[idx];
        for(k = 0; k < n; k++){
            acc += d_matA[distA*n+k] * d_matBT[distB + k*n];
        }
        acc += d_matB[idx] + d_matAT[idx];
        d_matC[idx] = acc;
    }
}
// Fused in-place step: C[i][j] += B[i][j] + A[j][i], then
// C[i][j] += sum_k A[i][k] * B[j][k] (B indexed by rows, i.e. treated as
// already transposed). Not launched from main() in this file.
__global__ void ecuacion_kernel_inplace_suma (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
int k;
// NOTE(review): the guard is strictly '< n*n - 1', so the last element
// (flat index n*n-1) is never processed — looks like an off-by-one versus the
// sibling kernels that use '< n*n'. Also, for n == 0 the unsigned n*n - 1
// underflows to UINT_MAX and every thread passes. TODO confirm intent.
// multiplication + addition
if (distA*n+distB < (n*n - 1)){
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matA[distA+distB*n];
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB*n+k];
}
}
}
// Element-wise sum into C: C[i][j] += A[i][j] + B[j][i] (B read transposed).
__global__ void kernel_sum_Matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = row * n + col;
    if (idx >= n * n) return; // guard the grid tail
    d_matC[idx] += d_matA[idx] + d_matB[row + col * n];
}
// In-place transpose: each thread swaps one lower-triangle element (i,j) with
// its mirror (j,i). tid enumerates the triangle; (i,j) is recovered via the
// triangular-number inverse.
__global__ void kernel_transpuesta(double *m, int N){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    // NOTE(review): sqrtf loses integer precision once 1+8*tid exceeds 2^24,
    // which can misassign (i,j) for very large matrices — TODO confirm range.
    int i = int((1 + sqrtf(1 + 8*tid)) / 2);
    int j = tid - (i*(i-1)/2);
    // FIX: 'aux' was declared int, silently truncating the double being swapped
    // (e.g. 2.5 written back as 2.0). It must hold a double.
    double aux;
    if ( (i<N) && (j<N) ){
        aux = m[i*N + j];
        m[i*N + j] = m[j*N + i];
        m[j*N + i] = aux;
    }
}
// Accumulating product: C[i][j] += sum_k A[i][k] * B[j][k]
// (B is indexed row-wise, i.e. treated as already transposed).
__global__ void kernel_mult_sum_matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = row * n + col;
    if (idx >= n * n) return; // guard the grid tail
    for (unsigned int k = 0; k < n; ++k){
        d_matC[idx] += d_matA[row * n + k] * d_matB[col * n + k];
    }
}
// Driver. argv = {prog, N, CUDA_BLK}. Computes the same matrix equation three
// ways — sequential CPU, GPU out-of-place (explicit transposes), GPU in-place —
// timing and printing each variant plus the last HIP error code.
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N, CUDABLK\n");
return 0;
}
// variable declarations
hipError_t error;
unsigned int N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi (argv[2]), gridBlock;
unsigned long numBytes = sizeof(double)*N*N;
double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,*d_matAT,*d_matBT,timetick;
unsigned int i,j,k;
// initialize host (CPU) buffers
matA = (double *)malloc(numBytes);
matB = (double *)malloc(numBytes);
matC = (double *)malloc(numBytes);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
// initialize device (GPU) buffers (return codes unchecked)
hipMalloc((void **) &d_matA, numBytes);
hipMalloc((void **) &d_matAT, numBytes);
hipMalloc((void **) &d_matB, numBytes);
hipMalloc((void **) &d_matBT, numBytes);
hipMalloc((void **) &d_matC, numBytes);
// NOTE(review): sqrt + integer truncation — if N is not a multiple of
// CUDA_BLK the grid under-covers the matrix. TODO confirm expected inputs.
gridBlock = (unsigned int)sqrt(N*N/CUDA_BLK/CUDA_BLK);
dim3 dimBlock(CUDA_BLK,CUDA_BLK); // two-dimensional thread block
dim3 dimGrid(gridBlock,gridBlock); // two-dimensional grid
//--------------------------------CPU starts ------------------------------------
// sequential reference
timetick = dwalltime();
// multiplication
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
for(k = 0; k < N ;k++){
matC[i*N+j] += matA[i*N+k] * matB[j*N+k]; // B read by rows, simulating B transposed
}
}
}
// addition
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
matC[i*N+j] += matB[i*N+j] + matA[i+j*N];
}
}
printf("Tiempo para la ecuacion CPU: %f\n\n",dwalltime() - timetick);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//--------------------------------CPU ends ------------------------------------
// re-seed inputs so the GPU run starts from the same data
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
hipMemcpy(d_matA, matA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matB, matB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matC, matC, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
//--------------------------------GPU out-of-place starts ------------------------------------
timetick = dwalltime();
ecuacion_kernel_outplace_p1<<<dimGrid, dimBlock>>>(d_matA, d_matAT,d_matB,d_matBT, N);
hipDeviceSynchronize();
ecuacion_kernel_outplace_p2<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC,d_matAT,d_matBT, N);
hipDeviceSynchronize();
printf("Tiempo para la ecuacion out-place GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n\n",error);
hipMemcpy(matC, d_matC, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//--------------------------------GPU out-of-place ends ------------------------------------
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
hipFree(d_matAT);
hipFree(d_matBT);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
hipMalloc((void **) &d_matA, numBytes);
hipMalloc((void **) &d_matB, numBytes);
hipMalloc((void **) &d_matC, numBytes);
hipMemcpy(d_matA, matA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matB, matB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matC, matC, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
//--------------------------------GPU in-place starts ------------------------------------
timetick = dwalltime();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
hipDeviceSynchronize();
kernel_sum_Matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
hipDeviceSynchronize();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
hipDeviceSynchronize();
kernel_mult_sum_matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
hipDeviceSynchronize();
printf("Tiempo para la ecuacion in-place GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n\n",error);
hipMemcpy(matC, d_matC, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
//--------------------------------GPU in-place ends ------------------------------------
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
/*
//print matrix matC
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
free(matA);
free(matB);
free(matC);
return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
//134217728
// Returns the current wall-clock time in seconds (microsecond resolution),
// suitable for computing elapsed intervals by subtraction.
double dwalltime(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + now.tv_usec / 1000000.0;
}
// Out-of-place transpose of A and B: AT[j][i] = A[i][j] and BT[j][i] = B[i][j].
// Expects a 2D launch whose grid covers an n x n index space.
__global__ void ecuacion_kernel_outplace_p1(double *d_matA,double *d_matAT,double *d_matB,double *d_matBT, unsigned int n){
    int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
    int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
    // FIX: the guard used to be (distA<n*n && distB<n*n), which lets a row or
    // column index grow up to n*n-1, so d_matA[distA*n + distB] could read up
    // to ~n^3 elements past the buffer whenever the grid overshoots n.
    // Row and column must each be bounded by n.
    if( ((unsigned int)distA < n) && ((unsigned int)distB < n) ){
        d_matAT [distB*n + distA] = d_matA[distA*n + distB];
        d_matBT [distB*n + distA] = d_matB[distA*n + distB];
    }
}
// Second out-of-place step: C[i][j] += sum_k A[i][k] * BT[j + k*n], then
// C[i][j] += B[i][j] + AT[i][j]. Expects a 2D launch covering n x n.
__global__ void ecuacion_kernel_outplace_p2(double *d_matA,double *d_matB,double *d_matC,double *d_matAT,double *d_matBT, unsigned int n){
    int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
    int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
    unsigned int k;
    // FIX: the guard was (distA*n+distB <= n*n - 1). For n == 0 the unsigned
    // expression n*n - 1 underflows to UINT_MAX, letting every thread read out
    // of bounds; the flat guard also let distB >= n wrap into the next row.
    if ( ((unsigned int)distA < n) && ((unsigned int)distB < n) ){
        unsigned int idx = distA*n + distB;
        // Accumulate in a register: one global read + one write instead of a
        // read-modify-write of d_matC per k iteration. Same final value.
        double acc = d_matC[idx];
        for(k = 0; k < n; k++){
            acc += d_matA[distA*n+k] * d_matBT[distB + k*n];
        }
        acc += d_matB[idx] + d_matAT[idx];
        d_matC[idx] = acc;
    }
}
// Fused in-place step: C[i][j] += B[i][j] + A[j][i], then
// C[i][j] += sum_k A[i][k] * B[j][k] (B indexed by rows, i.e. treated as
// already transposed). Not launched from main() in this file.
__global__ void ecuacion_kernel_inplace_suma (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; // row i
int distB = blockIdx.x * blockDim.x + threadIdx.x; // column j
int k;
// NOTE(review): the guard is strictly '< n*n - 1', so the last element
// (flat index n*n-1) is never processed — looks like an off-by-one versus the
// sibling kernels that use '< n*n'. Also, for n == 0 the unsigned n*n - 1
// underflows to UINT_MAX and every thread passes. TODO confirm intent.
// multiplication + addition
if (distA*n+distB < (n*n - 1)){
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matA[distA+distB*n];
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB*n+k];
}
}
}
// Element-wise sum into C: C[i][j] += A[i][j] + B[j][i] (B read transposed).
__global__ void kernel_sum_Matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = row * n + col;
    if (idx >= n * n) return; // guard the grid tail
    d_matC[idx] += d_matA[idx] + d_matB[row + col * n];
}
// In-place transpose: each thread swaps one lower-triangle element (i,j) with
// its mirror (j,i). tid enumerates the triangle; (i,j) is recovered via the
// triangular-number inverse.
__global__ void kernel_transpuesta(double *m, int N){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    // NOTE(review): sqrtf loses integer precision once 1+8*tid exceeds 2^24,
    // which can misassign (i,j) for very large matrices — TODO confirm range.
    int i = int((1 + sqrtf(1 + 8*tid)) / 2);
    int j = tid - (i*(i-1)/2);
    // FIX: 'aux' was declared int, silently truncating the double being swapped
    // (e.g. 2.5 written back as 2.0). It must hold a double.
    double aux;
    if ( (i<N) && (j<N) ){
        aux = m[i*N + j];
        m[i*N + j] = m[j*N + i];
        m[j*N + i] = aux;
    }
}
// Accumulating product: C[i][j] += sum_k A[i][k] * B[j][k]
// (B is indexed row-wise, i.e. treated as already transposed).
__global__ void kernel_mult_sum_matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = row * n + col;
    if (idx >= n * n) return; // guard the grid tail
    for (unsigned int k = 0; k < n; ++k){
        d_matC[idx] += d_matA[row * n + k] * d_matB[col * n + k];
    }
}
// Driver. argv = {prog, N, CUDA_BLK}. Computes the same matrix equation three
// ways — sequential CPU, GPU out-of-place (explicit transposes), GPU in-place —
// timing and printing each variant plus the last HIP error code.
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N, CUDABLK\n");
return 0;
}
// variable declarations
hipError_t error;
unsigned int N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi (argv[2]), gridBlock;
unsigned long numBytes = sizeof(double)*N*N;
double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,*d_matAT,*d_matBT,timetick;
unsigned int i,j,k;
// initialize host (CPU) buffers
matA = (double *)malloc(numBytes);
matB = (double *)malloc(numBytes);
matC = (double *)malloc(numBytes);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
// initialize device (GPU) buffers (return codes unchecked)
hipMalloc((void **) &d_matA, numBytes);
hipMalloc((void **) &d_matAT, numBytes);
hipMalloc((void **) &d_matB, numBytes);
hipMalloc((void **) &d_matBT, numBytes);
hipMalloc((void **) &d_matC, numBytes);
// NOTE(review): sqrt + integer truncation — if N is not a multiple of
// CUDA_BLK the grid under-covers the matrix. TODO confirm expected inputs.
gridBlock = (unsigned int)sqrt(N*N/CUDA_BLK/CUDA_BLK);
dim3 dimBlock(CUDA_BLK,CUDA_BLK); // two-dimensional thread block
dim3 dimGrid(gridBlock,gridBlock); // two-dimensional grid
//--------------------------------CPU starts ------------------------------------
// sequential reference
timetick = dwalltime();
// multiplication
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
for(k = 0; k < N ;k++){
matC[i*N+j] += matA[i*N+k] * matB[j*N+k]; // B read by rows, simulating B transposed
}
}
}
// addition
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
matC[i*N+j] += matB[i*N+j] + matA[i+j*N];
}
}
printf("Tiempo para la ecuacion CPU: %f\n\n",dwalltime() - timetick);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//--------------------------------CPU ends ------------------------------------
// re-seed inputs so the GPU run starts from the same data
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
hipMemcpy(d_matA, matA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matB, matB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matC, matC, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
//--------------------------------GPU out-of-place starts ------------------------------------
timetick = dwalltime();
ecuacion_kernel_outplace_p1<<<dimGrid, dimBlock>>>(d_matA, d_matAT,d_matB,d_matBT, N);
hipDeviceSynchronize();
ecuacion_kernel_outplace_p2<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC,d_matAT,d_matBT, N);
hipDeviceSynchronize();
printf("Tiempo para la ecuacion out-place GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n\n",error);
hipMemcpy(matC, d_matC, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//--------------------------------GPU out-of-place ends ------------------------------------
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
hipFree(d_matAT);
hipFree(d_matBT);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
hipMalloc((void **) &d_matA, numBytes);
hipMalloc((void **) &d_matB, numBytes);
hipMalloc((void **) &d_matC, numBytes);
hipMemcpy(d_matA, matA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matB, matB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matC, matC, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
//--------------------------------GPU in-place starts ------------------------------------
timetick = dwalltime();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
hipDeviceSynchronize();
kernel_sum_Matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
hipDeviceSynchronize();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
hipDeviceSynchronize();
kernel_mult_sum_matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
hipDeviceSynchronize();
printf("Tiempo para la ecuacion in-place GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n\n",error);
hipMemcpy(matC, d_matC, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
//--------------------------------GPU in-place ends ------------------------------------
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
/*
//print matrix matC
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
free(matA);
free(matB);
free(matC);
return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.globl _Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.p2align 8
.type _Z27ecuacion_kernel_outplace_p1PdS_S_S_j,@function
_Z27ecuacion_kernel_outplace_p1PdS_S_S_j:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x34
s_load_b32 s2, s[0:1], 0x20
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s3, 16
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s3, v[3:4]
s_mul_i32 s3, s2, s2
v_max_u32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u32_e32 vcc_lo, s3, v2
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b256 s[4:11], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v0, s2, v[1:2]
v_mov_b32_e32 v3, 0
v_mad_u64_u32 v[8:9], null, v1, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_mov_b32_e32 v9, v3
v_lshlrev_b64 v[4:5], 3, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 3, v[8:9]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
global_load_b64 v[6:7], v[6:7], off
v_add_co_u32 v4, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v5, vcc_lo
v_add_co_u32 v0, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s11, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b64 v[2:3], v[6:7], off
global_load_b64 v[2:3], v[4:5], off
s_waitcnt vmcnt(0)
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z27ecuacion_kernel_outplace_p1PdS_S_S_j, .Lfunc_end0-_Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.globl _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.p2align 8
.type _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j,@function
_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x3c
s_load_b32 s8, s[0:1], 0x28
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
v_mad_u64_u32 v[4:5], null, s15, s3, v[1:2]
v_mad_u64_u32 v[2:3], null, s14, s2, v[0:1]
s_mul_i32 s2, s8, s8
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_add_i32 s2, s2, -1
v_mul_lo_u32 v8, v4, s8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v8, v2
v_cmp_ge_u32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB1_5
s_load_b64 s[2:3], s[0:1], 0x10
s_cmp_eq_u32 s8, 0
s_mov_b32 s9, 0
s_cbranch_scc1 .LBB1_4
v_mov_b32_e32 v1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 3, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s2, v4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
global_load_b64 v[6:7], v[4:5], off
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x0
s_load_b64 s[6:7], s[0:1], 0x20
.p2align 6
.LBB1_3:
v_dual_mov_b32 v10, 0 :: v_dual_add_nc_u32 v9, s9, v8
s_add_i32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_lg_u32 s8, s9
v_mov_b32_e32 v3, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[9:10], 3, v[9:10]
v_lshlrev_b64 v[11:12], 3, v[2:3]
v_add_nc_u32_e32 v2, s8, v2
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v9, vcc_lo, s4, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v11, vcc_lo, s6, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s7, v12, vcc_lo
global_load_b64 v[9:10], v[9:10], off
global_load_b64 v[11:12], v[11:12], off
s_waitcnt vmcnt(0)
v_fma_f64 v[6:7], v[9:10], v[11:12], v[6:7]
global_store_b64 v[4:5], v[6:7], off
s_cbranch_scc1 .LBB1_3
.LBB1_4:
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b64 s[0:1], s[0:1], 0x18
v_mov_b32_e32 v1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b64 v[6:7], v[0:1], off
s_waitcnt vmcnt(1)
v_add_f64 v[2:3], v[2:3], v[4:5]
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f64 v[2:3], v[6:7], v[2:3]
global_store_b64 v[0:1], v[2:3], off
.LBB1_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j, .Lfunc_end1-_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z28ecuacion_kernel_inplace_sumaPdS_S_j
.globl _Z28ecuacion_kernel_inplace_sumaPdS_S_j
.p2align 8
.type _Z28ecuacion_kernel_inplace_sumaPdS_S_j,@function
_Z28ecuacion_kernel_inplace_sumaPdS_S_j:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v3, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s3, 16
s_and_b32 s3, s3, 0xffff
v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s14, s3, v[0:1]
v_mul_lo_u32 v0, v1, s2
s_mul_i32 s3, s2, s2
s_add_i32 s3, s3, -1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v3, v0, v2
v_cmp_gt_u32_e32 vcc_lo, s3, v3
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB2_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v2, s2
v_mov_b32_e32 v4, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_cmp_eq_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[5:6], 3, v[3:4]
v_add_nc_u32_e32 v3, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[3:4], 3, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v7, vcc_lo, s6, v5
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v6, vcc_lo
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
global_load_b64 v[7:8], v[7:8], off
global_load_b64 v[9:10], v[3:4], off
v_add_co_u32 v4, vcc_lo, s0, v5
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v6, vcc_lo
global_load_b64 v[11:12], v[4:5], off
s_waitcnt vmcnt(1)
v_add_f64 v[6:7], v[7:8], v[9:10]
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f64 v[6:7], v[11:12], v[6:7]
global_store_b64 v[4:5], v[6:7], off
s_cbranch_scc1 .LBB2_4
global_load_b64 v[6:7], v[4:5], off
.p2align 6
.LBB2_3:
v_mov_b32_e32 v1, 0
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_lg_u32 s2, 0
v_mov_b32_e32 v3, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[10:11], 3, v[2:3]
v_add_nc_u32_e32 v2, 1, v2
v_lshlrev_b64 v[8:9], 3, v[0:1]
v_add_nc_u32_e32 v0, 1, v0
v_add_co_u32 v8, vcc_lo, s4, v8
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v9, vcc_lo
v_add_co_u32 v10, vcc_lo, s6, v10
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v11, vcc_lo
global_load_b64 v[8:9], v[8:9], off
global_load_b64 v[10:11], v[10:11], off
s_waitcnt vmcnt(0)
v_fma_f64 v[6:7], v[8:9], v[10:11], v[6:7]
global_store_b64 v[4:5], v[6:7], off
s_cbranch_scc1 .LBB2_3
.LBB2_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z28ecuacion_kernel_inplace_sumaPdS_S_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z28ecuacion_kernel_inplace_sumaPdS_S_j, .Lfunc_end2-_Z28ecuacion_kernel_inplace_sumaPdS_S_j
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z17kernel_sum_MatrizPdS_S_j
.globl _Z17kernel_sum_MatrizPdS_S_j
.p2align 8
.type _Z17kernel_sum_MatrizPdS_S_j,@function
_Z17kernel_sum_MatrizPdS_S_j:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v5, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s3, 16
s_and_b32 s3, s3, 0xffff
v_mad_u64_u32 v[0:1], null, s15, s4, v[2:3]
v_mad_u64_u32 v[3:4], null, s14, s3, v[5:6]
s_mul_i32 s3, s2, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, v0, s2, v[3:4]
v_cmp_gt_u32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB3_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mov_b32_e32 v2, 0
v_mad_u64_u32 v[4:5], null, v3, s2, v[0:1]
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mov_b32_e32 v5, v2
v_lshlrev_b64 v[0:1], 3, v[1:2]
v_lshlrev_b64 v[2:3], 3, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b64 v[4:5], v[4:5], off
global_load_b64 v[2:3], v[2:3], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b64 v[6:7], v[0:1], off
s_waitcnt vmcnt(1)
v_add_f64 v[2:3], v[4:5], v[2:3]
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f64 v[2:3], v[6:7], v[2:3]
global_store_b64 v[0:1], v[2:3], off
.LBB3_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17kernel_sum_MatrizPdS_S_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z17kernel_sum_MatrizPdS_S_j, .Lfunc_end3-_Z17kernel_sum_MatrizPdS_S_j
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z18kernel_transpuestaPdi
.globl _Z18kernel_transpuestaPdi
.p2align 8
.type _Z18kernel_transpuestaPdi,@function
_Z18kernel_transpuestaPdi:
s_load_b32 s2, s[0:1], 0x1c
s_mov_b32 s3, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_lshl_or_b32 v0, v1, 3, 1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_i32_e32 v0, v0
v_mul_f32_e32 v2, 0x4f800000, v0
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
v_sqrt_f32_e32 v2, v0
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v3, -1, v2
v_add_nc_u32_e32 v4, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v5, -v3, v2, v0
v_fma_f32 v6, -v4, v2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ge_f32_e64 s2, 0, v5
v_cndmask_b32_e64 v2, v2, v3, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_f32_e64 s2, 0, v6
v_cndmask_b32_e64 v2, v2, v4, s2
s_load_b32 s2, s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v3, 0x37800000, v2
v_cndmask_b32_e32 v2, v2, v3, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v0, 0x260
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v0, v2, v0, vcc_lo
v_add_f32_e32 v0, 1.0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v0, 0.5, v0
v_cvt_i32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, -1, v0
v_mul_lo_u32 v2, v2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v3, 31, v2
v_add_nc_u32_e32 v2, v2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 1, v2
v_sub_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_max_i32_e32 v2, v0, v1
s_waitcnt lgkmcnt(0)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB4_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v0, s2, v[1:2]
v_mad_u64_u32 v[6:7], null, v1, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v7, 31, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 3, v[2:3]
v_lshlrev_b64 v[0:1], 3, v[6:7]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_clause 0x1
global_load_b64 v[4:5], v[2:3], off
global_load_b64 v[6:7], v[0:1], off
s_waitcnt vmcnt(1)
v_cvt_i32_f64_e32 v4, v[4:5]
s_delay_alu instid0(VALU_DEP_1)
v_cvt_f64_i32_e32 v[4:5], v4
s_waitcnt vmcnt(0)
s_clause 0x1
global_store_b64 v[2:3], v[6:7], off
global_store_b64 v[0:1], v[4:5], off
.LBB4_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18kernel_transpuestaPdi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z18kernel_transpuestaPdi, .Lfunc_end4-_Z18kernel_transpuestaPdi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z22kernel_mult_sum_matrizPdS_S_j
.globl _Z22kernel_mult_sum_matrizPdS_S_j
.p2align 8
.type _Z22kernel_mult_sum_matrizPdS_S_j,@function
_Z22kernel_mult_sum_matrizPdS_S_j:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s4, s[0:1], 0x18
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
v_mad_u64_u32 v[3:4], null, s15, s3, v[1:2]
v_mad_u64_u32 v[1:2], null, s14, s2, v[0:1]
s_mul_i32 s2, s4, s4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v3, s4
v_add_nc_u32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u32_e32 vcc_lo, s2, v2
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB5_3
s_load_b64 s[2:3], s[0:1], 0x10
v_mov_b32_e32 v3, 0
v_mul_lo_u32 v6, v1, s4
s_max_u32 s4, s4, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 3, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
s_load_b128 s[0:3], s[0:1], 0x0
global_load_b64 v[4:5], v[2:3], off
.p2align 6
.LBB5_2:
v_mov_b32_e32 v1, 0
s_add_i32 s4, s4, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_lg_u32 s4, 0
v_mov_b32_e32 v7, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[10:11], 3, v[6:7]
v_add_nc_u32_e32 v6, 1, v6
v_lshlrev_b64 v[8:9], 3, v[0:1]
v_add_nc_u32_e32 v0, 1, v0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v7, vcc_lo, s0, v8
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v8, vcc_lo, s1, v9, vcc_lo
v_add_co_u32 v9, vcc_lo, s2, v10
v_add_co_ci_u32_e32 v10, vcc_lo, s3, v11, vcc_lo
global_load_b64 v[7:8], v[7:8], off
global_load_b64 v[9:10], v[9:10], off
s_waitcnt vmcnt(0)
v_fma_f64 v[4:5], v[7:8], v[9:10], v[4:5]
global_store_b64 v[2:3], v[4:5], off
s_cbranch_scc1 .LBB5_2
.LBB5_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z22kernel_mult_sum_matrizPdS_S_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end5:
.size _Z22kernel_mult_sum_matrizPdS_S_j, .Lfunc_end5-_Z22kernel_mult_sum_matrizPdS_S_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27ecuacion_kernel_outplace_p1PdS_S_S_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z28ecuacion_kernel_inplace_sumaPdS_S_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z28ecuacion_kernel_inplace_sumaPdS_S_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17kernel_sum_MatrizPdS_S_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17kernel_sum_MatrizPdS_S_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18kernel_transpuestaPdi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18kernel_transpuestaPdi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z22kernel_mult_sum_matrizPdS_S_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z22kernel_mult_sum_matrizPdS_S_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
//134217728
// Returns the current wall-clock time in seconds as a double with
// microsecond resolution; intended for interval timing via subtraction.
double dwalltime(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec / 1000000.0;
}
// Out-of-place transpose of the n x n row-major matrices A and B:
// writes AT[j*n + i] = A[i*n + j] and BT[j*n + i] = B[i*n + j].
// Launch: 2D grid of 2D blocks, one thread per matrix element
// (y axis -> row i, x axis -> column j).
__global__ void ecuacion_kernel_outplace_p1(double *d_matA,double *d_matAT,double *d_matB,double *d_matBT, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; // row index i
int distB = blockIdx.x * blockDim.x + threadIdx.x; // column index j
// out-of-place transpose of A and B
// NOTE(review): the guard compares each index against n*n rather than n.
// A thread with n <= distA < n*n (or likewise distB) would read/write out
// of bounds. This is safe only if the launch spawns exactly n threads per
// axis (as main() does when N is a multiple of CUDA_BLK) — confirm, or
// tighten the guard to (distA<n) && (distB<n).
if( (distA<n*n) && (distB<n*n) ){
d_matAT [distB*n + distA] = d_matA[distA*n + distB];
d_matBT [distB*n + distA] = d_matB[distA*n + distB];
}
}
// Second phase of the out-of-place equation: for element (i,j) accumulates
//   C[i*n+j] += sum_k A[i*n+k] * BT[k*n+j]   (i.e. the product A * B^T,
//                                             since BT is B transposed)
// and then
//   C[i*n+j] += B[i*n+j] + AT[i*n+j].
// Expects d_matAT/d_matBT to have been filled by ecuacion_kernel_outplace_p1.
// Launch: 2D grid/blocks, one thread per element (y -> i, x -> j).
__global__ void ecuacion_kernel_outplace_p2(double *d_matA,double *d_matB,double *d_matC,double *d_matAT,double *d_matBT, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; // row index i
int distB = blockIdx.x * blockDim.x + threadIdx.x; // column index j
int k;
// NOTE(review): distA*n mixes int with unsigned n, so the comparison is
// performed as unsigned — fine while indices are non-negative, but worth
// confirming the intent; the guard covers the full range [0, n*n-1].
if (distA*n+distB <= (n*n - 1)){
// multiplication: dot product of row i of A with column j of BT
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matBT[distB+k*n];
}
// addition of B and A-transposed
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matAT[distA*n+distB];
}
}
// "In-place" variant: for element (i,j) accumulates
//   C[i*n+j] += B[i*n+j] + A[j*n+i]        (A read transposed in place)
// then
//   C[i*n+j] += sum_k A[i*n+k] * B[j*n+k]  (product A * B^T, B read by row).
// Launch: 2D grid/blocks, one thread per element (y -> i, x -> j).
// NOTE(review): this kernel appears unused — main() launches
// kernel_sum_Matriz and kernel_mult_sum_matriz instead; confirm.
__global__ void ecuacion_kernel_inplace_suma (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; // row index i
int distB = blockIdx.x * blockDim.x + threadIdx.x; // column index j
int k;
// multiplication + addition, guarded per linear element index
// NOTE(review): the strict guard "< n*n - 1" excludes the last element
// (linear index n*n-1), so C's bottom-right corner is never updated —
// looks like an off-by-one; the analogous kernels below use "< n*n".
if (distA*n+distB < (n*n - 1)){
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matA[distA+distB*n];
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB*n+k];
}
}
}
// Element-wise accumulation: C[i*n+j] += A[i*n+j] + B[j*n+i]
// (B is read transposed in place; in main() this runs after A itself has
// been transposed by kernel_transpuesta, matching the CPU reference).
// Launch: 2D grid/blocks, one thread per element (y -> i, x -> j).
__global__ void kernel_sum_Matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; // row index i
int distB = blockIdx.x * blockDim.x + threadIdx.x; // column index j
// addition, guarded by linear element index
if (distA*n+distB < (n*n)){
d_matC[distA*n+distB] += d_matA[distA*n+distB] + d_matB[distA+distB*n];
}
}
// In-place transpose of the N x N row-major matrix m. Each 1D thread id is
// mapped to one pair (i,j) of the lower triangle via the inverse of the
// triangular-number formula, and the pair (i,j)/(j,i) is swapped.
// Launch: 1D indexing (only blockIdx.x/threadIdx.x are used).
__global__ void kernel_transpuesta(double *m, int N){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
// Inverse triangular mapping: tid -> (i,j) with j < i.
// NOTE(review): sqrtf works in single precision, so the recovered i can be
// off by one once 1+8*tid exceeds float's exact integer range (~2^24) —
// confirm the intended matrix sizes stay below that.
int i = int((1 + sqrtf(1 + 8*tid)) / 2);
// NOTE(review): aux is declared int, so the swap truncates the double
// values to integers. This only works while the matrix holds whole numbers
// (as in main()'s test data); aux should be double for general inputs.
int j = tid - (i*(i-1)/2); int aux;
if ( (i<N) && (j<N) ){
aux = m[i*N + j] ;
m[i*N + j] = m[j*N + i];
m[j*N + i] = aux;
}
}
// Matrix-product accumulation: C[i*n+j] += sum_k A[i*n+k] * B[j*n+k],
// i.e. C += A * B^T (B is walked by row, which stands in for B transposed).
// Despite the name, no extra element-wise sum happens here — the "+sum"
// part of the equation is done separately by kernel_sum_Matriz.
// Launch: 2D grid/blocks, one thread per element (y -> i, x -> j).
__global__ void kernel_mult_sum_matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; // row index i
int distB = blockIdx.x * blockDim.x + threadIdx.x; // column index j
int k;
// multiplication, guarded by linear element index
if (distA*n+distB < (n*n)){
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB*n+k];
}
}
}
// Driver: computes C = A*B^T + B + A^T (accumulated element-wise into C)
// three ways — a sequential CPU reference, a GPU "out-of-place" version
// using explicit transpose buffers, and a GPU "in-place" version that
// transposes A in place — and prints the wall-clock time of each.
// Usage: ./prog N CUDA_BLK   (N = matrix side, CUDA_BLK = block side).
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N, CUDABLK\n");
return 0;
}
// variable declarations
hipError_t error;
unsigned int N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi (argv[2]), gridBlock;
unsigned long numBytes = sizeof(double)*N*N;
double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,*d_matAT,*d_matBT,timetick;
unsigned int i,j,k;
// initialize host (CPU) buffers: A[i] = B[i] = i, C zeroed
// NOTE(review): malloc results are not checked for NULL.
matA = (double *)malloc(numBytes);
matB = (double *)malloc(numBytes);
matC = (double *)malloc(numBytes);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
// allocate device (GPU) buffers
// NOTE(review): hip* return codes are unchecked throughout; consider
// wrapping every HIP call in an error-check macro.
hipMalloc((void **) &d_matA, numBytes);
hipMalloc((void **) &d_matAT, numBytes);
hipMalloc((void **) &d_matB, numBytes);
hipMalloc((void **) &d_matBT, numBytes);
hipMalloc((void **) &d_matC, numBytes);
// gridBlock == N / CUDA_BLK when N is a multiple of CUDA_BLK (integer
// division inside sqrt) — TODO confirm N % CUDA_BLK == 0 is guaranteed;
// otherwise part of the matrix is never covered by the grid.
gridBlock = (unsigned int)sqrt(N*N/CUDA_BLK/CUDA_BLK);
dim3 dimBlock(CUDA_BLK,CUDA_BLK); // 2D thread block (CUDA_BLK x CUDA_BLK threads)
dim3 dimGrid(gridBlock,gridBlock); // 2D grid (gridBlock x gridBlock blocks)
//-------------------------------- CPU section begins ------------------------------------
// sequential reference
timetick = dwalltime();
// multiplication: C += A * B^T
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
for(k = 0; k < N ;k++){
matC[i*N+j] += matA[i*N+k] * matB[j*N+k]; // walks matB by row, which simulates matB transposed
}
}
}
// addition: C += B + A^T
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
matC[i*N+j] += matB[i*N+j] + matA[i+j*N];
}
}
printf("Tiempo para la ecuacion CPU: %f\n\n",dwalltime() - timetick);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//-------------------------------- CPU section ends ------------------------------------
// reset host data so the GPU run starts from the same state
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
hipMemcpy(d_matA, matA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matB, matB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matC, matC, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
//-------------------------------- GPU out-of-place begins ------------------------------------
// Timed region covers only the kernels (copies excluded on purpose,
// per the file name "sin_tiempo_copia").
timetick = dwalltime();
ecuacion_kernel_outplace_p1<<<dimGrid, dimBlock>>>(d_matA, d_matAT,d_matB,d_matBT, N);
hipDeviceSynchronize();
ecuacion_kernel_outplace_p2<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC,d_matAT,d_matBT, N);
hipDeviceSynchronize();
printf("Tiempo para la ecuacion out-place GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n\n",error);
hipMemcpy(matC, d_matC, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//-------------------------------- GPU out-of-place ends ------------------------------------
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
hipFree(d_matAT);
hipFree(d_matBT);
// reset host data again for the in-place run
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
hipMalloc((void **) &d_matA, numBytes);
hipMalloc((void **) &d_matB, numBytes);
hipMalloc((void **) &d_matC, numBytes);
hipMemcpy(d_matA, matA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matB, matB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matC, matC, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
//-------------------------------- GPU in-place begins ------------------------------------
// Sequence: transpose A, C += A^T + B^T(read in place), transpose A back,
// C += A * B^T. Each launch is followed by a full device sync.
timetick = dwalltime();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
hipDeviceSynchronize();
kernel_sum_Matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
hipDeviceSynchronize();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
hipDeviceSynchronize();
kernel_mult_sum_matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
hipDeviceSynchronize();
printf("Tiempo para la ecuacion in-place GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n\n",error);
hipMemcpy(matC, d_matC, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
//-------------------------------- GPU in-place ends ------------------------------------
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
/*
//prints matrix matC
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
free(matA);
free(matB);
free(matC);
return 0;
}
.file "ejercicio1_sin_tiempo_copia.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9dwalltimev
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z9dwalltimev
.p2align 4, 0x90
.type _Z9dwalltimev,@function
_Z9dwalltimev: # @_Z9dwalltimev
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
divsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z9dwalltimev, .Lfunc_end0-_Z9dwalltimev
.cfi_endproc
# -- End function
.globl _Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j # -- Begin function _Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j
.p2align 4, 0x90
.type _Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j,@function
_Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j: # @_Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z27ecuacion_kernel_outplace_p1PdS_S_S_j, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end1:
.size _Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j, .Lfunc_end1-_Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j
.cfi_endproc
# -- End function
.globl _Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j # -- Begin function _Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j
.p2align 4, 0x90
.type _Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j,@function
_Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j: # @_Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movl %r9d, 4(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j, .Lfunc_end2-_Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j
.cfi_endproc
# -- End function
.globl _Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j # -- Begin function _Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j
.p2align 4, 0x90
.type _Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j,@function
_Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j: # @_Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z28ecuacion_kernel_inplace_sumaPdS_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j, .Lfunc_end3-_Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j
.cfi_endproc
# -- End function
.globl _Z32__device_stub__kernel_sum_MatrizPdS_S_j # -- Begin function _Z32__device_stub__kernel_sum_MatrizPdS_S_j
.p2align 4, 0x90
.type _Z32__device_stub__kernel_sum_MatrizPdS_S_j,@function
_Z32__device_stub__kernel_sum_MatrizPdS_S_j: # @_Z32__device_stub__kernel_sum_MatrizPdS_S_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17kernel_sum_MatrizPdS_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size _Z32__device_stub__kernel_sum_MatrizPdS_S_j, .Lfunc_end4-_Z32__device_stub__kernel_sum_MatrizPdS_S_j
.cfi_endproc
# -- End function
.globl _Z33__device_stub__kernel_transpuestaPdi # -- Begin function _Z33__device_stub__kernel_transpuestaPdi
.p2align 4, 0x90
.type _Z33__device_stub__kernel_transpuestaPdi,@function
_Z33__device_stub__kernel_transpuestaPdi: # @_Z33__device_stub__kernel_transpuestaPdi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z18kernel_transpuestaPdi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end5:
.size _Z33__device_stub__kernel_transpuestaPdi, .Lfunc_end5-_Z33__device_stub__kernel_transpuestaPdi
.cfi_endproc
# -- End function
.globl _Z37__device_stub__kernel_mult_sum_matrizPdS_S_j # -- Begin function _Z37__device_stub__kernel_mult_sum_matrizPdS_S_j
.p2align 4, 0x90
.type _Z37__device_stub__kernel_mult_sum_matrizPdS_S_j,@function
_Z37__device_stub__kernel_mult_sum_matrizPdS_S_j: # @_Z37__device_stub__kernel_mult_sum_matrizPdS_S_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z22kernel_mult_sum_matrizPdS_S_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end6:
.size _Z37__device_stub__kernel_mult_sum_matrizPdS_S_j, .Lfunc_end6-_Z37__device_stub__kernel_mult_sum_matrizPdS_S_j
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI7_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $264, %rsp # imm = 0x108
.cfi_def_cfa_offset 320
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $3, %edi
jne .LBB7_1
# %bb.2:
movq 8(%rsi), %rdi
movq %rsi, %rbx
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r13
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 96(%rsp) # 8-byte Spill
cltq
movq %rax, 184(%rsp) # 8-byte Spill
movl %r13d, %ebp
imulq %rbp, %rbp
shlq $3, %rbp
movq %rbp, %rdi
callq malloc
movq %rax, %rbx
movq %rbp, %rdi
callq malloc
movq %rax, %r14
movq %rbp, %rdi
callq malloc
movq %rax, %r15
movl %r13d, %ecx
imull %r13d, %ecx
movl %ecx, %eax
movl %ecx, 196(%rsp) # 4-byte Spill
testl %ecx, %ecx
movq %rax, 8(%rsp) # 8-byte Spill
je .LBB7_5
# %bb.3: # %.lr.ph.preheader
leaq (,%rax,8), %rdx
xorl %r12d, %r12d
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movq 8(%rsp), %rcx # 8-byte Reload
.p2align 4, 0x90
.LBB7_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %r12d, %eax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movsd %xmm0, (%rbx,%r12,8)
movsd %xmm0, (%r14,%r12,8)
incq %r12
cmpq %r12, %rcx
jne .LBB7_4
.LBB7_5: # %._crit_edge
leaq 16(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 232(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 104(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 224(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 88(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq 8(%rsp), %rax # 8-byte Reload
xorl %edx, %edx
movq 184(%rsp), %rcx # 8-byte Reload
divq %rcx
xorl %edx, %edx
divq %rcx
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB7_7
# %bb.6:
sqrtsd %xmm0, %xmm0
jmp .LBB7_8
.LBB7_1:
movl $.Lstr, %edi
callq puts@PLT
jmp .LBB7_38
.LBB7_7: # %call.sqrt
callq sqrt
.LBB7_8: # %._crit_edge.split
cvttsd2si %xmm0, %rdx
movq 96(%rsp), %rcx # 8-byte Reload
movl %ecx, %eax
movq %rax, 184(%rsp) # 8-byte Spill
shlq $32, %rcx
movq %rcx, 96(%rsp) # 8-byte Spill
movl %edx, %eax
movq %rax, 216(%rsp) # 8-byte Spill
shlq $32, %rdx
movq %rdx, 200(%rsp) # 8-byte Spill
xorl %r12d, %r12d
leaq 128(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 128(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 136(%rsp), %xmm1
divsd .LCPI7_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 256(%rsp) # 8-byte Spill
testl %r13d, %r13d
movq %rbp, 208(%rsp) # 8-byte Spill
je .LBB7_19
# %bb.9: # %.preheader241.preheader
movl %r13d, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB7_10: # %.preheader241
# =>This Loop Header: Depth=1
# Child Loop BB7_11 Depth 2
# Child Loop BB7_12 Depth 3
movl %ecx, %edx
imull %r13d, %edx
xorl %esi, %esi
xorl %edi, %edi
.p2align 4, 0x90
.LBB7_11: # %.preheader240
# Parent Loop BB7_10 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB7_12 Depth 3
leal (%rdx,%rdi), %r8d
movsd (%r15,%r8,8), %xmm0 # xmm0 = mem[0],zero
movq %rax, %r9
movl %r12d, %r10d
movq %rsi, %r11
.p2align 4, 0x90
.LBB7_12: # Parent Loop BB7_10 Depth=1
# Parent Loop BB7_11 Depth=2
# => This Inner Loop Header: Depth=3
movl %r10d, %ebp
movsd (%rbx,%rbp,8), %xmm1 # xmm1 = mem[0],zero
movl %r11d, %ebp
mulsd (%r14,%rbp,8), %xmm1
addsd %xmm1, %xmm0
incq %r11
incl %r10d
decq %r9
jne .LBB7_12
# %bb.13: # in Loop: Header=BB7_11 Depth=2
movsd %xmm0, (%r15,%r8,8)
incq %rdi
addq %r13, %rsi
cmpq %rax, %rdi
jne .LBB7_11
# %bb.14: # in Loop: Header=BB7_10 Depth=1
incl %ecx
addl %r13d, %r12d
cmpl %r13d, %ecx
jne .LBB7_10
# %bb.15: # %.preheader.preheader
xorl %ecx, %ecx
xorl %edx, %edx
movq 208(%rsp), %rbp # 8-byte Reload
.p2align 4, 0x90
.LBB7_16: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB7_17 Depth 2
movq %rax, %rsi
movl %ecx, %edi
movl %edx, %r8d
.p2align 4, 0x90
.LBB7_17: # Parent Loop BB7_16 Depth=1
# => This Inner Loop Header: Depth=2
movl %edi, %r9d
movsd (%r14,%r9,8), %xmm0 # xmm0 = mem[0],zero
movl %r8d, %r10d
addsd (%rbx,%r10,8), %xmm0
addsd (%r15,%r9,8), %xmm0
movsd %xmm0, (%r15,%r9,8)
addl %r13d, %r8d
incl %edi
decq %rsi
jne .LBB7_17
# %bb.18: # in Loop: Header=BB7_16 Depth=1
incl %edx
addl %r13d, %ecx
cmpl %r13d, %edx
jne .LBB7_16
.LBB7_19: # %._crit_edge249
movq 96(%rsp), %rax # 8-byte Reload
addq %rax, 184(%rsp) # 8-byte Folded Spill
movq 200(%rsp), %rax # 8-byte Reload
addq %rax, 216(%rsp) # 8-byte Folded Spill
leaq 128(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 128(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
divsd .LCPI7_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 256(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.1, %edi
movb $1, %al
callq printf
cmpl $0, 196(%rsp) # 4-byte Folded Reload
movq 8(%rsp), %rax # 8-byte Reload
je .LBB7_22
# %bb.20: # %.lr.ph252.preheader
leaq (,%rax,8), %rdx
xorl %r12d, %r12d
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movq 8(%rsp), %rcx # 8-byte Reload
.p2align 4, 0x90
.LBB7_21: # %.lr.ph252
# =>This Inner Loop Header: Depth=1
movl %r12d, %eax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movsd %xmm0, (%rbx,%r12,8)
movsd %xmm0, (%r14,%r12,8)
incq %r12
cmpq %r12, %rcx
jne .LBB7_21
.LBB7_22: # %._crit_edge253
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 104(%rsp), %rdi
movq %r14, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 88(%rsp), %rdi
movq %r15, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 128(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 128(%rsp), %xmm0
movsd %xmm0, 200(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
divsd .LCPI7_0(%rip), %xmm0
movsd %xmm0, 96(%rsp) # 8-byte Spill
movq 216(%rsp), %rbp # 8-byte Reload
movq %rbp, %rdi
movl $1, %esi
movq 184(%rsp), %r12 # 8-byte Reload
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_24
# %bb.23:
movq 16(%rsp), %rax
movq 232(%rsp), %rcx
movq 104(%rsp), %rdx
movq 224(%rsp), %rsi
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rsi, 24(%rsp)
movl %r13d, 240(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 240(%rsp), %rax
movq %rax, 160(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 120(%rsp), %rdx
leaq 112(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z27ecuacion_kernel_outplace_p1PdS_S_S_j, %edi
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_24:
movsd 96(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
addsd 200(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 96(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
movq %rbp, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_26
# %bb.25:
movq 16(%rsp), %rax
movq 104(%rsp), %rcx
movq 88(%rsp), %rdx
movq 232(%rsp), %rsi
movq 224(%rsp), %rdi
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rsi, 24(%rsp)
movq %rdi, 120(%rsp)
movl %r13d, 252(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 252(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 112(%rsp), %rdx
leaq 240(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j, %edi
pushq 240(%rsp)
.cfi_adjust_cfa_offset 8
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_26:
callq hipDeviceSynchronize
leaq 128(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 128(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
divsd .LCPI7_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 96(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.2, %edi
movb $1, %al
callq printf
callq hipGetLastError
movl $.L.str.3, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
movq 88(%rsp), %rsi
movq %r15, %rdi
movq 208(%rsp), %rbp # 8-byte Reload
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 104(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq 232(%rsp), %rdi
callq hipFree
movq 224(%rsp), %rdi
callq hipFree
cmpl $0, 196(%rsp) # 4-byte Folded Reload
movq 8(%rsp), %rax # 8-byte Reload
je .LBB7_29
# %bb.27: # %.lr.ph256.preheader
leaq (,%rax,8), %rdx
xorl %r12d, %r12d
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movq 8(%rsp), %rcx # 8-byte Reload
.p2align 4, 0x90
.LBB7_28: # %.lr.ph256
# =>This Inner Loop Header: Depth=1
movl %r12d, %eax
xorps %xmm0, %xmm0
cvtsi2sd %rax, %xmm0
movsd %xmm0, (%rbx,%r12,8)
movsd %xmm0, (%r14,%r12,8)
incq %r12
cmpq %r12, %rcx
jne .LBB7_28
.LBB7_29: # %._crit_edge257
leaq 16(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 104(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 88(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 104(%rsp), %rdi
movq %r14, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 88(%rsp), %rdi
movq %r15, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 128(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 128(%rsp), %xmm0
movsd %xmm0, 96(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
movq 216(%rsp), %rbp # 8-byte Reload
movq %rbp, %rdi
movl $1, %esi
movq 184(%rsp), %r12 # 8-byte Reload
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_31
# %bb.30:
movq 16(%rsp), %rax
movq %rax, 80(%rsp)
movl %r13d, 24(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z18kernel_transpuestaPdi, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_31:
callq hipDeviceSynchronize
movq %rbp, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_33
# %bb.32:
movq 16(%rsp), %rax
movq 104(%rsp), %rcx
movq 88(%rsp), %rdx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movq %rdx, 64(%rsp)
movl %r13d, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rax
movq %rax, 152(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z17kernel_sum_MatrizPdS_S_j, %edi
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_33:
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
divsd .LCPI7_0(%rip), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
movq %rbp, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_35
# %bb.34:
movq 16(%rsp), %rax
movq %rax, 80(%rsp)
movl %r13d, 24(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z18kernel_transpuestaPdi, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_35:
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
addsd 96(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 8(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
movq %rbp, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_37
# %bb.36:
movq 16(%rsp), %rax
movq 104(%rsp), %rcx
movq 88(%rsp), %rdx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movq %rdx, 64(%rsp)
movl %r13d, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rax
movq %rax, 152(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z22kernel_mult_sum_matrizPdS_S_j, %edi
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_37:
callq hipDeviceSynchronize
leaq 128(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 128(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
divsd .LCPI7_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 8(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.4, %edi
movb $1, %al
callq printf
callq hipGetLastError
movl $.L.str.3, %edi
movl %eax, %esi
xorl %eax, %eax
callq printf
movq 88(%rsp), %rsi
movq %r15, %rdi
movq 208(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
callq hipFree
movq 104(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
.LBB7_38:
xorl %eax, %eax
addq $264, %rsp # imm = 0x108
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z27ecuacion_kernel_outplace_p1PdS_S_S_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z28ecuacion_kernel_inplace_sumaPdS_S_j, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17kernel_sum_MatrizPdS_S_j, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18kernel_transpuestaPdi, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z22kernel_mult_sum_matrizPdS_S_j, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z27ecuacion_kernel_outplace_p1PdS_S_S_j,@object # @_Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.section .rodata,"a",@progbits
.globl _Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.p2align 3, 0x0
_Z27ecuacion_kernel_outplace_p1PdS_S_S_j:
.quad _Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j
.size _Z27ecuacion_kernel_outplace_p1PdS_S_S_j, 8
.type _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j,@object # @_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.globl _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.p2align 3, 0x0
_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j:
.quad _Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j
.size _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j, 8
.type _Z28ecuacion_kernel_inplace_sumaPdS_S_j,@object # @_Z28ecuacion_kernel_inplace_sumaPdS_S_j
.globl _Z28ecuacion_kernel_inplace_sumaPdS_S_j
.p2align 3, 0x0
_Z28ecuacion_kernel_inplace_sumaPdS_S_j:
.quad _Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j
.size _Z28ecuacion_kernel_inplace_sumaPdS_S_j, 8
.type _Z17kernel_sum_MatrizPdS_S_j,@object # @_Z17kernel_sum_MatrizPdS_S_j
.globl _Z17kernel_sum_MatrizPdS_S_j
.p2align 3, 0x0
_Z17kernel_sum_MatrizPdS_S_j:
.quad _Z32__device_stub__kernel_sum_MatrizPdS_S_j
.size _Z17kernel_sum_MatrizPdS_S_j, 8
.type _Z18kernel_transpuestaPdi,@object # @_Z18kernel_transpuestaPdi
.globl _Z18kernel_transpuestaPdi
.p2align 3, 0x0
_Z18kernel_transpuestaPdi:
.quad _Z33__device_stub__kernel_transpuestaPdi
.size _Z18kernel_transpuestaPdi, 8
.type _Z22kernel_mult_sum_matrizPdS_S_j,@object # @_Z22kernel_mult_sum_matrizPdS_S_j
.globl _Z22kernel_mult_sum_matrizPdS_S_j
.p2align 3, 0x0
_Z22kernel_mult_sum_matrizPdS_S_j:
.quad _Z37__device_stub__kernel_mult_sum_matrizPdS_S_j
.size _Z22kernel_mult_sum_matrizPdS_S_j, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Tiempo para la ecuacion CPU: %f\n\n"
.size .L.str.1, 34
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Tiempo para la ecuacion out-place GPU: %f\n"
.size .L.str.2, 43
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "error: %d\n\n"
.size .L.str.3, 12
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Tiempo para la ecuacion in-place GPU: %f\n"
.size .L.str.4, 42
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z27ecuacion_kernel_outplace_p1PdS_S_S_j"
.size .L__unnamed_1, 41
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j"
.size .L__unnamed_2, 43
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z28ecuacion_kernel_inplace_sumaPdS_S_j"
.size .L__unnamed_3, 40
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z17kernel_sum_MatrizPdS_S_j"
.size .L__unnamed_4, 29
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z18kernel_transpuestaPdi"
.size .L__unnamed_5, 26
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "_Z22kernel_mult_sum_matrizPdS_S_j"
.size .L__unnamed_6, 34
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Falta argumento: N, CUDABLK"
.size .Lstr, 28
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z42__device_stub__ecuacion_kernel_outplace_p1PdS_S_S_j
.addrsig_sym _Z42__device_stub__ecuacion_kernel_outplace_p2PdS_S_S_S_j
.addrsig_sym _Z43__device_stub__ecuacion_kernel_inplace_sumaPdS_S_j
.addrsig_sym _Z32__device_stub__kernel_sum_MatrizPdS_S_j
.addrsig_sym _Z33__device_stub__kernel_transpuestaPdi
.addrsig_sym _Z37__device_stub__kernel_mult_sum_matrizPdS_S_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z27ecuacion_kernel_outplace_p1PdS_S_S_j
.addrsig_sym _Z27ecuacion_kernel_outplace_p2PdS_S_S_S_j
.addrsig_sym _Z28ecuacion_kernel_inplace_sumaPdS_S_j
.addrsig_sym _Z17kernel_sum_MatrizPdS_S_j
.addrsig_sym _Z18kernel_transpuestaPdi
.addrsig_sym _Z22kernel_mult_sum_matrizPdS_S_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#define DIM 4
#define NUM_ELEMS DIM*DIM
__global__ void transpose(int *a, int *b) {
int row = blockIdx.x * DIM/2 + threadIdx.x;
int col = blockIdx.y * DIM/2 + threadIdx.y;
int newIndex = row * DIM + col;
int oldIndex = col * DIM + row;
b[newIndex] = a[oldIndex];
}
int main() {
//device memory
int *device1, *device2;
//host memory
int host[NUM_ELEMS];
int output[NUM_ELEMS];
size_t numBytes = NUM_ELEMS * sizeof(int);
int i = 0; //loop counter
//Load host1 and host2 with values.
for (i = 0; i < NUM_ELEMS; i++) {
host[i] = i+1;
}
//Allocate memory for device vars.
cudaMalloc((void **)&device1, numBytes);
cudaMalloc((void **)&device2, numBytes);
//Transfer values from host to device.
cudaMemcpy(device1, &host, numBytes, cudaMemcpyHostToDevice);
//Launch transpose kernel on GPU with given parameters.
dim3 grid(DIM/2, DIM/2); //# of thread blocks
dim3 block(DIM/2, DIM/2); //# of threads per thread block
transpose<<<grid,block>>>(device1, device2);
//Get result from device to host.
cudaMemcpy(&output, device2, numBytes, cudaMemcpyDeviceToHost);
//Print out values.
printf("[");
for (i = 0; i < NUM_ELEMS; i++) {
printf("%d ", output[i]);
}
printf("]\n");
//Free all variables.
cudaFree(device1);
cudaFree(device2);
return 0;
} | code for sm_80
Function : _Z9transposePiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2UR UR4, SR_CTAID.X ; /* 0x00000000000479c3 */
/* 0x000e220000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e620000002100 */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */
/* 0x000fc600078e00ff */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000ea60000002200 */
/*0050*/ S2UR UR5, SR_CTAID.Y ; /* 0x00000000000579c3 */
/* 0x000ee20000002600 */
/*0060*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x001fc8000800063f */
/*0070*/ ULOP3.LUT UR4, UR4, 0x7ffffffe, URZ, 0xc0, !UPT ; /* 0x7ffffffe04047892 */
/* 0x000fe4000f8ec03f */
/*0080*/ USHF.L.U32 UR5, UR5, 0x1, URZ ; /* 0x0000000105057899 */
/* 0x008fc8000800063f */
/*0090*/ IADD3 R0, R0, UR4, RZ ; /* 0x0000000400007c10 */
/* 0x002fe2000fffe0ff */
/*00a0*/ ULOP3.LUT UR5, UR5, 0x7ffffffe, URZ, 0xc0, !UPT ; /* 0x7ffffffe05057892 */
/* 0x000fcc000f8ec03f */
/*00b0*/ IADD3 R5, R5, UR5, RZ ; /* 0x0000000505057c10 */
/* 0x004fe2000fffe0ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00d0*/ LEA R2, R5, R0, 0x2 ; /* 0x0000000005027211 */
/* 0x000fca00078e10ff */
/*00e0*/ IMAD.WIDE R2, R2, R4, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0204 */
/*00f0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0100*/ LEA R5, R0, R5, 0x2 ; /* 0x0000000500057211 */
/* 0x000fca00078e10ff */
/*0110*/ IMAD.WIDE R4, R5, R4, c[0x0][0x168] ; /* 0x00005a0005047625 */
/* 0x000fca00078e0204 */
/*0120*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#define DIM 4
#define NUM_ELEMS DIM*DIM
__global__ void transpose(int *a, int *b) {
int row = blockIdx.x * DIM/2 + threadIdx.x;
int col = blockIdx.y * DIM/2 + threadIdx.y;
int newIndex = row * DIM + col;
int oldIndex = col * DIM + row;
b[newIndex] = a[oldIndex];
}
int main() {
//device memory
int *device1, *device2;
//host memory
int host[NUM_ELEMS];
int output[NUM_ELEMS];
size_t numBytes = NUM_ELEMS * sizeof(int);
int i = 0; //loop counter
//Load host1 and host2 with values.
for (i = 0; i < NUM_ELEMS; i++) {
host[i] = i+1;
}
//Allocate memory for device vars.
cudaMalloc((void **)&device1, numBytes);
cudaMalloc((void **)&device2, numBytes);
//Transfer values from host to device.
cudaMemcpy(device1, &host, numBytes, cudaMemcpyHostToDevice);
//Launch transpose kernel on GPU with given parameters.
dim3 grid(DIM/2, DIM/2); //# of thread blocks
dim3 block(DIM/2, DIM/2); //# of threads per thread block
transpose<<<grid,block>>>(device1, device2);
//Get result from device to host.
cudaMemcpy(&output, device2, numBytes, cudaMemcpyDeviceToHost);
//Print out values.
printf("[");
for (i = 0; i < NUM_ELEMS; i++) {
printf("%d ", output[i]);
}
printf("]\n");
//Free all variables.
cudaFree(device1);
cudaFree(device2);
return 0;
} | .file "tmpxft_0010dbbf_00000000-6_transpose.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z9transposePiS_PiS_
.type _Z30__device_stub__Z9transposePiS_PiS_, @function
_Z30__device_stub__Z9transposePiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9transposePiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z9transposePiS_PiS_, .-_Z30__device_stub__Z9transposePiS_PiS_
.globl _Z9transposePiS_
.type _Z9transposePiS_, @function
_Z9transposePiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z9transposePiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9transposePiS_, .-_Z9transposePiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "["
.LC1:
.string "%d "
.LC2:
.string "]\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $192, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movl $1, %eax
.L12:
movl %eax, 44(%rsp,%rax,4)
addq $1, %rax
cmpq $17, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $64, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, 24(%rsp)
movl $2, 28(%rsp)
movl $1, 32(%rsp)
movl $2, 36(%rsp)
movl $2, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
leaq 112(%rsp), %rbx
movl $2, %ecx
movl $64, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 176(%rsp), %r12
leaq .LC1(%rip), %rbp
.L14:
movl (%rbx), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L14
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $192, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z9transposePiS_PiS_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z9transposePiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9transposePiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#define DIM 4
#define NUM_ELEMS DIM*DIM
__global__ void transpose(int *a, int *b) {
int row = blockIdx.x * DIM/2 + threadIdx.x;
int col = blockIdx.y * DIM/2 + threadIdx.y;
int newIndex = row * DIM + col;
int oldIndex = col * DIM + row;
b[newIndex] = a[oldIndex];
}
int main() {
//device memory
int *device1, *device2;
//host memory
int host[NUM_ELEMS];
int output[NUM_ELEMS];
size_t numBytes = NUM_ELEMS * sizeof(int);
int i = 0; //loop counter
//Load host1 and host2 with values.
for (i = 0; i < NUM_ELEMS; i++) {
host[i] = i+1;
}
//Allocate memory for device vars.
cudaMalloc((void **)&device1, numBytes);
cudaMalloc((void **)&device2, numBytes);
//Transfer values from host to device.
cudaMemcpy(device1, &host, numBytes, cudaMemcpyHostToDevice);
//Launch transpose kernel on GPU with given parameters.
dim3 grid(DIM/2, DIM/2); //# of thread blocks
dim3 block(DIM/2, DIM/2); //# of threads per thread block
transpose<<<grid,block>>>(device1, device2);
//Get result from device to host.
cudaMemcpy(&output, device2, numBytes, cudaMemcpyDeviceToHost);
//Print out values.
printf("[");
for (i = 0; i < NUM_ELEMS; i++) {
printf("%d ", output[i]);
}
printf("]\n");
//Free all variables.
cudaFree(device1);
cudaFree(device2);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#define DIM 4
#define NUM_ELEMS DIM*DIM
__global__ void transpose(int *a, int *b) {
int row = blockIdx.x * DIM/2 + threadIdx.x;
int col = blockIdx.y * DIM/2 + threadIdx.y;
int newIndex = row * DIM + col;
int oldIndex = col * DIM + row;
b[newIndex] = a[oldIndex];
}
int main() {
//device memory
int *device1, *device2;
//host memory
int host[NUM_ELEMS];
int output[NUM_ELEMS];
size_t numBytes = NUM_ELEMS * sizeof(int);
int i = 0; //loop counter
//Load host1 and host2 with values.
for (i = 0; i < NUM_ELEMS; i++) {
host[i] = i+1;
}
//Allocate memory for device vars.
hipMalloc((void **)&device1, numBytes);
hipMalloc((void **)&device2, numBytes);
//Transfer values from host to device.
hipMemcpy(device1, &host, numBytes, hipMemcpyHostToDevice);
//Launch transpose kernel on GPU with given parameters.
dim3 grid(DIM/2, DIM/2); //# of thread blocks
dim3 block(DIM/2, DIM/2); //# of threads per thread block
transpose<<<grid,block>>>(device1, device2);
//Get result from device to host.
hipMemcpy(&output, device2, numBytes, hipMemcpyDeviceToHost);
//Print out values.
printf("[");
for (i = 0; i < NUM_ELEMS; i++) {
printf("%d ", output[i]);
}
printf("]\n");
//Free all variables.
hipFree(device1);
hipFree(device2);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define DIM 4
#define NUM_ELEMS DIM*DIM
__global__ void transpose(int *a, int *b) {
int row = blockIdx.x * DIM/2 + threadIdx.x;
int col = blockIdx.y * DIM/2 + threadIdx.y;
int newIndex = row * DIM + col;
int oldIndex = col * DIM + row;
b[newIndex] = a[oldIndex];
}
int main() {
//device memory
int *device1, *device2;
//host memory
int host[NUM_ELEMS];
int output[NUM_ELEMS];
size_t numBytes = NUM_ELEMS * sizeof(int);
int i = 0; //loop counter
//Load host1 and host2 with values.
for (i = 0; i < NUM_ELEMS; i++) {
host[i] = i+1;
}
//Allocate memory for device vars.
hipMalloc((void **)&device1, numBytes);
hipMalloc((void **)&device2, numBytes);
//Transfer values from host to device.
hipMemcpy(device1, &host, numBytes, hipMemcpyHostToDevice);
//Launch transpose kernel on GPU with given parameters.
dim3 grid(DIM/2, DIM/2); //# of thread blocks
dim3 block(DIM/2, DIM/2); //# of threads per thread block
transpose<<<grid,block>>>(device1, device2);
//Get result from device to host.
hipMemcpy(&output, device2, numBytes, hipMemcpyDeviceToHost);
//Print out values.
printf("[");
for (i = 0; i < NUM_ELEMS; i++) {
printf("%d ", output[i]);
}
printf("]\n");
//Free all variables.
hipFree(device1);
hipFree(device2);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9transposePiS_
.globl _Z9transposePiS_
.p2align 8
.type _Z9transposePiS_,@function
_Z9transposePiS_:
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_lshl_b32 s2, s14, 1
s_lshl_b32 s3, s15, 1
s_and_b32 s2, s2, 0x7ffffffe
s_and_b32 s3, s3, 0x7ffffffe
v_add_nc_u32_e32 v2, s2, v1
v_add_nc_u32_e32 v3, s3, v0
s_load_b128 s[0:3], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v0, v3, 2, v2
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v4, v[0:1], off
v_lshl_add_u32 v0, v2, 2, v3
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v4, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9transposePiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9transposePiS_, .Lfunc_end0-_Z9transposePiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9transposePiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9transposePiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#define DIM 4
#define NUM_ELEMS DIM*DIM
__global__ void transpose(int *a, int *b) {
int row = blockIdx.x * DIM/2 + threadIdx.x;
int col = blockIdx.y * DIM/2 + threadIdx.y;
int newIndex = row * DIM + col;
int oldIndex = col * DIM + row;
b[newIndex] = a[oldIndex];
}
int main() {
//device memory
int *device1, *device2;
//host memory
int host[NUM_ELEMS];
int output[NUM_ELEMS];
size_t numBytes = NUM_ELEMS * sizeof(int);
int i = 0; //loop counter
//Load host1 and host2 with values.
for (i = 0; i < NUM_ELEMS; i++) {
host[i] = i+1;
}
//Allocate memory for device vars.
hipMalloc((void **)&device1, numBytes);
hipMalloc((void **)&device2, numBytes);
//Transfer values from host to device.
hipMemcpy(device1, &host, numBytes, hipMemcpyHostToDevice);
//Launch transpose kernel on GPU with given parameters.
dim3 grid(DIM/2, DIM/2); //# of thread blocks
dim3 block(DIM/2, DIM/2); //# of threads per thread block
transpose<<<grid,block>>>(device1, device2);
//Get result from device to host.
hipMemcpy(&output, device2, numBytes, hipMemcpyDeviceToHost);
//Print out values.
printf("[");
for (i = 0; i < NUM_ELEMS; i++) {
printf("%d ", output[i]);
}
printf("]\n");
//Free all variables.
hipFree(device1);
hipFree(device2);
return 0;
} | .text
.file "transpose.hip"
.globl _Z24__device_stub__transposePiS_ # -- Begin function _Z24__device_stub__transposePiS_
.p2align 4, 0x90
.type _Z24__device_stub__transposePiS_,@function
_Z24__device_stub__transposePiS_: # @_Z24__device_stub__transposePiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9transposePiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z24__device_stub__transposePiS_, .Lfunc_end0-_Z24__device_stub__transposePiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $208, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq 1(%rax), %rcx
movl %ecx, 144(%rsp,%rax,4)
movq %rcx, %rax
cmpq $16, %rcx
jne .LBB1_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $64, %esi
callq hipMalloc
movq %rsp, %rdi
movl $64, %esi
callq hipMalloc
movq 8(%rsp), %rdi
leaq 144(%rsp), %rsi
movl $64, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $8589934594, %rdi # imm = 0x200000002
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9transposePiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 80(%rsp), %rdi
movl $64, %edx
movl $2, %ecx
callq hipMemcpy
movl $91, %edi
callq putchar@PLT
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl 80(%rsp,%rbx,4), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $16, %rbx
jne .LBB1_5
# %bb.6:
movl $.Lstr, %edi
callq puts@PLT
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $208, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9transposePiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9transposePiS_,@object # @_Z9transposePiS_
.section .rodata,"a",@progbits
.globl _Z9transposePiS_
.p2align 3, 0x0
_Z9transposePiS_:
.quad _Z24__device_stub__transposePiS_
.size _Z9transposePiS_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%d "
.size .L.str.1, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9transposePiS_"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "]"
.size .Lstr, 2
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__transposePiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9transposePiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9transposePiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2UR UR4, SR_CTAID.X ; /* 0x00000000000479c3 */
/* 0x000e220000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e620000002100 */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */
/* 0x000fc600078e00ff */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000ea60000002200 */
/*0050*/ S2UR UR5, SR_CTAID.Y ; /* 0x00000000000579c3 */
/* 0x000ee20000002600 */
/*0060*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x001fc8000800063f */
/*0070*/ ULOP3.LUT UR4, UR4, 0x7ffffffe, URZ, 0xc0, !UPT ; /* 0x7ffffffe04047892 */
/* 0x000fe4000f8ec03f */
/*0080*/ USHF.L.U32 UR5, UR5, 0x1, URZ ; /* 0x0000000105057899 */
/* 0x008fc8000800063f */
/*0090*/ IADD3 R0, R0, UR4, RZ ; /* 0x0000000400007c10 */
/* 0x002fe2000fffe0ff */
/*00a0*/ ULOP3.LUT UR5, UR5, 0x7ffffffe, URZ, 0xc0, !UPT ; /* 0x7ffffffe05057892 */
/* 0x000fcc000f8ec03f */
/*00b0*/ IADD3 R5, R5, UR5, RZ ; /* 0x0000000505057c10 */
/* 0x004fe2000fffe0ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00d0*/ LEA R2, R5, R0, 0x2 ; /* 0x0000000005027211 */
/* 0x000fca00078e10ff */
/*00e0*/ IMAD.WIDE R2, R2, R4, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0204 */
/*00f0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0100*/ LEA R5, R0, R5, 0x2 ; /* 0x0000000500057211 */
/* 0x000fca00078e10ff */
/*0110*/ IMAD.WIDE R4, R5, R4, c[0x0][0x168] ; /* 0x00005a0005047625 */
/* 0x000fca00078e0204 */
/*0120*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9transposePiS_
.globl _Z9transposePiS_
.p2align 8
.type _Z9transposePiS_,@function
_Z9transposePiS_:
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_lshl_b32 s2, s14, 1
s_lshl_b32 s3, s15, 1
s_and_b32 s2, s2, 0x7ffffffe
s_and_b32 s3, s3, 0x7ffffffe
v_add_nc_u32_e32 v2, s2, v1
v_add_nc_u32_e32 v3, s3, v0
s_load_b128 s[0:3], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v0, v3, 2, v2
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v4, v[0:1], off
v_lshl_add_u32 v0, v2, 2, v3
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v4, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9transposePiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9transposePiS_, .Lfunc_end0-_Z9transposePiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9transposePiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9transposePiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0010dbbf_00000000-6_transpose.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z9transposePiS_PiS_
.type _Z30__device_stub__Z9transposePiS_PiS_, @function
_Z30__device_stub__Z9transposePiS_PiS_:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9transposePiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z9transposePiS_PiS_, .-_Z30__device_stub__Z9transposePiS_PiS_
.globl _Z9transposePiS_
.type _Z9transposePiS_, @function
_Z9transposePiS_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z9transposePiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9transposePiS_, .-_Z9transposePiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "["
.LC1:
.string "%d "
.LC2:
.string "]\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $192, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movl $1, %eax
.L12:
movl %eax, 44(%rsp,%rax,4)
addq $1, %rax
cmpq $17, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $64, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, 24(%rsp)
movl $2, 28(%rsp)
movl $1, 32(%rsp)
movl $2, 36(%rsp)
movl $2, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
leaq 112(%rsp), %rbx
movl $2, %ecx
movl $64, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 176(%rsp), %r12
leaq .LC1(%rip), %rbp
.L14:
movl (%rbx), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L14
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $192, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z9transposePiS_PiS_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z9transposePiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9transposePiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "transpose.hip"
.globl _Z24__device_stub__transposePiS_ # -- Begin function _Z24__device_stub__transposePiS_
.p2align 4, 0x90
.type _Z24__device_stub__transposePiS_,@function
_Z24__device_stub__transposePiS_: # @_Z24__device_stub__transposePiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9transposePiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z24__device_stub__transposePiS_, .Lfunc_end0-_Z24__device_stub__transposePiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $208, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -16
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq 1(%rax), %rcx
movl %ecx, 144(%rsp,%rax,4)
movq %rcx, %rax
cmpq $16, %rcx
jne .LBB1_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $64, %esi
callq hipMalloc
movq %rsp, %rdi
movl $64, %esi
callq hipMalloc
movq 8(%rsp), %rdi
leaq 144(%rsp), %rsi
movl $64, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $8589934594, %rdi # imm = 0x200000002
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9transposePiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
leaq 80(%rsp), %rdi
movl $64, %edx
movl $2, %ecx
callq hipMemcpy
movl $91, %edi
callq putchar@PLT
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl 80(%rsp,%rbx,4), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $16, %rbx
jne .LBB1_5
# %bb.6:
movl $.Lstr, %edi
callq puts@PLT
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $208, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9transposePiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9transposePiS_,@object # @_Z9transposePiS_
.section .rodata,"a",@progbits
.globl _Z9transposePiS_
.p2align 3, 0x0
_Z9transposePiS_:
.quad _Z24__device_stub__transposePiS_
.size _Z9transposePiS_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%d "
.size .L.str.1, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9transposePiS_"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "]"
.size .Lstr, 2
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__transposePiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9transposePiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<iostream>
using namespace std;
int n = 100;
__host__ __device__ bool read(int n) {
return n != 0;
}
__host__ __device__ bool read0(int n) {
return n == 0;
}
__global__ void test(int n) {
if(read0(n)) {
printf("true\n");
}
else if(read(n)){
printf("false\n");
}
}
int main() {
dim3 block(8,2);
test<<<1,block>>>(n);
cudaDeviceSynchronize();
} | code for sm_80
Function : _Z4testi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x160], PT ; /* 0x00005800ff007a0c */
/* 0x000fda0003f05270 */
/*0020*/ @!P0 BRA 0x110 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0030*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0040*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x10] ; /* 0x01000400ff047624 */
/* 0x000fe200078e00ff */
/*0050*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0060*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x14] ; /* 0x01000500ff057624 */
/* 0x000fe200078e00ff */
/*0070*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*0080*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe40000000000 */
/*0090*/ MOV R11, 0x100 ; /* 0x00000100000b7802 */
/* 0x000fe40000000f00 */
/*00a0*/ MOV R20, 0x80 ; /* 0x0000008000147802 */
/* 0x000fe40000000f00 */
/*00b0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00c0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*00d0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*00e0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*00f0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0120*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0130*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0140*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0150*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*0160*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe40000000000 */
/*0170*/ MOV R11, 0x1e0 ; /* 0x000001e0000b7802 */
/* 0x000fe40000000f00 */
/*0180*/ MOV R20, 0x160 ; /* 0x0000016000147802 */
/* 0x000fe40000000f00 */
/*0190*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*01a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*01b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*01c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*01d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*01e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01f0*/ BRA 0x1f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<iostream>
using namespace std;
int n = 100;
__host__ __device__ bool read(int n) {
return n != 0;
}
__host__ __device__ bool read0(int n) {
return n == 0;
}
__global__ void test(int n) {
if(read0(n)) {
printf("true\n");
}
else if(read(n)){
printf("false\n");
}
}
int main() {
dim3 block(8,2);
test<<<1,block>>>(n);
cudaDeviceSynchronize();
} | .file "tmpxft_00163fbb_00000000-6_0.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z4readi
.type _Z4readi, @function
_Z4readi:
.LFB3669:
.cfi_startproc
endbr64
testl %edi, %edi
setne %al
ret
.cfi_endproc
.LFE3669:
.size _Z4readi, .-_Z4readi
.globl _Z5read0i
.type _Z5read0i, @function
_Z5read0i:
.LFB3670:
.cfi_startproc
endbr64
testl %edi, %edi
sete %al
ret
.cfi_endproc
.LFE3670:
.size _Z5read0i, .-_Z5read0i
.globl _Z22__device_stub__Z4testii
.type _Z22__device_stub__Z4testii, @function
_Z22__device_stub__Z4testii:
.LFB3696:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4testi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z22__device_stub__Z4testii, .-_Z22__device_stub__Z4testii
.globl _Z4testi
.type _Z4testi, @function
_Z4testi:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z22__device_stub__Z4testii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z4testi, .-_Z4testi
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $8, 8(%rsp)
movl $2, 12(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 8(%rsp), %rdx
movl $1, %ecx
movq 20(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L14:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl n(%rip), %edi
call _Z22__device_stub__Z4testii
jmp .L14
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4testi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4testi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl n
.data
.align 4
.type n, @object
.size n, 4
n:
.long 100
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
using namespace std;
int n = 100;
__host__ __device__ bool read(int n) {
return n != 0;
}
__host__ __device__ bool read0(int n) {
return n == 0;
}
__global__ void test(int n) {
if(read0(n)) {
printf("true\n");
}
else if(read(n)){
printf("false\n");
}
}
int main() {
dim3 block(8,2);
test<<<1,block>>>(n);
cudaDeviceSynchronize();
} | #include <hip/hip_runtime.h>
#include<iostream>
using namespace std;
int n = 100;
__host__ __device__ bool read(int n) {
return n != 0;
}
__host__ __device__ bool read0(int n) {
return n == 0;
}
__global__ void test(int n) {
if(read0(n)) {
printf("true\n");
}
else if(read(n)){
printf("false\n");
}
}
int main() {
dim3 block(8,2);
test<<<1,block>>>(n);
hipDeviceSynchronize();
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<iostream>
using namespace std;
int n = 100;
__host__ __device__ bool read(int n) {
return n != 0;
}
__host__ __device__ bool read0(int n) {
return n == 0;
}
__global__ void test(int n) {
if(read0(n)) {
printf("true\n");
}
else if(read(n)){
printf("false\n");
}
}
int main() {
dim3 block(8,2);
test<<<1,block>>>(n);
hipDeviceSynchronize();
} | .text
.file "0.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z4readi # -- Begin function _Z4readi
.p2align 4, 0x90
.type _Z4readi,@function
_Z4readi: # @_Z4readi
.cfi_startproc
# %bb.0:
testl %edi, %edi
setne %al
retq
.Lfunc_end0:
.size _Z4readi, .Lfunc_end0-_Z4readi
.cfi_endproc
# -- End function
.globl _Z5read0i # -- Begin function _Z5read0i
.p2align 4, 0x90
.type _Z5read0i,@function
_Z5read0i: # @_Z5read0i
.cfi_startproc
# %bb.0:
testl %edi, %edi
sete %al
retq
.Lfunc_end1:
.size _Z5read0i, .Lfunc_end1-_Z5read0i
.cfi_endproc
# -- End function
.globl _Z19__device_stub__testi # -- Begin function _Z19__device_stub__testi
.p2align 4, 0x90
.type _Z19__device_stub__testi,@function
_Z19__device_stub__testi: # @_Z19__device_stub__testi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movl %edi, 12(%rsp)
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z4testi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end2:
.size _Z19__device_stub__testi, .Lfunc_end2-_Z19__device_stub__testi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $8589934600, %rdx # imm = 0x200000008
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movl n(%rip), %eax
movl %eax, 12(%rsp)
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z4testi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $72, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4testi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type n,@object # @n
.data
.globl n
.p2align 2, 0x0
n:
.long 100 # 0x64
.size n, 4
.type _Z4testi,@object # @_Z4testi
.section .rodata,"a",@progbits
.globl _Z4testi
.p2align 3, 0x0
_Z4testi:
.quad _Z19__device_stub__testi
.size _Z4testi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4testi"
.size .L__unnamed_1, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__testi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4testi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00163fbb_00000000-6_0.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z4readi
.type _Z4readi, @function
_Z4readi:
.LFB3669:
.cfi_startproc
endbr64
testl %edi, %edi
setne %al
ret
.cfi_endproc
.LFE3669:
.size _Z4readi, .-_Z4readi
.globl _Z5read0i
.type _Z5read0i, @function
_Z5read0i:
.LFB3670:
.cfi_startproc
endbr64
testl %edi, %edi
sete %al
ret
.cfi_endproc
.LFE3670:
.size _Z5read0i, .-_Z5read0i
.globl _Z22__device_stub__Z4testii
.type _Z22__device_stub__Z4testii, @function
_Z22__device_stub__Z4testii:
.LFB3696:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z4testi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z22__device_stub__Z4testii, .-_Z22__device_stub__Z4testii
.globl _Z4testi
.type _Z4testi, @function
_Z4testi:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z22__device_stub__Z4testii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z4testi, .-_Z4testi
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $8, 8(%rsp)
movl $2, 12(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 8(%rsp), %rdx
movl $1, %ecx
movq 20(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L14:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl n(%rip), %edi
call _Z22__device_stub__Z4testii
jmp .L14
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4testi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4testi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl n
.data
.align 4
.type n, @object
.size n, 4
n:
.long 100
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "0.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z4readi # -- Begin function _Z4readi
.p2align 4, 0x90
.type _Z4readi,@function
_Z4readi: # @_Z4readi
.cfi_startproc
# %bb.0:
testl %edi, %edi
setne %al
retq
.Lfunc_end0:
.size _Z4readi, .Lfunc_end0-_Z4readi
.cfi_endproc
# -- End function
.globl _Z5read0i # -- Begin function _Z5read0i
.p2align 4, 0x90
.type _Z5read0i,@function
_Z5read0i: # @_Z5read0i
.cfi_startproc
# %bb.0:
testl %edi, %edi
sete %al
retq
.Lfunc_end1:
.size _Z5read0i, .Lfunc_end1-_Z5read0i
.cfi_endproc
# -- End function
.globl _Z19__device_stub__testi # -- Begin function _Z19__device_stub__testi
.p2align 4, 0x90
.type _Z19__device_stub__testi,@function
_Z19__device_stub__testi: # @_Z19__device_stub__testi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movl %edi, 12(%rsp)
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z4testi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end2:
.size _Z19__device_stub__testi, .Lfunc_end2-_Z19__device_stub__testi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $8589934600, %rdx # imm = 0x200000008
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movl n(%rip), %eax
movl %eax, 12(%rsp)
leaq 12(%rsp), %rax
movq %rax, 16(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z4testi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $72, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4testi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type n,@object # @n
.data
.globl n
.p2align 2, 0x0
n:
.long 100 # 0x64
.size n, 4
.type _Z4testi,@object # @_Z4testi
.section .rodata,"a",@progbits
.globl _Z4testi
.p2align 3, 0x0
_Z4testi:
.quad _Z19__device_stub__testi
.size _Z4testi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4testi"
.size .L__unnamed_1, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__testi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4testi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
#include <random>
#include <chrono>
using namespace std;
template<typename T>
std::vector<std::size_t> tag_sort(const std::vector<T>& v)
{
std::vector<std::size_t> result(v.size());
std::iota(std::begin(result), std::end(result), 0);
std::sort(std::begin(result), std::end(result),
[&v](const double & lhs, const double & rhs)
{
return v[lhs] < v[rhs];
}
);
return result;
}
int main(){
int vec_size = 10000000;
vector<double> values;
vector<int> indices;
double lower_bound = 0;
double upper_bound = 10000;
std::default_random_engine re;
std::uniform_real_distribution<double> unif(lower_bound,upper_bound);
for(int i = 0; i < vec_size; i++){
double a_random_double = unif(re);
values.push_back(a_random_double);
indices.push_back(i);
}
// generating values
//for(double i : values)
// cout << i << " ";
//cout << endl;
vector<double> values_cpu(values);
// sort using c++ stl vector
auto start_cpu = std::chrono::high_resolution_clock::now();
auto idices_cpu = tag_sort(values_cpu);
auto finish_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_cpu = finish_cpu - start_cpu;
cout <<" CPU single thread executing time: " << elapsed_cpu.count() << endl;
// output
//for (auto && elem:idxs)
// std::cout << elem << " : " << values[elem] << std::endl;
thrust::device_vector<double> values_gpu(values);
thrust::device_vector<int> indices_gpu(indices);
// sort using cuda gpu
auto start_gpu = std::chrono::high_resolution_clock::now();
thrust::sort_by_key(values_gpu.begin(), values_gpu.end(), indices_gpu.begin());
auto finish_gpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_gpu = finish_gpu - start_gpu;
cout <<" GPU CUDA executing time: " << elapsed_gpu.count() << endl;
//for(int i = 0; i < vec_size; i++)
// cout << indices[i] << " : " << values[i] << endl;
bool are_equal = true;
for(int i = 0; i < vec_size; i++)
if (idices_cpu[i] != indices_gpu[i]){
are_equal = false;
break;
}
cout << are_equal << endl;
return 0;
} | #include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
#include <random>
#include <chrono>
using namespace std;
template<typename T>
std::vector<std::size_t> tag_sort(const std::vector<T>& v)
{
std::vector<std::size_t> result(v.size());
std::iota(std::begin(result), std::end(result), 0);
std::sort(std::begin(result), std::end(result),
[&v](const double & lhs, const double & rhs)
{
return v[lhs] < v[rhs];
}
);
return result;
}
int main(){
int vec_size = 10000000;
vector<double> values;
vector<int> indices;
double lower_bound = 0;
double upper_bound = 10000;
std::default_random_engine re;
std::uniform_real_distribution<double> unif(lower_bound,upper_bound);
for(int i = 0; i < vec_size; i++){
double a_random_double = unif(re);
values.push_back(a_random_double);
indices.push_back(i);
}
// generating values
//for(double i : values)
// cout << i << " ";
//cout << endl;
vector<double> values_cpu(values);
// sort using c++ stl vector
auto start_cpu = std::chrono::high_resolution_clock::now();
auto idices_cpu = tag_sort(values_cpu);
auto finish_cpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_cpu = finish_cpu - start_cpu;
cout <<" CPU single thread executing time: " << elapsed_cpu.count() << endl;
// output
//for (auto && elem:idxs)
// std::cout << elem << " : " << values[elem] << std::endl;
thrust::device_vector<double> values_gpu(values);
thrust::device_vector<int> indices_gpu(indices);
// sort using cuda gpu
auto start_gpu = std::chrono::high_resolution_clock::now();
thrust::sort_by_key(values_gpu.begin(), values_gpu.end(), indices_gpu.begin());
auto finish_gpu = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_gpu = finish_gpu - start_gpu;
cout <<" GPU CUDA executing time: " << elapsed_gpu.count() << endl;
//for(int i = 0; i < vec_size; i++)
// cout << indices[i] << " : " << values[i] << endl;
bool are_equal = true;
for(int i = 0; i < vec_size; i++)
if (idices_cpu[i] != indices_gpu[i]){
are_equal = false;
break;
}
cout << are_equal << endl;
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void mykernel()
{
int i = blockIdx.x;
int j = threadIdx.x;
printf("Hello world from Kernel\tBlock id: %d\tThread id: %d\n", i, j);
}
int main()
{
printf("Hello world from CPU\n");
mykernel<<<4,5>>>();
cudaDeviceReset();
while(1);
return 0;
} | code for sm_80
Function : _Z8mykernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc800078e00ff */
/*0010*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fe20007ffe0ff */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0050*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e220000002500 */
/*0060*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe20007f1e0ff */
/*0070*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0080*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x0002a60000000a00 */
/*0090*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fe200000e06ff */
/*00a0*/ STL.64 [R1], R8 ; /* 0x0000000801007387 */
/* 0x0013e80000100a00 */
/*00b0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x002fc60000000000 */
/*00c0*/ MOV R11, 0x130 ; /* 0x00000130000b7802 */
/* 0x000fe40000000f00 */
/*00d0*/ MOV R20, 0xb0 ; /* 0x000000b000147802 */
/* 0x000fc40000000f00 */
/*00e0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00f0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0100*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0110*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0120*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x004fea0003c00000 */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void mykernel()
{
int i = blockIdx.x;
int j = threadIdx.x;
printf("Hello world from Kernel\tBlock id: %d\tThread id: %d\n", i, j);
}
int main()
{
printf("Hello world from CPU\n");
mykernel<<<4,5>>>();
cudaDeviceReset();
while(1);
return 0;
} | .file "tmpxft_0004f92f_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z8mykernelvv
.type _Z26__device_stub__Z8mykernelvv, @function
_Z26__device_stub__Z8mykernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z8mykernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z8mykernelvv, .-_Z26__device_stub__Z8mykernelvv
.globl _Z8mykernelv
.type _Z8mykernelv, @function
_Z8mykernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z8mykernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8mykernelv, .-_Z8mykernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello world from CPU\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $5, 20(%rsp)
movl $1, 24(%rsp)
movl $4, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
call cudaDeviceReset@PLT
.L13:
jmp .L13
.L15:
call _Z26__device_stub__Z8mykernelvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z8mykernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z8mykernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void mykernel()
{
int i = blockIdx.x;
int j = threadIdx.x;
printf("Hello world from Kernel\tBlock id: %d\tThread id: %d\n", i, j);
}
int main()
{
printf("Hello world from CPU\n");
mykernel<<<4,5>>>();
cudaDeviceReset();
while(1);
return 0;
} | #include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void mykernel()
{
int i = blockIdx.x;
int j = threadIdx.x;
printf("Hello world from Kernel\tBlock id: %d\tThread id: %d\n", i, j);
}
int main()
{
printf("Hello world from CPU\n");
mykernel<<<4,5>>>();
hipDeviceReset();
while(1);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void mykernel()
{
int i = blockIdx.x;
int j = threadIdx.x;
printf("Hello world from Kernel\tBlock id: %d\tThread id: %d\n", i, j);
}
int main()
{
printf("Hello world from CPU\n");
mykernel<<<4,5>>>();
hipDeviceReset();
while(1);
return 0;
} | .text
.file "kernel.hip"
.globl _Z23__device_stub__mykernelv # -- Begin function _Z23__device_stub__mykernelv
.p2align 4, 0x90
.type _Z23__device_stub__mykernelv,@function
_Z23__device_stub__mykernelv: # @_Z23__device_stub__mykernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8mykernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z23__device_stub__mykernelv, .Lfunc_end0-_Z23__device_stub__mykernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 1(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
je .LBB1_1
# %bb.2:
callq hipDeviceReset
.LBB1_1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8mykernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
callq hipDeviceReset
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8mykernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8mykernelv,@object # @_Z8mykernelv
.section .rodata,"a",@progbits
.globl _Z8mykernelv
.p2align 3, 0x0
_Z8mykernelv:
.quad _Z23__device_stub__mykernelv
.size _Z8mykernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8mykernelv"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello world from CPU"
.size .Lstr, 21
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__mykernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8mykernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004f92f_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z8mykernelvv
.type _Z26__device_stub__Z8mykernelvv, @function
_Z26__device_stub__Z8mykernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z8mykernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z8mykernelvv, .-_Z26__device_stub__Z8mykernelvv
.globl _Z8mykernelv
.type _Z8mykernelv, @function
_Z8mykernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z8mykernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z8mykernelv, .-_Z8mykernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello world from CPU\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $5, 20(%rsp)
movl $1, 24(%rsp)
movl $4, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
call cudaDeviceReset@PLT
.L13:
jmp .L13
.L15:
call _Z26__device_stub__Z8mykernelvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z8mykernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z8mykernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z23__device_stub__mykernelv # -- Begin function _Z23__device_stub__mykernelv
.p2align 4, 0x90
.type _Z23__device_stub__mykernelv,@function
_Z23__device_stub__mykernelv: # @_Z23__device_stub__mykernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8mykernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z23__device_stub__mykernelv, .Lfunc_end0-_Z23__device_stub__mykernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 1(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
je .LBB1_1
# %bb.2:
callq hipDeviceReset
.LBB1_1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z8mykernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
callq hipDeviceReset
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8mykernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8mykernelv,@object # @_Z8mykernelv
.section .rodata,"a",@progbits
.globl _Z8mykernelv
.p2align 3, 0x0
_Z8mykernelv:
.quad _Z23__device_stub__mykernelv
.size _Z8mykernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8mykernelv"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello world from CPU"
.size .Lstr, 21
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__mykernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8mykernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
#include <math.h>
using namespace std;
#define BLOCK_SIZE 4;
// Single-block, in-place tree reduction: after the final round input[0]
// holds the sum of input[0..n-1].
// Launch expectation: ONE block with blockDim.x = ceil(n/2) threads
// (so n <= 2048 given the 1024-thread block limit).
// number_of_threads is derived from blockDim.x only, so every branch below
// is uniform across the block and the barrier is reached by all threads.
__global__ void sum(int* input, int n) // global call to cuda function (host to device)
{
    const int tid = threadIdx.x;        // this thread's index in the block
    int step_size = 1;                  // distance between the pair each thread adds
    int number_of_threads = blockDim.x; // adders active in the current round
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size; // indices of the pair to combine
            if (snd < n)                     // tail guard when n is not a power of two
            {
                input[fst] += input[snd];    // add elements
            }
        }
        step_size <<= 1;                     // pairs are twice as far apart next round
        if (number_of_threads == 1)
            break;                           // uniform condition: all threads exit together
        // Integer ceiling division replaces ceil((float)x / 2.0): identical
        // result for any blockDim.x (<= 1024) without a per-iteration
        // int -> float -> double round trip.
        number_of_threads = (number_of_threads + 1) / 2;
        __syncthreads();                     // finish this round's adds before the next reads
    }
}
// Reads `count` integers from stdin, reduces them on the GPU with a single
// block of ceil(count/2) threads, and prints the resulting sum.
int main()
{
    int count = 0;
    int result = 0;      // initialized so a failed device copy cannot print garbage
    int *d = nullptr;
    cout << "\nEnter the number of elements : ";
    cin >> count;
    // One-block kernel: blockDim.x = ceil(count/2) must not exceed the
    // 1024-thread block limit, so at most 2048 elements are supported;
    // count == 0 would otherwise request an invalid 0-thread launch.
    if (count <= 0 || count > 2048)
    {
        cout << "\nElement count must be between 1 and 2048" << endl;
        return 1;
    }
    const int size = count * sizeof(int);
    int *h = new int[count];
    cout << "\nEnter the elements : \n";
    for (int i = 0; i < count; i++)
        cin >> h[i];
    cudaError_t err = cudaMalloc(&d, size);           // allocate device variable memory
    if (err != cudaSuccess)
    {
        cout << cudaGetErrorString(err) << endl;
        delete[] h;
        return 1;
    }
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);   // copy array from host to device
    // (count + 1) / 2 is the integer form of ceil((float)count / 2.0).
    sum<<<1, (count + 1) / 2>>>(d, count);
    err = cudaGetLastError();                         // catches bad launch configurations
    if (err != cudaSuccess)
        cout << cudaGetErrorString(err) << endl;
    // Blocking copy: also synchronizes with the kernel before reading input[0].
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "Sum is " << result << endl;
    getchar();
    cudaFree(d);       // free device memory
    delete[] h;        // free host memory
    return 0;
}
/*
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvcc ParRedSum.cu -o ParRedSum
ParRedSum.cu
Creating library ParRedSum.lib and object ParRedSum.exp
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvprof ./ParRedSum
Enter the number of elements : 4
Enter the elements :
2
49
12
54
==4900== NVPROF is profiling process 4900, command: ./ParRedSum
Sum is 117
==4900== Profiling application: ./ParRedSum
==4900== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 60.16% 2.4640us 1 2.4640us 2.4640us 2.4640us sum(int*, int)
25.00% 1.0240us 1 1.0240us 1.0240us 1.0240us [CUDA memcpy HtoD]
14.84% 608ns 1 608ns 608ns 608ns [CUDA memcpy DtoH]
API calls: 82.75% 203.24ms 1 203.24ms 203.24ms 203.24ms cudaMalloc
16.83% 41.338ms 1 41.338ms 41.338ms 41.338ms cuDevicePrimaryCtxRelease
0.16% 392.40us 97 4.0450us 100ns 220.50us cuDeviceGetAttribute
0.10% 243.20us 1 243.20us 243.20us 243.20us cudaFree
0.07% 170.80us 2 85.400us 62.900us 107.90us cudaMemcpy
0.06% 151.20us 1 151.20us 151.20us 151.20us cuModuleUnload
0.01% 29.200us 1 29.200us 29.200us 29.200us cudaLaunchKernel
0.01% 18.800us 1 18.800us 18.800us 18.800us cuDeviceTotalMem
0.00% 9.8000us 1 9.8000us 9.8000us 9.8000us cuDeviceGetPCIBusId
0.00% 1.3000us 3 433ns 200ns 800ns cuDeviceGetCount
0.00% 900ns 1 900ns 900ns 900ns cuDeviceGetName
0.00% 700ns 2 350ns 100ns 600ns cuDeviceGet
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetUuid
*/ | code for sm_80
Function : _Z3sumPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0050*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0060*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*0070*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0080*/ IMAD.SHL.U32 R8, R9, 0x2, RZ ; /* 0x0000000209087824 */
/* 0x001fe400078e00ff */
/*0090*/ ISETP.GE.AND P1, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fe20003f26270 */
/*00a0*/ BSSY B0, 0x190 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*00b0*/ ISETP.NE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fd60003f05270 */
/*00c0*/ @P1 BRA 0x180 ; /* 0x000000b000001947 */
/* 0x000fea0003800000 */
/*00d0*/ IMAD R5, R8, UR4, RZ ; /* 0x0000000408057c24 */
/* 0x000fca000f8e02ff */
/*00e0*/ IADD3 R3, R5, UR4, RZ ; /* 0x0000000405037c10 */
/* 0x000fc8000fffe0ff */
/*00f0*/ ISETP.GE.AND P1, PT, R3, c[0x0][0x168], PT ; /* 0x00005a0003007a0c */
/* 0x000fda0003f26270 */
/*0100*/ @P1 BRA 0x180 ; /* 0x0000007000001947 */
/* 0x000fea0003800000 */
/*0110*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */
/* 0x000fd400000001ff */
/*0120*/ IMAD.WIDE R2, R3, R4, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fc800078e0204 */
/*0130*/ IMAD.WIDE R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */
/* 0x000fe400078e0204 */
/*0140*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea8000c1e1900 */
/*0150*/ LDG.E R6, [R4.64] ; /* 0x0000000604067981 */
/* 0x000ea4000c1e1900 */
/*0160*/ IMAD.IADD R7, R6, 0x1, R3 ; /* 0x0000000106077824 */
/* 0x004fca00078e0203 */
/*0170*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e4000c101906 */
/*0180*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0190*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*01a0*/ I2F R4, R0 ; /* 0x0000000000047306 */
/* 0x001e220000201400 */
/*01b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01c0*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x000fcc000800063f */
/*01d0*/ F2F.F64.F32 R2, R4 ; /* 0x0000000400027310 */
/* 0x001e240000201800 */
/*01e0*/ DMUL R2, R2, 0.5 ; /* 0x3fe0000002027828 */
/* 0x001e0c0000000000 */
/*01f0*/ F2I.F64.CEIL R0, R2 ; /* 0x0000000200007311 */
/* 0x001e240000309100 */
/*0200*/ ISETP.GT.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x001fda0003f04270 */
/*0210*/ @P0 BRA 0x90 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
#include <math.h>
using namespace std;
#define BLOCK_SIZE 4;
// Single-block, in-place tree reduction: after the final round input[0]
// holds the sum of input[0..n-1].
// Launch expectation: ONE block with blockDim.x = ceil(n/2) threads
// (so n <= 2048 given the 1024-thread block limit).
// number_of_threads is derived from blockDim.x only, so every branch below
// is uniform across the block and the barrier is reached by all threads.
__global__ void sum(int* input, int n) // global call to cuda function (host to device)
{
    const int tid = threadIdx.x;        // this thread's index in the block
    int step_size = 1;                  // distance between the pair each thread adds
    int number_of_threads = blockDim.x; // adders active in the current round
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size; // indices of the pair to combine
            if (snd < n)                     // tail guard when n is not a power of two
            {
                input[fst] += input[snd];    // add elements
            }
        }
        step_size <<= 1;                     // pairs are twice as far apart next round
        if (number_of_threads == 1)
            break;                           // uniform condition: all threads exit together
        // Integer ceiling division replaces ceil((float)x / 2.0): identical
        // result for any blockDim.x (<= 1024) without a per-iteration
        // int -> float -> double round trip.
        number_of_threads = (number_of_threads + 1) / 2;
        __syncthreads();                     // finish this round's adds before the next reads
    }
}
// Reads `count` integers from stdin, reduces them on the GPU with a single
// block of ceil(count/2) threads, and prints the resulting sum.
int main()
{
    int count = 0;
    int result = 0;      // initialized so a failed device copy cannot print garbage
    int *d = nullptr;
    cout << "\nEnter the number of elements : ";
    cin >> count;
    // One-block kernel: blockDim.x = ceil(count/2) must not exceed the
    // 1024-thread block limit, so at most 2048 elements are supported;
    // count == 0 would otherwise request an invalid 0-thread launch.
    if (count <= 0 || count > 2048)
    {
        cout << "\nElement count must be between 1 and 2048" << endl;
        return 1;
    }
    const int size = count * sizeof(int);
    int *h = new int[count];
    cout << "\nEnter the elements : \n";
    for (int i = 0; i < count; i++)
        cin >> h[i];
    cudaError_t err = cudaMalloc(&d, size);           // allocate device variable memory
    if (err != cudaSuccess)
    {
        cout << cudaGetErrorString(err) << endl;
        delete[] h;
        return 1;
    }
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);   // copy array from host to device
    // (count + 1) / 2 is the integer form of ceil((float)count / 2.0).
    sum<<<1, (count + 1) / 2>>>(d, count);
    err = cudaGetLastError();                         // catches bad launch configurations
    if (err != cudaSuccess)
        cout << cudaGetErrorString(err) << endl;
    // Blocking copy: also synchronizes with the kernel before reading input[0].
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "Sum is " << result << endl;
    getchar();
    cudaFree(d);       // free device memory
    delete[] h;        // free host memory
    return 0;
}
/*
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvcc ParRedSum.cu -o ParRedSum
ParRedSum.cu
Creating library ParRedSum.lib and object ParRedSum.exp
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvprof ./ParRedSum
Enter the number of elements : 4
Enter the elements :
2
49
12
54
==4900== NVPROF is profiling process 4900, command: ./ParRedSum
Sum is 117
==4900== Profiling application: ./ParRedSum
==4900== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 60.16% 2.4640us 1 2.4640us 2.4640us 2.4640us sum(int*, int)
25.00% 1.0240us 1 1.0240us 1.0240us 1.0240us [CUDA memcpy HtoD]
14.84% 608ns 1 608ns 608ns 608ns [CUDA memcpy DtoH]
API calls: 82.75% 203.24ms 1 203.24ms 203.24ms 203.24ms cudaMalloc
16.83% 41.338ms 1 41.338ms 41.338ms 41.338ms cuDevicePrimaryCtxRelease
0.16% 392.40us 97 4.0450us 100ns 220.50us cuDeviceGetAttribute
0.10% 243.20us 1 243.20us 243.20us 243.20us cudaFree
0.07% 170.80us 2 85.400us 62.900us 107.90us cudaMemcpy
0.06% 151.20us 1 151.20us 151.20us 151.20us cuModuleUnload
0.01% 29.200us 1 29.200us 29.200us 29.200us cudaLaunchKernel
0.01% 18.800us 1 18.800us 18.800us 18.800us cuDeviceTotalMem
0.00% 9.8000us 1 9.8000us 9.8000us 9.8000us cuDeviceGetPCIBusId
0.00% 1.3000us 3 433ns 200ns 800ns cuDeviceGetCount
0.00% 900ns 1 900ns 900ns 900ns cuDeviceGetName
0.00% 700ns 2 350ns 100ns 600ns cuDeviceGet
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetUuid
*/ | .file "tmpxft_0002ac37_00000000-6_ParRedSum.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3711:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3711:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z3sumPiiPii
.type _Z23__device_stub__Z3sumPiiPii, @function
_Z23__device_stub__Z3sumPiiPii:
.LFB3733:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3sumPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3733:
.size _Z23__device_stub__Z3sumPiiPii, .-_Z23__device_stub__Z3sumPiiPii
.globl _Z3sumPii
.type _Z3sumPii, @function
_Z3sumPii:
.LFB3734:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3sumPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3734:
.size _Z3sumPii, .-_Z3sumPii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "\nEnter the number of elements : "
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "\nEnter the elements : \n"
.LC6:
.string "Sum is "
.text
.globl main
.type main, @function
main:
.LFB3708:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $48, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $0, 4(%rsp)
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq 4(%rsp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSirsERi@PLT
movl 4(%rsp), %ebx
movslq %ebx, %rdi
movabsq $2305843009213693950, %rax
cmpq %rdi, %rax
jb .L12
leal 0(,%rbx,4), %r14d
salq $2, %rdi
call _Znam@PLT
movq %rax, %r13
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %r13, %rbp
movl $0, %ebx
leaq _ZSt3cin(%rip), %r12
cmpl $0, 4(%rsp)
jle .L14
.L16:
movq %rbp, %rsi
movq %r12, %rdi
call _ZNSirsERi@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 4(%rsp)
jg .L16
.L14:
movslq %r14d, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtsi2ssl 4(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LC2(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC7(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC3(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L17
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC5(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L17:
cvttsd2siq %xmm3, %rax
movl %eax, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L18:
leaq 28(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 28(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq stdin(%rip), %rdi
call getc@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call _ZdaPv@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
movq 40(%rsp), %rax
subq %fs:40, %rax
je .L15
call __stack_chk_fail@PLT
.L15:
call __cxa_throw_bad_array_new_length@PLT
.L23:
movl 4(%rsp), %esi
movq 8(%rsp), %rdi
call _Z23__device_stub__Z3sumPiiPii
jmp .L18
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3708:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z3sumPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3736:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3sumPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3736:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1071644672
.align 8
.LC3:
.long 0
.long 1127219200
.align 8
.LC5:
.long 0
.long 1072693248
.align 8
.LC7:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
#include <math.h>
using namespace std;
#define BLOCK_SIZE 4;
// Single-block, in-place tree reduction: after the final round input[0]
// holds the sum of input[0..n-1].
// Launch expectation: ONE block with blockDim.x = ceil(n/2) threads
// (so n <= 2048 given the 1024-thread block limit).
// number_of_threads is derived from blockDim.x only, so every branch below
// is uniform across the block and the barrier is reached by all threads.
__global__ void sum(int* input, int n) // global call to cuda function (host to device)
{
    const int tid = threadIdx.x;        // this thread's index in the block
    int step_size = 1;                  // distance between the pair each thread adds
    int number_of_threads = blockDim.x; // adders active in the current round
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size; // indices of the pair to combine
            if (snd < n)                     // tail guard when n is not a power of two
            {
                input[fst] += input[snd];    // add elements
            }
        }
        step_size <<= 1;                     // pairs are twice as far apart next round
        if (number_of_threads == 1)
            break;                           // uniform condition: all threads exit together
        // Integer ceiling division replaces ceil((float)x / 2.0): identical
        // result for any blockDim.x (<= 1024) without a per-iteration
        // int -> float -> double round trip.
        number_of_threads = (number_of_threads + 1) / 2;
        __syncthreads();                     // finish this round's adds before the next reads
    }
}
// Reads `count` integers from stdin, reduces them on the GPU with a single
// block of ceil(count/2) threads, and prints the resulting sum.
int main()
{
    int count = 0;
    int result = 0;      // initialized so a failed device copy cannot print garbage
    int *d = nullptr;
    cout << "\nEnter the number of elements : ";
    cin >> count;
    // One-block kernel: blockDim.x = ceil(count/2) must not exceed the
    // 1024-thread block limit, so at most 2048 elements are supported;
    // count == 0 would otherwise request an invalid 0-thread launch.
    if (count <= 0 || count > 2048)
    {
        cout << "\nElement count must be between 1 and 2048" << endl;
        return 1;
    }
    const int size = count * sizeof(int);
    int *h = new int[count];
    cout << "\nEnter the elements : \n";
    for (int i = 0; i < count; i++)
        cin >> h[i];
    cudaError_t err = cudaMalloc(&d, size);           // allocate device variable memory
    if (err != cudaSuccess)
    {
        cout << cudaGetErrorString(err) << endl;
        delete[] h;
        return 1;
    }
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);   // copy array from host to device
    // (count + 1) / 2 is the integer form of ceil((float)count / 2.0).
    sum<<<1, (count + 1) / 2>>>(d, count);
    err = cudaGetLastError();                         // catches bad launch configurations
    if (err != cudaSuccess)
        cout << cudaGetErrorString(err) << endl;
    // Blocking copy: also synchronizes with the kernel before reading input[0].
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "Sum is " << result << endl;
    getchar();
    cudaFree(d);       // free device memory
    delete[] h;        // free host memory
    return 0;
}
/*
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvcc ParRedSum.cu -o ParRedSum
ParRedSum.cu
Creating library ParRedSum.lib and object ParRedSum.exp
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvprof ./ParRedSum
Enter the number of elements : 4
Enter the elements :
2
49
12
54
==4900== NVPROF is profiling process 4900, command: ./ParRedSum
Sum is 117
==4900== Profiling application: ./ParRedSum
==4900== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 60.16% 2.4640us 1 2.4640us 2.4640us 2.4640us sum(int*, int)
25.00% 1.0240us 1 1.0240us 1.0240us 1.0240us [CUDA memcpy HtoD]
14.84% 608ns 1 608ns 608ns 608ns [CUDA memcpy DtoH]
API calls: 82.75% 203.24ms 1 203.24ms 203.24ms 203.24ms cudaMalloc
16.83% 41.338ms 1 41.338ms 41.338ms 41.338ms cuDevicePrimaryCtxRelease
0.16% 392.40us 97 4.0450us 100ns 220.50us cuDeviceGetAttribute
0.10% 243.20us 1 243.20us 243.20us 243.20us cudaFree
0.07% 170.80us 2 85.400us 62.900us 107.90us cudaMemcpy
0.06% 151.20us 1 151.20us 151.20us 151.20us cuModuleUnload
0.01% 29.200us 1 29.200us 29.200us 29.200us cudaLaunchKernel
0.01% 18.800us 1 18.800us 18.800us 18.800us cuDeviceTotalMem
0.00% 9.8000us 1 9.8000us 9.8000us 9.8000us cuDeviceGetPCIBusId
0.00% 1.3000us 3 433ns 200ns 800ns cuDeviceGetCount
0.00% 900ns 1 900ns 900ns 900ns cuDeviceGetName
0.00% 700ns 2 350ns 100ns 600ns cuDeviceGet
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetUuid
*/ | #include "hip/hip_runtime.h"
#include <iostream>
#include <numeric>
#include <math.h>
using namespace std;
#define BLOCK_SIZE 4;
// Single-block, in-place tree reduction: after the final round input[0]
// holds the sum of input[0..n-1].
// Launch expectation: ONE block with blockDim.x = ceil(n/2) threads
// (so n <= 2048 given the 1024-thread block limit).
// number_of_threads is derived from blockDim.x only, so every branch below
// is uniform across the block and the barrier is reached by all threads.
__global__ void sum(int* input, int n) // global call to cuda function (host to device)
{
    const int tid = threadIdx.x;        // this thread's index in the block
    int step_size = 1;                  // distance between the pair each thread adds
    int number_of_threads = blockDim.x; // adders active in the current round
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size; // indices of the pair to combine
            if (snd < n)                     // tail guard when n is not a power of two
            {
                input[fst] += input[snd];    // add elements
            }
        }
        step_size <<= 1;                     // pairs are twice as far apart next round
        if (number_of_threads == 1)
            break;                           // uniform condition: all threads exit together
        // Integer ceiling division replaces ceil((float)x / 2.0): identical
        // result for any blockDim.x (<= 1024) without a per-iteration
        // int -> float -> double round trip.
        number_of_threads = (number_of_threads + 1) / 2;
        __syncthreads();                     // finish this round's adds before the next reads
    }
}
int main()
{
int count=0;
int result;
int *d;
cout<<"\nEnter the number of elements : ";
cin>>count;
const int size = count * sizeof(int);
int *h;
h = new int[count];
cout<<"\nEnter the elements : \n";
for(int i=0;i<count;i++)
cin>>h[i];
hipMalloc(&d, size); // allocate device variable memory
hipMemcpy(d, h, size, hipMemcpyHostToDevice); // copy array from host to device
//cout<<ceil((float)count/2.0);
sum <<<1, ceil((float)count/2.0) >>>(d,count); // function call func_name<<<no_of_blocks,no_of_threads>>>(args)
hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost); // copy result back from device to host
cout << "Sum is " << result << endl;
getchar();
hipFree(d); // free device memory
delete[] h;
return 0;
}
/*
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvcc ParRedSum.cu -o ParRedSum
ParRedSum.cu
Creating library ParRedSum.lib and object ParRedSum.exp
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvprof ./ParRedSum
Enter the number of elements : 4
Enter the elements :
2
49
12
54
==4900== NVPROF is profiling process 4900, command: ./ParRedSum
Sum is 117
==4900== Profiling application: ./ParRedSum
==4900== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 60.16% 2.4640us 1 2.4640us 2.4640us 2.4640us sum(int*, int)
25.00% 1.0240us 1 1.0240us 1.0240us 1.0240us [CUDA memcpy HtoD]
14.84% 608ns 1 608ns 608ns 608ns [CUDA memcpy DtoH]
API calls: 82.75% 203.24ms 1 203.24ms 203.24ms 203.24ms cudaMalloc
16.83% 41.338ms 1 41.338ms 41.338ms 41.338ms cuDevicePrimaryCtxRelease
0.16% 392.40us 97 4.0450us 100ns 220.50us cuDeviceGetAttribute
0.10% 243.20us 1 243.20us 243.20us 243.20us cudaFree
0.07% 170.80us 2 85.400us 62.900us 107.90us cudaMemcpy
0.06% 151.20us 1 151.20us 151.20us 151.20us cuModuleUnload
0.01% 29.200us 1 29.200us 29.200us 29.200us cudaLaunchKernel
0.01% 18.800us 1 18.800us 18.800us 18.800us cuDeviceTotalMem
0.00% 9.8000us 1 9.8000us 9.8000us 9.8000us cuDeviceGetPCIBusId
0.00% 1.3000us 3 433ns 200ns 800ns cuDeviceGetCount
0.00% 900ns 1 900ns 900ns 900ns cuDeviceGetName
0.00% 700ns 2 350ns 100ns 600ns cuDeviceGet
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetUuid
*/ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include <iostream>
#include <numeric>
#include <math.h>
using namespace std;
#define BLOCK_SIZE 4;
// Single-block, in-place tree reduction: after the final round input[0]
// holds the sum of input[0..n-1].
// Launch expectation: ONE block with blockDim.x = ceil(n/2) threads
// (so n <= 2048 given the 1024-thread block limit).
// number_of_threads is derived from blockDim.x only, so every branch below
// is uniform across the block and the barrier is reached by all threads.
__global__ void sum(int* input, int n) // global call to cuda function (host to device)
{
    const int tid = threadIdx.x;        // this thread's index in the block
    int step_size = 1;                  // distance between the pair each thread adds
    int number_of_threads = blockDim.x; // adders active in the current round
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size; // indices of the pair to combine
            if (snd < n)                     // tail guard when n is not a power of two
            {
                input[fst] += input[snd];    // add elements
            }
        }
        step_size <<= 1;                     // pairs are twice as far apart next round
        if (number_of_threads == 1)
            break;                           // uniform condition: all threads exit together
        // Integer ceiling division replaces ceil((float)x / 2.0): identical
        // result for any blockDim.x (<= 1024) without a per-iteration
        // int -> float -> double round trip.
        number_of_threads = (number_of_threads + 1) / 2;
        __syncthreads();                     // finish this round's adds before the next reads
    }
}
int main()
{
int count=0;
int result;
int *d;
cout<<"\nEnter the number of elements : ";
cin>>count;
const int size = count * sizeof(int);
int *h;
h = new int[count];
cout<<"\nEnter the elements : \n";
for(int i=0;i<count;i++)
cin>>h[i];
hipMalloc(&d, size); // allocate device variable memory
hipMemcpy(d, h, size, hipMemcpyHostToDevice); // copy array from host to device
//cout<<ceil((float)count/2.0);
sum <<<1, ceil((float)count/2.0) >>>(d,count); // function call func_name<<<no_of_blocks,no_of_threads>>>(args)
hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost); // copy result back from device to host
cout << "Sum is " << result << endl;
getchar();
hipFree(d); // free device memory
delete[] h;
return 0;
}
/*
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvcc ParRedSum.cu -o ParRedSum
ParRedSum.cu
Creating library ParRedSum.lib and object ParRedSum.exp
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvprof ./ParRedSum
Enter the number of elements : 4
Enter the elements :
2
49
12
54
==4900== NVPROF is profiling process 4900, command: ./ParRedSum
Sum is 117
==4900== Profiling application: ./ParRedSum
==4900== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 60.16% 2.4640us 1 2.4640us 2.4640us 2.4640us sum(int*, int)
25.00% 1.0240us 1 1.0240us 1.0240us 1.0240us [CUDA memcpy HtoD]
14.84% 608ns 1 608ns 608ns 608ns [CUDA memcpy DtoH]
API calls: 82.75% 203.24ms 1 203.24ms 203.24ms 203.24ms cudaMalloc
16.83% 41.338ms 1 41.338ms 41.338ms 41.338ms cuDevicePrimaryCtxRelease
0.16% 392.40us 97 4.0450us 100ns 220.50us cuDeviceGetAttribute
0.10% 243.20us 1 243.20us 243.20us 243.20us cudaFree
0.07% 170.80us 2 85.400us 62.900us 107.90us cudaMemcpy
0.06% 151.20us 1 151.20us 151.20us 151.20us cuModuleUnload
0.01% 29.200us 1 29.200us 29.200us 29.200us cudaLaunchKernel
0.01% 18.800us 1 18.800us 18.800us 18.800us cuDeviceTotalMem
0.00% 9.8000us 1 9.8000us 9.8000us 9.8000us cuDeviceGetPCIBusId
0.00% 1.3000us 3 433ns 200ns 800ns cuDeviceGetCount
0.00% 900ns 1 900ns 900ns 900ns cuDeviceGetName
0.00% 700ns 2 350ns 100ns 600ns cuDeviceGet
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetUuid
*/ | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3sumPii
.globl _Z3sumPii
.p2align 8
.type _Z3sumPii,@function
_Z3sumPii:
s_load_b32 s4, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s2, s4, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s2
s_cbranch_vccnz .LBB0_7
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s1, s[0:1], 0x8
s_and_b32 s0, 0xffff, s4
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v6, s0 :: v_dual_lshlrev_b32 v5, 1, v0
s_mov_b32 s4, 1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
v_cvt_f32_i32_e32 v1, v6
s_lshl_b32 s4, s4, 1
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
v_cvt_f64_f32_e32 v[1:2], v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[1:2], v[1:2], 0.5
v_ceil_f64_e32 v[1:2], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f64_e32 v6, v[1:2]
v_cmp_gt_i32_e64 s0, 1, v6
s_delay_alu instid0(VALU_DEP_1)
s_and_not1_b32 vcc_lo, exec_lo, s0
s_cbranch_vccz .LBB0_7
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v1, v5, s4
v_cmp_lt_u32_e32 vcc_lo, v0, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v3, s4, v1
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e64 s0, s1, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s5, vcc_lo, s0
s_and_saveexec_b32 s0, s5
s_cbranch_execz .LBB0_5
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
s_clause 0x1
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[1:2], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v3, v4, v3
global_store_b32 v[1:2], v3, off
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s0
v_cmp_eq_u32_e32 vcc_lo, 1, v6
s_cbranch_vccz .LBB0_2
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3sumPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 6
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3sumPii, .Lfunc_end0-_Z3sumPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3sumPii
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z3sumPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include <iostream>
#include <numeric>
#include <math.h>
using namespace std;
#define BLOCK_SIZE 4;
// Single-block, in-place tree reduction: after the final round input[0]
// holds the sum of input[0..n-1].
// Launch expectation: ONE block with blockDim.x = ceil(n/2) threads
// (so n <= 2048 given the 1024-thread block limit).
// number_of_threads is derived from blockDim.x only, so every branch below
// is uniform across the block and the barrier is reached by all threads.
__global__ void sum(int* input, int n) // global call to cuda function (host to device)
{
    const int tid = threadIdx.x;        // this thread's index in the block
    int step_size = 1;                  // distance between the pair each thread adds
    int number_of_threads = blockDim.x; // adders active in the current round
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size; // indices of the pair to combine
            if (snd < n)                     // tail guard when n is not a power of two
            {
                input[fst] += input[snd];    // add elements
            }
        }
        step_size <<= 1;                     // pairs are twice as far apart next round
        if (number_of_threads == 1)
            break;                           // uniform condition: all threads exit together
        // Integer ceiling division replaces ceil((float)x / 2.0): identical
        // result for any blockDim.x (<= 1024) without a per-iteration
        // int -> float -> double round trip.
        number_of_threads = (number_of_threads + 1) / 2;
        __syncthreads();                     // finish this round's adds before the next reads
    }
}
/*
 * Reads `count` integers from stdin, reduces them on the GPU with the
 * single-block `sum` kernel, and prints the total.
 *
 * Improvements over the original:
 *  - rejects a non-positive element count (previously `new int[count]`
 *    could be handed a zero or negative size),
 *  - checks the HIP allocation/copy calls (0 == hipSuccess) instead of
 *    silently printing garbage when the device is unavailable,
 *  - returns a non-zero exit code on failure.
 */
int main()
{
    int count = 0;
    int result = 0;              // reduction result copied back from the device
    int *d = nullptr;            // device buffer (input, reduced in place)

    cout << "\nEnter the number of elements : ";
    cin >> count;
    if (count <= 0)
    {
        cerr << "Invalid number of elements\n";
        return 1;
    }

    const int size = count * sizeof(int);
    int *h = new int[count];     // host-side input buffer
    cout << "\nEnter the elements : \n";
    for (int i = 0; i < count; i++)
        cin >> h[i];

    int err = hipMalloc(&d, size);                              // allocate device memory
    if (err == 0)
        err = hipMemcpy(d, h, size, hipMemcpyHostToDevice);     // host -> device
    if (err != 0)
    {
        cerr << "HIP allocation/copy failed (error " << err << ")\n";
        delete[] h;
        return 1;
    }

    // One block; each thread folds one pair, so ceil(count/2) threads suffice.
    sum <<<1, ceil((float)count / 2.0)>>> (d, count);

    // hipMemcpy is blocking, so this also waits for the kernel to finish.
    err = hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost);
    if (err == 0)
        cout << "Sum is " << result << endl;
    else
        cerr << "HIP device-to-host copy failed (error " << err << ")\n";

    getchar();                   // original behavior: wait for a keypress
    hipFree(d);                  // release device memory
    delete[] h;                  // release host memory
    return (err == 0) ? 0 : 1;
}
/*
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvcc ParRedSum.cu -o ParRedSum
ParRedSum.cu
Creating library ParRedSum.lib and object ParRedSum.exp
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvprof ./ParRedSum
Enter the number of elements : 4
Enter the elements :
2
49
12
54
==4900== NVPROF is profiling process 4900, command: ./ParRedSum
Sum is 117
==4900== Profiling application: ./ParRedSum
==4900== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 60.16% 2.4640us 1 2.4640us 2.4640us 2.4640us sum(int*, int)
25.00% 1.0240us 1 1.0240us 1.0240us 1.0240us [CUDA memcpy HtoD]
14.84% 608ns 1 608ns 608ns 608ns [CUDA memcpy DtoH]
API calls: 82.75% 203.24ms 1 203.24ms 203.24ms 203.24ms cudaMalloc
16.83% 41.338ms 1 41.338ms 41.338ms 41.338ms cuDevicePrimaryCtxRelease
0.16% 392.40us 97 4.0450us 100ns 220.50us cuDeviceGetAttribute
0.10% 243.20us 1 243.20us 243.20us 243.20us cudaFree
0.07% 170.80us 2 85.400us 62.900us 107.90us cudaMemcpy
0.06% 151.20us 1 151.20us 151.20us 151.20us cuModuleUnload
0.01% 29.200us 1 29.200us 29.200us 29.200us cudaLaunchKernel
0.01% 18.800us 1 18.800us 18.800us 18.800us cuDeviceTotalMem
0.00% 9.8000us 1 9.8000us 9.8000us 9.8000us cuDeviceGetPCIBusId
0.00% 1.3000us 3 433ns 200ns 800ns cuDeviceGetCount
0.00% 900ns 1 900ns 900ns 900ns cuDeviceGetName
0.00% 700ns 2 350ns 100ns 600ns cuDeviceGet
0.00% 400ns 1 400ns 400ns 400ns cuDeviceGetLuid
0.00% 200ns 1 200ns 200ns 200ns cuDeviceGetUuid
*/ | .text
.file "ParRedSum.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__sumPii # -- Begin function _Z18__device_stub__sumPii
.p2align 4, 0x90
.type _Z18__device_stub__sumPii,@function
_Z18__device_stub__sumPii: # @_Z18__device_stub__sumPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3sumPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z18__device_stub__sumPii, .Lfunc_end0-_Z18__device_stub__sumPii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x3fe0000000000000 # double 0.5
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $104, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $0, 12(%rsp)
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $32, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
leaq 12(%rsp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSirsERi
movslq 12(%rsp), %rax
leal (,%rax,4), %ebp
leaq (,%rax,4), %rcx
testq %rax, %rax
movq $-1, %rdi
cmovnsq %rcx, %rdi
callq _Znam
movq %rax, %rbx
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $23, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
cmpl $0, 12(%rsp)
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
xorl %r15d, %r15d
movq %rbx, %r14
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $_ZSt3cin, %edi
movq %r14, %rsi
callq _ZNSirsERi
incq %r15
movslq 12(%rsp), %rax
addq $4, %r14
cmpq %rax, %r15
jl .LBB1_2
.LBB1_3: # %._crit_edge
movslq %ebp, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
cvtsi2ssl 12(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LCPI1_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %rax
movl %eax, %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $1, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_5
# %bb.4:
movq 16(%rsp), %rax
movl 12(%rsp), %ecx
movq %rax, 96(%rsp)
movl %ecx, 28(%rsp)
leaq 96(%rsp), %rax
movq %rax, 32(%rsp)
leaq 28(%rsp), %rax
movq %rax, 40(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z3sumPii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_5:
movq 16(%rsp), %rsi
leaq 32(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 32(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB1_10
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB1_8
# %bb.7:
movzbl 67(%r14), %ecx
jmp .LBB1_9
.LBB1_8:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB1_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq stdin(%rip), %rdi
callq getc
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq _ZdaPv
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_10:
.cfi_def_cfa_offset 144
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3sumPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3sumPii,@object # @_Z3sumPii
.section .rodata,"a",@progbits
.globl _Z3sumPii
.p2align 3, 0x0
_Z3sumPii:
.quad _Z18__device_stub__sumPii
.size _Z3sumPii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\nEnter the number of elements : "
.size .L.str, 33
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\nEnter the elements : \n"
.size .L.str.1, 24
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Sum is "
.size .L.str.2, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3sumPii"
.size .L__unnamed_1, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__sumPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3sumPii
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3sumPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0050*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0060*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe40000000000 */
/*0070*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0080*/ IMAD.SHL.U32 R8, R9, 0x2, RZ ; /* 0x0000000209087824 */
/* 0x001fe400078e00ff */
/*0090*/ ISETP.GE.AND P1, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fe20003f26270 */
/*00a0*/ BSSY B0, 0x190 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*00b0*/ ISETP.NE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fd60003f05270 */
/*00c0*/ @P1 BRA 0x180 ; /* 0x000000b000001947 */
/* 0x000fea0003800000 */
/*00d0*/ IMAD R5, R8, UR4, RZ ; /* 0x0000000408057c24 */
/* 0x000fca000f8e02ff */
/*00e0*/ IADD3 R3, R5, UR4, RZ ; /* 0x0000000405037c10 */
/* 0x000fc8000fffe0ff */
/*00f0*/ ISETP.GE.AND P1, PT, R3, c[0x0][0x168], PT ; /* 0x00005a0003007a0c */
/* 0x000fda0003f26270 */
/*0100*/ @P1 BRA 0x180 ; /* 0x0000007000001947 */
/* 0x000fea0003800000 */
/*0110*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */
/* 0x000fd400000001ff */
/*0120*/ IMAD.WIDE R2, R3, R4, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fc800078e0204 */
/*0130*/ IMAD.WIDE R4, R5, R4, c[0x0][0x160] ; /* 0x0000580005047625 */
/* 0x000fe400078e0204 */
/*0140*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea8000c1e1900 */
/*0150*/ LDG.E R6, [R4.64] ; /* 0x0000000604067981 */
/* 0x000ea4000c1e1900 */
/*0160*/ IMAD.IADD R7, R6, 0x1, R3 ; /* 0x0000000106077824 */
/* 0x004fca00078e0203 */
/*0170*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e4000c101906 */
/*0180*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0190*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*01a0*/ I2F R4, R0 ; /* 0x0000000000047306 */
/* 0x001e220000201400 */
/*01b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01c0*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */
/* 0x000fcc000800063f */
/*01d0*/ F2F.F64.F32 R2, R4 ; /* 0x0000000400027310 */
/* 0x001e240000201800 */
/*01e0*/ DMUL R2, R2, 0.5 ; /* 0x3fe0000002027828 */
/* 0x001e0c0000000000 */
/*01f0*/ F2I.F64.CEIL R0, R2 ; /* 0x0000000200007311 */
/* 0x001e240000309100 */
/*0200*/ ISETP.GT.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x001fda0003f04270 */
/*0210*/ @P0 BRA 0x90 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3sumPii
.globl _Z3sumPii
.p2align 8
.type _Z3sumPii,@function
_Z3sumPii:
s_load_b32 s4, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
v_cmp_eq_u16_e64 s2, s4, 0
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 vcc_lo, exec_lo, s2
s_cbranch_vccnz .LBB0_7
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s1, s[0:1], 0x8
s_and_b32 s0, 0xffff, s4
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v6, s0 :: v_dual_lshlrev_b32 v5, 1, v0
s_mov_b32 s4, 1
s_branch .LBB0_3
.p2align 6
.LBB0_2:
v_cvt_f32_i32_e32 v1, v6
s_lshl_b32 s4, s4, 1
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
v_cvt_f64_f32_e32 v[1:2], v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[1:2], v[1:2], 0.5
v_ceil_f64_e32 v[1:2], v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f64_e32 v6, v[1:2]
v_cmp_gt_i32_e64 s0, 1, v6
s_delay_alu instid0(VALU_DEP_1)
s_and_not1_b32 vcc_lo, exec_lo, s0
s_cbranch_vccz .LBB0_7
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v1, v5, s4
v_cmp_lt_u32_e32 vcc_lo, v0, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v3, s4, v1
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e64 s0, s1, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s5, vcc_lo, s0
s_and_saveexec_b32 s0, s5
s_cbranch_execz .LBB0_5
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
s_clause 0x1
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[1:2], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v3, v4, v3
global_store_b32 v[1:2], v3, off
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s0
v_cmp_eq_u32_e32 vcc_lo, 1, v6
s_cbranch_vccz .LBB0_2
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3sumPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 6
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3sumPii, .Lfunc_end0-_Z3sumPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3sumPii
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z3sumPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002ac37_00000000-6_ParRedSum.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3711:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3711:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z3sumPiiPii
.type _Z23__device_stub__Z3sumPiiPii, @function
_Z23__device_stub__Z3sumPiiPii:
.LFB3733:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3sumPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3733:
.size _Z23__device_stub__Z3sumPiiPii, .-_Z23__device_stub__Z3sumPiiPii
.globl _Z3sumPii
.type _Z3sumPii, @function
_Z3sumPii:
.LFB3734:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3sumPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3734:
.size _Z3sumPii, .-_Z3sumPii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "\nEnter the number of elements : "
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "\nEnter the elements : \n"
.LC6:
.string "Sum is "
.text
.globl main
.type main, @function
main:
.LFB3708:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $48, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $0, 4(%rsp)
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq 4(%rsp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSirsERi@PLT
movl 4(%rsp), %ebx
movslq %ebx, %rdi
movabsq $2305843009213693950, %rax
cmpq %rdi, %rax
jb .L12
leal 0(,%rbx,4), %r14d
salq $2, %rdi
call _Znam@PLT
movq %rax, %r13
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %r13, %rbp
movl $0, %ebx
leaq _ZSt3cin(%rip), %r12
cmpl $0, 4(%rsp)
jle .L14
.L16:
movq %rbp, %rsi
movq %r12, %rdi
call _ZNSirsERi@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 4(%rsp)
jg .L16
.L14:
movslq %r14d, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtsi2ssl 4(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LC2(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC7(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC3(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L17
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC5(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L17:
cvttsd2siq %xmm3, %rax
movl %eax, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L18:
leaq 28(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 28(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq stdin(%rip), %rdi
call getc@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call _ZdaPv@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
movq 40(%rsp), %rax
subq %fs:40, %rax
je .L15
call __stack_chk_fail@PLT
.L15:
call __cxa_throw_bad_array_new_length@PLT
.L23:
movl 4(%rsp), %esi
movq 8(%rsp), %rdi
call _Z23__device_stub__Z3sumPiiPii
jmp .L18
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3708:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z3sumPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3736:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z3sumPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3736:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1071644672
.align 8
.LC3:
.long 0
.long 1127219200
.align 8
.LC5:
.long 0
.long 1072693248
.align 8
.LC7:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "ParRedSum.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__sumPii # -- Begin function _Z18__device_stub__sumPii
.p2align 4, 0x90
.type _Z18__device_stub__sumPii,@function
_Z18__device_stub__sumPii: # @_Z18__device_stub__sumPii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3sumPii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z18__device_stub__sumPii, .Lfunc_end0-_Z18__device_stub__sumPii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x3fe0000000000000 # double 0.5
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $104, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $0, 12(%rsp)
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $32, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
leaq 12(%rsp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSirsERi
movslq 12(%rsp), %rax
leal (,%rax,4), %ebp
leaq (,%rax,4), %rcx
testq %rax, %rax
movq $-1, %rdi
cmovnsq %rcx, %rdi
callq _Znam
movq %rax, %rbx
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $23, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
cmpl $0, 12(%rsp)
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
xorl %r15d, %r15d
movq %rbx, %r14
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $_ZSt3cin, %edi
movq %r14, %rsi
callq _ZNSirsERi
incq %r15
movslq 12(%rsp), %rax
addq $4, %r14
cmpq %rax, %r15
jl .LBB1_2
.LBB1_3: # %._crit_edge
movslq %ebp, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
cvtsi2ssl 12(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LCPI1_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %rax
movl %eax, %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $1, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_5
# %bb.4:
movq 16(%rsp), %rax
movl 12(%rsp), %ecx
movq %rax, 96(%rsp)
movl %ecx, 28(%rsp)
leaq 96(%rsp), %rax
movq %rax, 32(%rsp)
leaq 28(%rsp), %rax
movq %rax, 40(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z3sumPii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_5:
movq 16(%rsp), %rsi
leaq 32(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 32(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB1_10
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB1_8
# %bb.7:
movzbl 67(%r14), %ecx
jmp .LBB1_9
.LBB1_8:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB1_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq stdin(%rip), %rdi
callq getc
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq _ZdaPv
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_10:
.cfi_def_cfa_offset 144
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3sumPii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
# __hip_module_dtor: atexit-registered teardown for the HIP fat binary.
# Idempotent: if the cached handle is already null it is a no-op; otherwise
# it calls __hipUnregisterFatBinary once and clears the handle.
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi # load cached fat-binary handle
	testq	%rdi, %rdi                      # already unregistered (null)?
	je	.LBB3_2                         # yes: nothing to do
# %bb.1:
	pushq	%rax                            # realign stack to 16 bytes for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary        # handle is already in %rdi
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z3sumPii,@object # @_Z3sumPii
.section .rodata,"a",@progbits
.globl _Z3sumPii
.p2align 3, 0x0
_Z3sumPii:
.quad _Z18__device_stub__sumPii
.size _Z3sumPii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\nEnter the number of elements : "
.size .L.str, 33
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\nEnter the elements : \n"
.size .L.str.1, 24
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Sum is "
.size .L.str.2, 8
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3sumPii"
.size .L__unnamed_1, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__sumPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3sumPii
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cuda_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
inline __device__ int indirect(int *c, int i) {
// return c[c[i & 127] & 127] + i;
return int(exp(((((float(i))))) * 1e-18)) + i;
// printf("%d\n", c[i % m] - i % m + i - i);
// return i;
}
__constant__ int const_c[m];
__global__ void fd(float *a, float *b, int *c, int n) {
__shared__ float b_s[m];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m) {
b_s[i] = 0;
}
__syncthreads();
/*
if (threadIdx.x < m) {
b_s[threadIdx.x] = c[threadIdx.x];
}
__syncthreads();
*/
/*
float sum = 0;
if (i > 0)
sum += a[indirect(c, i) - 1];
*/
// sum += a[indirect(c, i)];
// sum += a[i + b_s[i & 127]];
/*
if (i < n - 1)
sum += a[indirect(c, i) + 1];
*/
// b[i] = (i * 1e-18);
// b[i] = i;
// b[i] = c[c[c[i & 64]]];
// atomicAdd(b_s + ((unsigned)i * 34252345627) % m, 1.0f);
// i = int(((((i * 1e-20f)))));
// i = (i * 1e-10f);
// i = i * i * i * i * i % m;
// b_s[i % m] = 1;
// #define C(x) i += (i >> x);
// #define C(x) i += (i >> x);
// for (int t = 0; t < 240; t++)
// C(30);
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
for (int j = 0; j < 27; j++) {
atomicAdd(b_s + (unsigned int)(i / 4 + j * 431) % (m / 1), 1.0f);
}
__syncthreads();
if (i < m) {
atomicAdd(&b[i], b_s[i]);
}
// atomicAdd(b + i % (m * m), 1);
/*
atomicAdd(&b_s[0], sqrt(sum));
if (threadIdx.x < m) {
atomicAdd(b + threadIdx.x, b_s[threadIdx.x]);
// b[threadIdx.x] += b_s[threadIdx.x];
}
*/
}
int main() {
int n = 128 * 1024 * 1024;
float *a, *b;
int *c;
cudaMallocManaged(&a, n * sizeof(float));
cudaMallocManaged(&b, n * sizeof(float));
cudaMallocManaged(&c, m * sizeof(float));
for (int i = 0; i < n; i++) {
a[i] = i * 1e-5f;
}
for (int i = 0; i < n; i++) {
b[i] = i * 1e-5f;
}
for (int i = 0; i < m; i++) {
c[i] = 0;
}
cudaMemcpyToSymbol(const_c, c, m * sizeof(float), 0, cudaMemcpyHostToDevice);
for (auto bs : {256, 512, 1024}) {
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 4; i++) {
auto t = get_time();
fd<<<n / bs, bs>>>(a, b, c, n);
cudaDeviceSynchronize();
t = get_time() - t;
printf("%.2f ms bw %.3f GB/s\n", t * 1000,
n * 2.0f * 4 / t / (1024 * 1024 * 1024.0f));
}
std::cout << std::endl;
}
} | .file "tmpxft_000ef42a_00000000-6_bandwidth.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8get_timev
.type _Z8get_timev, @function
_Z8get_timev:
.LFB3669:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z8get_timev, .-_Z8get_timev
.globl _Z26__device_stub__Z2fdPfS_PiiPfS_Pii
.type _Z26__device_stub__Z2fdPfS_PiiPfS_Pii, @function
_Z26__device_stub__Z2fdPfS_PiiPfS_Pii:
.LFB3696:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z2fdPfS_Pii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z26__device_stub__Z2fdPfS_PiiPfS_Pii, .-_Z26__device_stub__Z2fdPfS_PiiPfS_Pii
.globl _Z2fdPfS_Pii
.type _Z2fdPfS_Pii, @function
_Z2fdPfS_Pii:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z2fdPfS_PiiPfS_Pii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z2fdPfS_Pii, .-_Z2fdPfS_Pii
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "bs = "
.LC6:
.string "%.2f ms bw %.3f GB/s\n"
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
movl $1, %edx
movl $536870912, %esi
call cudaMallocManaged@PLT
leaq 32(%rsp), %rdi
movl $1, %edx
movl $536870912, %esi
call cudaMallocManaged@PLT
leaq 40(%rsp), %rdi
movl $1, %edx
movl $1024, %esi
call cudaMallocManaged@PLT
movl $0, %eax
movss .LC1(%rip), %xmm1
.L16:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss %xmm1, %xmm0
movq 24(%rsp), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq $134217728, %rax
jne .L16
movl $0, %eax
movss .LC1(%rip), %xmm1
.L17:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss %xmm1, %xmm0
movq 32(%rsp), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq $134217728, %rax
jne .L17
movl $0, %eax
.L18:
movq 40(%rsp), %rdx
movl $0, (%rdx,%rax)
addq $4, %rax
cmpq $1024, %rax
jne .L18
movl $1, %r8d
movl $0, %ecx
movl $1024, %edx
movq 40(%rsp), %rsi
leaq _ZL7const_c(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl $256, 76(%rsp)
movl $512, 80(%rsp)
movl $1024, 84(%rsp)
leaq 76(%rsp), %r13
leaq 88(%rsp), %r15
leaq .LC2(%rip), %r14
jmp .L29
.L41:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L37
call _ZSt16__throw_bad_castv@PLT
.L37:
call __stack_chk_fail@PLT
.L21:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L22
.L23:
call cudaDeviceSynchronize@PLT
call _Z8get_timev
subsd 8(%rsp), %xmm0
movsd .LC3(%rip), %xmm1
divsd %xmm0, %xmm1
mulsd .LC5(%rip), %xmm0
mulsd .LC4(%rip), %xmm1
movq %r12, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
subl $1, %ebp
je .L38
.L24:
call _Z8get_timev
movsd %xmm0, 8(%rsp)
movl %ebx, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $134217728, %eax
movl $0, %edx
idivl %ebx
movl %eax, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L23
movl $134217728, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z26__device_stub__Z2fdPfS_PiiPfS_Pii
jmp .L23
.L38:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rip), %rdx
movq 240(%rdx,%rax), %rbx
testq %rbx, %rbx
je .L39
cmpb $0, 56(%rbx)
je .L27
movzbl 67(%rbx), %esi
.L28:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4, %r13
cmpq %r15, %r13
je .L40
.L29:
movl 0(%r13), %ebx
movl $5, %edx
movq %r14, %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r12
testq %r12, %r12
je .L41
cmpb $0, 56(%r12)
je .L21
movzbl 67(%r12), %esi
.L22:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $4, %ebp
leaq .LC6(%rip), %r12
jmp .L24
.L39:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L42
call _ZSt16__throw_bad_castv@PLT
.L42:
call __stack_chk_fail@PLT
.L27:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L28
.L40:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L43
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z2fdPfS_Pii"
.LC8:
.string "const_c"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z2fdPfS_Pii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $1024, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL7const_c(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL7const_c
.comm _ZL7const_c,1024,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 925353388
.section .rodata.cst8
.align 8
.LC3:
.long 0
.long 1104150528
.align 8
.LC4:
.long 0
.long 1041235968
.align 8
.LC5:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cuda_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
inline __device__ int indirect(int *c, int i) {
// return c[c[i & 127] & 127] + i;
return int(exp(((((float(i))))) * 1e-18)) + i;
// printf("%d\n", c[i % m] - i % m + i - i);
// return i;
}
__constant__ int const_c[m];
__global__ void fd(float *a, float *b, int *c, int n) {
__shared__ float b_s[m];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m) {
b_s[i] = 0;
}
__syncthreads();
/*
if (threadIdx.x < m) {
b_s[threadIdx.x] = c[threadIdx.x];
}
__syncthreads();
*/
/*
float sum = 0;
if (i > 0)
sum += a[indirect(c, i) - 1];
*/
// sum += a[indirect(c, i)];
// sum += a[i + b_s[i & 127]];
/*
if (i < n - 1)
sum += a[indirect(c, i) + 1];
*/
// b[i] = (i * 1e-18);
// b[i] = i;
// b[i] = c[c[c[i & 64]]];
// atomicAdd(b_s + ((unsigned)i * 34252345627) % m, 1.0f);
// i = int(((((i * 1e-20f)))));
// i = (i * 1e-10f);
// i = i * i * i * i * i % m;
// b_s[i % m] = 1;
// #define C(x) i += (i >> x);
// #define C(x) i += (i >> x);
// for (int t = 0; t < 240; t++)
// C(30);
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
for (int j = 0; j < 27; j++) {
atomicAdd(b_s + (unsigned int)(i / 4 + j * 431) % (m / 1), 1.0f);
}
__syncthreads();
if (i < m) {
atomicAdd(&b[i], b_s[i]);
}
// atomicAdd(b + i % (m * m), 1);
/*
atomicAdd(&b_s[0], sqrt(sum));
if (threadIdx.x < m) {
atomicAdd(b + threadIdx.x, b_s[threadIdx.x]);
// b[threadIdx.x] += b_s[threadIdx.x];
}
*/
}
int main() {
int n = 128 * 1024 * 1024;
float *a, *b;
int *c;
cudaMallocManaged(&a, n * sizeof(float));
cudaMallocManaged(&b, n * sizeof(float));
cudaMallocManaged(&c, m * sizeof(float));
for (int i = 0; i < n; i++) {
a[i] = i * 1e-5f;
}
for (int i = 0; i < n; i++) {
b[i] = i * 1e-5f;
}
for (int i = 0; i < m; i++) {
c[i] = 0;
}
cudaMemcpyToSymbol(const_c, c, m * sizeof(float), 0, cudaMemcpyHostToDevice);
for (auto bs : {256, 512, 1024}) {
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 4; i++) {
auto t = get_time();
fd<<<n / bs, bs>>>(a, b, c, n);
cudaDeviceSynchronize();
t = get_time() - t;
printf("%.2f ms bw %.3f GB/s\n", t * 1000,
n * 2.0f * 4 / t / (1024 * 1024 * 1024.0f));
}
std::cout << std::endl;
}
} | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
inline __device__ int indirect(int *c, int i) {
// return c[c[i & 127] & 127] + i;
return int(exp(((((float(i))))) * 1e-18)) + i;
// printf("%d\n", c[i % m] - i % m + i - i);
// return i;
}
__constant__ int const_c[m];
__global__ void fd(float *a, float *b, int *c, int n) {
__shared__ float b_s[m];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m) {
b_s[i] = 0;
}
__syncthreads();
/*
if (threadIdx.x < m) {
b_s[threadIdx.x] = c[threadIdx.x];
}
__syncthreads();
*/
/*
float sum = 0;
if (i > 0)
sum += a[indirect(c, i) - 1];
*/
// sum += a[indirect(c, i)];
// sum += a[i + b_s[i & 127]];
/*
if (i < n - 1)
sum += a[indirect(c, i) + 1];
*/
// b[i] = (i * 1e-18);
// b[i] = i;
// b[i] = c[c[c[i & 64]]];
// atomicAdd(b_s + ((unsigned)i * 34252345627) % m, 1.0f);
// i = int(((((i * 1e-20f)))));
// i = (i * 1e-10f);
// i = i * i * i * i * i % m;
// b_s[i % m] = 1;
// #define C(x) i += (i >> x);
// #define C(x) i += (i >> x);
// for (int t = 0; t < 240; t++)
// C(30);
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
for (int j = 0; j < 27; j++) {
atomicAdd(b_s + (unsigned int)(i / 4 + j * 431) % (m / 1), 1.0f);
}
__syncthreads();
if (i < m) {
atomicAdd(&b[i], b_s[i]);
}
// atomicAdd(b + i % (m * m), 1);
/*
atomicAdd(&b_s[0], sqrt(sum));
if (threadIdx.x < m) {
atomicAdd(b + threadIdx.x, b_s[threadIdx.x]);
// b[threadIdx.x] += b_s[threadIdx.x];
}
*/
}
int main() {
int n = 128 * 1024 * 1024;
float *a, *b;
int *c;
hipMallocManaged(&a, n * sizeof(float));
hipMallocManaged(&b, n * sizeof(float));
hipMallocManaged(&c, m * sizeof(float));
for (int i = 0; i < n; i++) {
a[i] = i * 1e-5f;
}
for (int i = 0; i < n; i++) {
b[i] = i * 1e-5f;
}
for (int i = 0; i < m; i++) {
c[i] = 0;
}
hipMemcpyToSymbol(HIP_SYMBOL(const_c), c, m * sizeof(float), 0, hipMemcpyHostToDevice);
for (auto bs : {256, 512, 1024}) {
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 4; i++) {
auto t = get_time();
fd<<<n / bs, bs>>>(a, b, c, n);
hipDeviceSynchronize();
t = get_time() - t;
printf("%.2f ms bw %.3f GB/s\n", t * 1000,
n * 2.0f * 4 / t / (1024 * 1024 * 1024.0f));
}
std::cout << std::endl;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <sys/time.h>
double get_time() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return tv.tv_sec + 1e-6 * tv.tv_usec;
}
constexpr int m = 256;
inline __device__ int indirect(int *c, int i) {
// return c[c[i & 127] & 127] + i;
return int(exp(((((float(i))))) * 1e-18)) + i;
// printf("%d\n", c[i % m] - i % m + i - i);
// return i;
}
__constant__ int const_c[m];
__global__ void fd(float *a, float *b, int *c, int n) {
__shared__ float b_s[m];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m) {
b_s[i] = 0;
}
__syncthreads();
/*
if (threadIdx.x < m) {
b_s[threadIdx.x] = c[threadIdx.x];
}
__syncthreads();
*/
/*
float sum = 0;
if (i > 0)
sum += a[indirect(c, i) - 1];
*/
// sum += a[indirect(c, i)];
// sum += a[i + b_s[i & 127]];
/*
if (i < n - 1)
sum += a[indirect(c, i) + 1];
*/
// b[i] = (i * 1e-18);
// b[i] = i;
// b[i] = c[c[c[i & 64]]];
// atomicAdd(b_s + ((unsigned)i * 34252345627) % m, 1.0f);
// i = int(((((i * 1e-20f)))));
// i = (i * 1e-10f);
// i = i * i * i * i * i % m;
// b_s[i % m] = 1;
// #define C(x) i += (i >> x);
// #define C(x) i += (i >> x);
// for (int t = 0; t < 240; t++)
// C(30);
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
i += int(sin(i * 1e-20f));
for (int j = 0; j < 27; j++) {
atomicAdd(b_s + (unsigned int)(i / 4 + j * 431) % (m / 1), 1.0f);
}
__syncthreads();
if (i < m) {
atomicAdd(&b[i], b_s[i]);
}
// atomicAdd(b + i % (m * m), 1);
/*
atomicAdd(&b_s[0], sqrt(sum));
if (threadIdx.x < m) {
atomicAdd(b + threadIdx.x, b_s[threadIdx.x]);
// b[threadIdx.x] += b_s[threadIdx.x];
}
*/
}
int main() {
int n = 128 * 1024 * 1024;
float *a, *b;
int *c;
hipMallocManaged(&a, n * sizeof(float));
hipMallocManaged(&b, n * sizeof(float));
hipMallocManaged(&c, m * sizeof(float));
for (int i = 0; i < n; i++) {
a[i] = i * 1e-5f;
}
for (int i = 0; i < n; i++) {
b[i] = i * 1e-5f;
}
for (int i = 0; i < m; i++) {
c[i] = 0;
}
hipMemcpyToSymbol(HIP_SYMBOL(const_c), c, m * sizeof(float), 0, hipMemcpyHostToDevice);
for (auto bs : {256, 512, 1024}) {
std::cout << "bs = " << bs << std::endl;
for (int i = 0; i < 4; i++) {
auto t = get_time();
fd<<<n / bs, bs>>>(a, b, c, n);
hipDeviceSynchronize();
t = get_time() - t;
printf("%.2f ms bw %.3f GB/s\n", t * 1000,
n * 2.0f * 4 / t / (1024 * 1024 * 1024.0f));
}
std::cout << std::endl;
}
} | .text
.file "bandwidth.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z8get_timev
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z8get_timev
.p2align 4, 0x90
.type _Z8get_timev,@function
_Z8get_timev: # @_Z8get_timev
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z8get_timev, .Lfunc_end0-_Z8get_timev
.cfi_endproc
# -- End function
.globl _Z17__device_stub__fdPfS_Pii # -- Begin function _Z17__device_stub__fdPfS_Pii
.p2align 4, 0x90
.type _Z17__device_stub__fdPfS_Pii,@function
_Z17__device_stub__fdPfS_Pii: # @_Z17__device_stub__fdPfS_Pii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z2fdPfS_Pii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z17__device_stub__fdPfS_Pii, .Lfunc_end1-_Z17__device_stub__fdPfS_Pii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI2_0:
.long 0x3727c5ac # float 9.99999974E-6
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI2_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI2_2:
.quad 0x408f400000000000 # double 1000
.LCPI2_3:
.quad 0x41d0000000000000 # double 1073741824
.LCPI2_4:
.quad 0x3e10000000000000 # double 9.3132257461547852E-10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 40(%rsp), %rdi
movl $536870912, %esi # imm = 0x20000000
movl $1, %edx
callq hipMallocManaged
leaq 32(%rsp), %rdi
movl $536870912, %esi # imm = 0x20000000
movl $1, %edx
callq hipMallocManaged
leaq 24(%rsp), %rdi
movl $1024, %esi # imm = 0x400
movl $1, %edx
callq hipMallocManaged
xorl %eax, %eax
movq 40(%rsp), %rcx
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rcx,%rax,4)
incq %rax
cmpq $134217728, %rax # imm = 0x8000000
jne .LBB2_1
# %bb.2: # %.preheader49
movq 32(%rsp), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_3: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %ecx, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rax,%rcx,4)
incq %rcx
cmpq $134217728, %rcx # imm = 0x8000000
jne .LBB2_3
# %bb.4: # %.preheader
movabsq $4294967296, %r15 # imm = 0x100000000
movq 24(%rsp), %rbx
xorl %r12d, %r12d
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
movl $const_c, %edi
movl $1024, %edx # imm = 0x400
movq %rbx, %rsi
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
movabsq $2199023255808, %rax # imm = 0x20000000100
movq %rax, 172(%rsp)
movl $1024, 180(%rsp) # imm = 0x400
leaq 48(%rsp), %rbx
jmp .LBB2_5
.p2align 4, 0x90
.LBB2_16: # in Loop: Header=BB2_5 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_17: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit43
# in Loop: Header=BB2_5 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
addq $4, %r12
cmpq $12, %r12
je .LBB2_18
.LBB2_5: # =>This Loop Header: Depth=1
# Child Loop BB2_10 Depth 2
movl 172(%rsp,%r12), %r14d
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r14d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_19
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_5 Depth=1
cmpb $0, 56(%rbp)
je .LBB2_8
# %bb.7: # in Loop: Header=BB2_5 Depth=1
movzbl 67(%rbp), %ecx
jmp .LBB2_9
.p2align 4, 0x90
.LBB2_8: # in Loop: Header=BB2_5 Depth=1
movq %rbp, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB2_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB2_5 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %r14, %rbp
orq %r15, %rbp
movl $4, %r13d
jmp .LBB2_10
.p2align 4, 0x90
.LBB2_12: # in Loop: Header=BB2_10 Depth=2
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
addsd 88(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 8(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
movq %rbx, %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 48(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 56(%rsp), %xmm0
mulsd .LCPI2_1(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 8(%rsp), %xmm0 # 8-byte Folded Reload
movsd .LCPI2_3(%rip), %xmm1 # xmm1 = mem[0],zero
divsd %xmm0, %xmm1
mulsd .LCPI2_2(%rip), %xmm0
mulsd .LCPI2_4(%rip), %xmm1
movl $.L.str.1, %edi
movb $2, %al
callq printf
decl %r13d
je .LBB2_13
.LBB2_10: # Parent Loop BB2_5 Depth=1
# => This Inner Loop Header: Depth=2
movq %rbx, %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
movsd %xmm0, 88(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
cvtsi2sdq 56(%rsp), %xmm0
mulsd .LCPI2_1(%rip), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
movl $134217728, %eax # imm = 0x8000000
xorl %edx, %edx
idivl %r14d
# kill: def $eax killed $eax def $rax
orq %r15, %rax
movq %rax, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11: # in Loop: Header=BB2_10 Depth=2
movq 40(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 160(%rsp)
movq %rcx, 152(%rsp)
movq %rdx, 144(%rsp)
movl $134217728, 20(%rsp) # imm = 0x8000000
leaq 160(%rsp), %rax
movq %rax, 48(%rsp)
leaq 152(%rsp), %rax
movq %rax, 56(%rsp)
leaq 144(%rsp), %rax
movq %rax, 64(%rsp)
leaq 20(%rsp), %rax
movq %rax, 72(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
movl $_Z2fdPfS_Pii, %edi
movq %rbx, %r9
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_12
.p2align 4, 0x90
.LBB2_13: # in Loop: Header=BB2_5 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB2_19
# %bb.14: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i40
# in Loop: Header=BB2_5 Depth=1
cmpb $0, 56(%r14)
je .LBB2_16
# %bb.15: # in Loop: Header=BB2_5 Depth=1
movzbl 67(%r14), %eax
jmp .LBB2_17
.LBB2_18:
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_19:
.cfi_def_cfa_offset 240
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z2fdPfS_Pii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $const_c, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $1024, %r9d # imm = 0x400
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type const_c,@object # @const_c
.local const_c
.comm const_c,1024,16
.type _Z2fdPfS_Pii,@object # @_Z2fdPfS_Pii
.section .rodata,"a",@progbits
.globl _Z2fdPfS_Pii
.p2align 3, 0x0
_Z2fdPfS_Pii:
.quad _Z17__device_stub__fdPfS_Pii
.size _Z2fdPfS_Pii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "bs = "
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%.2f ms bw %.3f GB/s\n"
.size .L.str.1, 24
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z2fdPfS_Pii"
.size .L__unnamed_1, 13
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "const_c"
.size .L__unnamed_2, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z17__device_stub__fdPfS_Pii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym const_c
.addrsig_sym _Z2fdPfS_Pii
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000ef42a_00000000-6_bandwidth.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8get_timev
.type _Z8get_timev, @function
_Z8get_timev:
.LFB3669:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z8get_timev, .-_Z8get_timev
.globl _Z26__device_stub__Z2fdPfS_PiiPfS_Pii
.type _Z26__device_stub__Z2fdPfS_PiiPfS_Pii, @function
_Z26__device_stub__Z2fdPfS_PiiPfS_Pii:
.LFB3696:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z2fdPfS_Pii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z26__device_stub__Z2fdPfS_PiiPfS_Pii, .-_Z26__device_stub__Z2fdPfS_PiiPfS_Pii
.globl _Z2fdPfS_Pii
.type _Z2fdPfS_Pii, @function
_Z2fdPfS_Pii:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z2fdPfS_PiiPfS_Pii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z2fdPfS_Pii, .-_Z2fdPfS_Pii
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "bs = "
.LC6:
.string "%.2f ms bw %.3f GB/s\n"
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
movl $1, %edx
movl $536870912, %esi
call cudaMallocManaged@PLT
leaq 32(%rsp), %rdi
movl $1, %edx
movl $536870912, %esi
call cudaMallocManaged@PLT
leaq 40(%rsp), %rdi
movl $1, %edx
movl $1024, %esi
call cudaMallocManaged@PLT
movl $0, %eax
movss .LC1(%rip), %xmm1
.L16:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss %xmm1, %xmm0
movq 24(%rsp), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq $134217728, %rax
jne .L16
movl $0, %eax
movss .LC1(%rip), %xmm1
.L17:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss %xmm1, %xmm0
movq 32(%rsp), %rdx
movss %xmm0, (%rdx,%rax,4)
addq $1, %rax
cmpq $134217728, %rax
jne .L17
movl $0, %eax
.L18:
movq 40(%rsp), %rdx
movl $0, (%rdx,%rax)
addq $4, %rax
cmpq $1024, %rax
jne .L18
movl $1, %r8d
movl $0, %ecx
movl $1024, %edx
movq 40(%rsp), %rsi
leaq _ZL7const_c(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl $256, 76(%rsp)
movl $512, 80(%rsp)
movl $1024, 84(%rsp)
leaq 76(%rsp), %r13
leaq 88(%rsp), %r15
leaq .LC2(%rip), %r14
jmp .L29
.L41:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L37
call _ZSt16__throw_bad_castv@PLT
.L37:
call __stack_chk_fail@PLT
.L21:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L22
.L23:
call cudaDeviceSynchronize@PLT
call _Z8get_timev
subsd 8(%rsp), %xmm0
movsd .LC3(%rip), %xmm1
divsd %xmm0, %xmm1
mulsd .LC5(%rip), %xmm0
mulsd .LC4(%rip), %xmm1
movq %r12, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
subl $1, %ebp
je .L38
.L24:
call _Z8get_timev
movsd %xmm0, 8(%rsp)
movl %ebx, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $134217728, %eax
movl $0, %edx
idivl %ebx
movl %eax, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movl $1, %ecx
movq 52(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L23
movl $134217728, %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z26__device_stub__Z2fdPfS_PiiPfS_Pii
jmp .L23
.L38:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rip), %rdx
movq 240(%rdx,%rax), %rbx
testq %rbx, %rbx
je .L39
cmpb $0, 56(%rbx)
je .L27
movzbl 67(%rbx), %esi
.L28:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $4, %r13
cmpq %r15, %r13
je .L40
.L29:
movl 0(%r13), %ebx
movl $5, %edx
movq %r14, %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r12
testq %r12, %r12
je .L41
cmpb $0, 56(%r12)
je .L21
movzbl 67(%r12), %esi
.L22:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $4, %ebp
leaq .LC6(%rip), %r12
jmp .L24
.L39:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L42
call _ZSt16__throw_bad_castv@PLT
.L42:
call __stack_chk_fail@PLT
.L27:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L28
.L40:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L43
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z2fdPfS_Pii"
.LC8:
.string "const_c"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z2fdPfS_Pii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $1024, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL7const_c(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL7const_c
.comm _ZL7const_c,1024,32
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 925353388
.section .rodata.cst8
.align 8
.LC3:
.long 0
.long 1104150528
.align 8
.LC4:
.long 0
.long 1041235968
.align 8
.LC5:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "bandwidth.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z8get_timev
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z8get_timev
.p2align 4, 0x90
.type _Z8get_timev,@function
_Z8get_timev: # @_Z8get_timev
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
leaq 8(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 8(%rsp), %xmm1
cvtsi2sdq 16(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z8get_timev, .Lfunc_end0-_Z8get_timev
.cfi_endproc
# -- End function
.globl _Z17__device_stub__fdPfS_Pii # -- Begin function _Z17__device_stub__fdPfS_Pii
.p2align 4, 0x90
.type _Z17__device_stub__fdPfS_Pii,@function
_Z17__device_stub__fdPfS_Pii: # @_Z17__device_stub__fdPfS_Pii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z2fdPfS_Pii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z17__device_stub__fdPfS_Pii, .Lfunc_end1-_Z17__device_stub__fdPfS_Pii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI2_0:
.long 0x3727c5ac # float 9.99999974E-6
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI2_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.LCPI2_2:
.quad 0x408f400000000000 # double 1000
.LCPI2_3:
.quad 0x41d0000000000000 # double 1073741824
.LCPI2_4:
.quad 0x3e10000000000000 # double 9.3132257461547852E-10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 40(%rsp), %rdi
movl $536870912, %esi # imm = 0x20000000
movl $1, %edx
callq hipMallocManaged
leaq 32(%rsp), %rdi
movl $536870912, %esi # imm = 0x20000000
movl $1, %edx
callq hipMallocManaged
leaq 24(%rsp), %rdi
movl $1024, %esi # imm = 0x400
movl $1, %edx
callq hipMallocManaged
xorl %eax, %eax
movq 40(%rsp), %rcx
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %eax, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rcx,%rax,4)
incq %rax
cmpq $134217728, %rax # imm = 0x8000000
jne .LBB2_1
# %bb.2: # %.preheader49
movq 32(%rsp), %rax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_3: # =>This Inner Loop Header: Depth=1
xorps %xmm1, %xmm1
cvtsi2ss %ecx, %xmm1
mulss %xmm0, %xmm1
movss %xmm1, (%rax,%rcx,4)
incq %rcx
cmpq $134217728, %rcx # imm = 0x8000000
jne .LBB2_3
# %bb.4: # %.preheader
movabsq $4294967296, %r15 # imm = 0x100000000
movq 24(%rsp), %rbx
xorl %r12d, %r12d
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
movl $const_c, %edi
movl $1024, %edx # imm = 0x400
movq %rbx, %rsi
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
movabsq $2199023255808, %rax # imm = 0x20000000100
movq %rax, 172(%rsp)
movl $1024, 180(%rsp) # imm = 0x400
leaq 48(%rsp), %rbx
jmp .LBB2_5
.p2align 4, 0x90
.LBB2_16: # in Loop: Header=BB2_5 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_17: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit43
# in Loop: Header=BB2_5 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
addq $4, %r12
cmpq $12, %r12
je .LBB2_18
.LBB2_5: # =>This Loop Header: Depth=1
# Child Loop BB2_10 Depth 2
movl 172(%rsp,%r12), %r14d
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r14d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB2_19
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_5 Depth=1
cmpb $0, 56(%rbp)
je .LBB2_8
# %bb.7: # in Loop: Header=BB2_5 Depth=1
movzbl 67(%rbp), %ecx
jmp .LBB2_9
.p2align 4, 0x90
.LBB2_8: # in Loop: Header=BB2_5 Depth=1
movq %rbp, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB2_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB2_5 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %r14, %rbp
orq %r15, %rbp
movl $4, %r13d
jmp .LBB2_10
.p2align 4, 0x90
.LBB2_12: # in Loop: Header=BB2_10 Depth=2
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
addsd 88(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 8(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
movq %rbx, %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 48(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 56(%rsp), %xmm0
mulsd .LCPI2_1(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 8(%rsp), %xmm0 # 8-byte Folded Reload
movsd .LCPI2_3(%rip), %xmm1 # xmm1 = mem[0],zero
divsd %xmm0, %xmm1
mulsd .LCPI2_2(%rip), %xmm0
mulsd .LCPI2_4(%rip), %xmm1
movl $.L.str.1, %edi
movb $2, %al
callq printf
decl %r13d
je .LBB2_13
.LBB2_10: # Parent Loop BB2_5 Depth=1
# => This Inner Loop Header: Depth=2
movq %rbx, %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 48(%rsp), %xmm0
movsd %xmm0, 88(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
cvtsi2sdq 56(%rsp), %xmm0
mulsd .LCPI2_1(%rip), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
movl $134217728, %eax # imm = 0x8000000
xorl %edx, %edx
idivl %r14d
# kill: def $eax killed $eax def $rax
orq %r15, %rax
movq %rax, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11: # in Loop: Header=BB2_10 Depth=2
movq 40(%rsp), %rax
movq 32(%rsp), %rcx
movq 24(%rsp), %rdx
movq %rax, 160(%rsp)
movq %rcx, 152(%rsp)
movq %rdx, 144(%rsp)
movl $134217728, 20(%rsp) # imm = 0x8000000
leaq 160(%rsp), %rax
movq %rax, 48(%rsp)
leaq 152(%rsp), %rax
movq %rax, 56(%rsp)
leaq 144(%rsp), %rax
movq %rax, 64(%rsp)
leaq 20(%rsp), %rax
movq %rax, 72(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
movl $_Z2fdPfS_Pii, %edi
movq %rbx, %r9
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB2_12
.p2align 4, 0x90
.LBB2_13: # in Loop: Header=BB2_5 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB2_19
# %bb.14: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i40
# in Loop: Header=BB2_5 Depth=1
cmpb $0, 56(%r14)
je .LBB2_16
# %bb.15: # in Loop: Header=BB2_5 Depth=1
movzbl 67(%r14), %eax
jmp .LBB2_17
.LBB2_18:
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_19:
.cfi_def_cfa_offset 240
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z2fdPfS_Pii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $const_c, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $1024, %r9d # imm = 0x400
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type const_c,@object # @const_c
.local const_c
.comm const_c,1024,16
.type _Z2fdPfS_Pii,@object # @_Z2fdPfS_Pii
.section .rodata,"a",@progbits
.globl _Z2fdPfS_Pii
.p2align 3, 0x0
_Z2fdPfS_Pii:
.quad _Z17__device_stub__fdPfS_Pii
.size _Z2fdPfS_Pii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "bs = "
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%.2f ms bw %.3f GB/s\n"
.size .L.str.1, 24
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z2fdPfS_Pii"
.size .L__unnamed_1, 13
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "const_c"
.size .L__unnamed_2, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z17__device_stub__fdPfS_Pii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym const_c
.addrsig_sym _Z2fdPfS_Pii
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void cuda_gray(unsigned char *input, int offset, int streamSize, unsigned char* gray, int size) {
int gray_idx = (offset/3) + (blockIdx.x * blockDim.x + threadIdx.x);
int rgb_idx = (offset) + ((blockIdx.x * blockDim.x + threadIdx.x) * 3);
if (((blockIdx.x * blockDim.x + threadIdx.x)*3)>=streamSize || gray_idx>=size) {
return;
}
gray[gray_idx] = (gray_value[0] * input[rgb_idx]) + (gray_value[1] * input[rgb_idx + 1]) + (gray_value[2] * input[rgb_idx + 2]);
} | code for sm_80
Function : _Z9cuda_grayPhiiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ UIMAD.WIDE UR4, UR4, 0x55555556, URZ ; /* 0x55555556040478a5 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e260000002100 */
/*0050*/ ULEA.HI UR5, UR5, UR5, URZ, 0x1 ; /* 0x0000000505057291 */
/* 0x000fe2000f8f083f */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0070*/ IADD3 R10, R0.reuse, UR5, RZ ; /* 0x00000005000a7c10 */
/* 0x040fe2000fffe0ff */
/*0080*/ IMAD R0, R0, 0x3, RZ ; /* 0x0000000300007824 */
/* 0x000fc600078e02ff */
/*0090*/ ISETP.GE.AND P0, PT, R10, c[0x0][0x178], PT ; /* 0x00005e000a007a0c */
/* 0x000fc80003f06270 */
/*00a0*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x16c], P0 ; /* 0x00005b0000007a0c */
/* 0x000fda0000706470 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ IADD3 R0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fe20007ffe0ff */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00e0*/ IADD3 R2, P0, R0, c[0x0][0x160], RZ ; /* 0x0000580000027a10 */
/* 0x000fc80007f1e0ff */
/*00f0*/ LEA.HI.X.SX32 R3, R0, c[0x0][0x164], 0x1, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f0eff */
/*0100*/ LDG.E.U8 R6, [R2.64+0x1] ; /* 0x0000010402067981 */
/* 0x000ea8000c1e1100 */
/*0110*/ LDG.E.U8 R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ee8000c1e1100 */
/*0120*/ LDG.E.U8 R7, [R2.64+0x2] ; /* 0x0000020402077981 */
/* 0x000f22000c1e1100 */
/*0130*/ I2F.U16 R6, R6 ; /* 0x0000000600067306 */
/* 0x004e300000101000 */
/*0140*/ I2F.U16 R0, R0 ; /* 0x0000000000007306 */
/* 0x008e700000101000 */
/*0150*/ I2F.U16 R4, R7 ; /* 0x0000000700047306 */
/* 0x010ea20000101000 */
/*0160*/ FMUL R5, R6, c[0x3][0x4] ; /* 0x00c0010006057a20 */
/* 0x001fc80000400000 */
/*0170*/ FFMA R5, R0, c[0x3][0x0], R5 ; /* 0x00c0000000057a23 */
/* 0x002fc80000000005 */
/*0180*/ FFMA R8, R4, c[0x3][0x8], R5 ; /* 0x00c0020004087a23 */
/* 0x004fe20000000005 */
/*0190*/ IADD3 R4, P0, R10, c[0x0][0x170], RZ ; /* 0x00005c000a047a10 */
/* 0x000fc60007f1e0ff */
/*01a0*/ F2I.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000e22000020f000 */
/*01b0*/ LEA.HI.X.SX32 R5, R10, c[0x0][0x174], 0x1, P0 ; /* 0x00005d000a057a11 */
/* 0x000fca00000f0eff */
/*01c0*/ STG.E.U8 [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x001fe2000c101104 */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void cuda_gray(unsigned char *input, int offset, int streamSize, unsigned char* gray, int size) {
int gray_idx = (offset/3) + (blockIdx.x * blockDim.x + threadIdx.x);
int rgb_idx = (offset) + ((blockIdx.x * blockDim.x + threadIdx.x) * 3);
if (((blockIdx.x * blockDim.x + threadIdx.x)*3)>=streamSize || gray_idx>=size) {
return;
}
gray[gray_idx] = (gray_value[0] * input[rgb_idx]) + (gray_value[1] * input[rgb_idx + 1]) + (gray_value[2] * input[rgb_idx + 2]);
} | .file "tmpxft_000f499b_00000000-6_cuda_gray.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i
.type _Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i, @function
_Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9cuda_grayPhiiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i, .-_Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i
.globl _Z9cuda_grayPhiiS_i
.type _Z9cuda_grayPhiiS_i, @function
_Z9cuda_grayPhiiS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9cuda_grayPhiiS_i, .-_Z9cuda_grayPhiiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9cuda_grayPhiiS_i"
.LC1:
.string "gray_value"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9cuda_grayPhiiS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $12, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10gray_value(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL10gray_value
.comm _ZL10gray_value,12,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void cuda_gray(unsigned char *input, int offset, int streamSize, unsigned char* gray, int size) {
int gray_idx = (offset/3) + (blockIdx.x * blockDim.x + threadIdx.x);
int rgb_idx = (offset) + ((blockIdx.x * blockDim.x + threadIdx.x) * 3);
if (((blockIdx.x * blockDim.x + threadIdx.x)*3)>=streamSize || gray_idx>=size) {
return;
}
gray[gray_idx] = (gray_value[0] * input[rgb_idx]) + (gray_value[1] * input[rgb_idx + 1]) + (gray_value[2] * input[rgb_idx + 2]);
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cuda_gray(unsigned char *input, int offset, int streamSize, unsigned char* gray, int size) {
int gray_idx = (offset/3) + (blockIdx.x * blockDim.x + threadIdx.x);
int rgb_idx = (offset) + ((blockIdx.x * blockDim.x + threadIdx.x) * 3);
if (((blockIdx.x * blockDim.x + threadIdx.x)*3)>=streamSize || gray_idx>=size) {
return;
}
gray[gray_idx] = (gray_value[0] * input[rgb_idx]) + (gray_value[1] * input[rgb_idx + 1]) + (gray_value[2] * input[rgb_idx + 2]);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cuda_gray(unsigned char *input, int offset, int streamSize, unsigned char* gray, int size) {
int gray_idx = (offset/3) + (blockIdx.x * blockDim.x + threadIdx.x);
int rgb_idx = (offset) + ((blockIdx.x * blockDim.x + threadIdx.x) * 3);
if (((blockIdx.x * blockDim.x + threadIdx.x)*3)>=streamSize || gray_idx>=size) {
return;
}
gray[gray_idx] = (gray_value[0] * input[rgb_idx]) + (gray_value[1] * input[rgb_idx + 1]) + (gray_value[2] * input[rgb_idx + 2]);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9cuda_grayPhiiS_i
.globl _Z9cuda_grayPhiiS_i
.p2align 8
.type _Z9cuda_grayPhiiS_i,@function
_Z9cuda_grayPhiiS_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[0:1]
s_mul_hi_i32 s2, s4, 0x55555556
s_lshr_b32 s6, s2, 31
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshl_add_u32 v1, v2, 1, v2
v_add3_u32 v0, s2, s6, v2
v_cmp_gt_u32_e32 vcc_lo, s5, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s3, v0
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b64 s[2:3], s[0:1], 0x0
v_add_nc_u32_e32 v1, s4, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, gray_value@rel32@lo+8
s_addc_u32 s3, s3, gray_value@rel32@hi+16
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, gray_value@rel32@lo+4
s_addc_u32 s5, s5, gray_value@rel32@hi+12
s_load_b32 s6, s[2:3], 0x0
s_clause 0x2
global_load_u8 v3, v[1:2], off offset:1
global_load_u8 v4, v[1:2], off
global_load_u8 v1, v[1:2], off offset:2
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, gray_value@rel32@lo+12
s_addc_u32 s3, s3, gray_value@rel32@hi+20
s_clause 0x1
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s2, s[2:3], 0x0
s_waitcnt vmcnt(2)
v_cvt_f32_ubyte0_e32 v2, v3
s_waitcnt vmcnt(1)
v_cvt_f32_ubyte0_e32 v3, v4
s_waitcnt vmcnt(0)
v_cvt_f32_ubyte0_e32 v1, v1
s_waitcnt lgkmcnt(0)
v_mul_f32_e32 v2, s6, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v2, s4, v3
v_fmac_f32_e32 v2, s2, v1
v_ashrrev_i32_e32 v1, 31, v0
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cvt_i32_f32_e32 v2, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b8 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9cuda_grayPhiiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9cuda_grayPhiiS_i, .Lfunc_end0-_Z9cuda_grayPhiiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected gray_value
.type gray_value,@object
.data
.globl gray_value
.p2align 2, 0x0
gray_value:
.long 0x3e99999a
.long 0x3f147ae1
.long 0x3de147ae
.size gray_value, 12
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym gray_value
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9cuda_grayPhiiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9cuda_grayPhiiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cuda_gray(unsigned char *input, int offset, int streamSize, unsigned char* gray, int size) {
int gray_idx = (offset/3) + (blockIdx.x * blockDim.x + threadIdx.x);
int rgb_idx = (offset) + ((blockIdx.x * blockDim.x + threadIdx.x) * 3);
if (((blockIdx.x * blockDim.x + threadIdx.x)*3)>=streamSize || gray_idx>=size) {
return;
}
gray[gray_idx] = (gray_value[0] * input[rgb_idx]) + (gray_value[1] * input[rgb_idx + 1]) + (gray_value[2] * input[rgb_idx + 2]);
} | .text
.file "cuda_gray.hip"
.globl _Z24__device_stub__cuda_grayPhiiS_i # -- Begin function _Z24__device_stub__cuda_grayPhiiS_i
.p2align 4, 0x90
.type _Z24__device_stub__cuda_grayPhiiS_i,@function
_Z24__device_stub__cuda_grayPhiiS_i: # @_Z24__device_stub__cuda_grayPhiiS_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9cuda_grayPhiiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__cuda_grayPhiiS_i, .Lfunc_end0-_Z24__device_stub__cuda_grayPhiiS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9cuda_grayPhiiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $gray_value, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $12, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type gray_value,@object # @gray_value
.local gray_value
.comm gray_value,12,4
.type _Z9cuda_grayPhiiS_i,@object # @_Z9cuda_grayPhiiS_i
.section .rodata,"a",@progbits
.globl _Z9cuda_grayPhiiS_i
.p2align 3, 0x0
_Z9cuda_grayPhiiS_i:
.quad _Z24__device_stub__cuda_grayPhiiS_i
.size _Z9cuda_grayPhiiS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9cuda_grayPhiiS_i"
.size .L__unnamed_1, 20
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "gray_value"
.size .L__unnamed_2, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__cuda_grayPhiiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym gray_value
.addrsig_sym _Z9cuda_grayPhiiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9cuda_grayPhiiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ UIMAD.WIDE UR4, UR4, 0x55555556, URZ ; /* 0x55555556040478a5 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e260000002100 */
/*0050*/ ULEA.HI UR5, UR5, UR5, URZ, 0x1 ; /* 0x0000000505057291 */
/* 0x000fe2000f8f083f */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0070*/ IADD3 R10, R0.reuse, UR5, RZ ; /* 0x00000005000a7c10 */
/* 0x040fe2000fffe0ff */
/*0080*/ IMAD R0, R0, 0x3, RZ ; /* 0x0000000300007824 */
/* 0x000fc600078e02ff */
/*0090*/ ISETP.GE.AND P0, PT, R10, c[0x0][0x178], PT ; /* 0x00005e000a007a0c */
/* 0x000fc80003f06270 */
/*00a0*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x16c], P0 ; /* 0x00005b0000007a0c */
/* 0x000fda0000706470 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ IADD3 R0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fe20007ffe0ff */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00e0*/ IADD3 R2, P0, R0, c[0x0][0x160], RZ ; /* 0x0000580000027a10 */
/* 0x000fc80007f1e0ff */
/*00f0*/ LEA.HI.X.SX32 R3, R0, c[0x0][0x164], 0x1, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f0eff */
/*0100*/ LDG.E.U8 R6, [R2.64+0x1] ; /* 0x0000010402067981 */
/* 0x000ea8000c1e1100 */
/*0110*/ LDG.E.U8 R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ee8000c1e1100 */
/*0120*/ LDG.E.U8 R7, [R2.64+0x2] ; /* 0x0000020402077981 */
/* 0x000f22000c1e1100 */
/*0130*/ I2F.U16 R6, R6 ; /* 0x0000000600067306 */
/* 0x004e300000101000 */
/*0140*/ I2F.U16 R0, R0 ; /* 0x0000000000007306 */
/* 0x008e700000101000 */
/*0150*/ I2F.U16 R4, R7 ; /* 0x0000000700047306 */
/* 0x010ea20000101000 */
/*0160*/ FMUL R5, R6, c[0x3][0x4] ; /* 0x00c0010006057a20 */
/* 0x001fc80000400000 */
/*0170*/ FFMA R5, R0, c[0x3][0x0], R5 ; /* 0x00c0000000057a23 */
/* 0x002fc80000000005 */
/*0180*/ FFMA R8, R4, c[0x3][0x8], R5 ; /* 0x00c0020004087a23 */
/* 0x004fe20000000005 */
/*0190*/ IADD3 R4, P0, R10, c[0x0][0x170], RZ ; /* 0x00005c000a047a10 */
/* 0x000fc60007f1e0ff */
/*01a0*/ F2I.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000e22000020f000 */
/*01b0*/ LEA.HI.X.SX32 R5, R10, c[0x0][0x174], 0x1, P0 ; /* 0x00005d000a057a11 */
/* 0x000fca00000f0eff */
/*01c0*/ STG.E.U8 [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x001fe2000c101104 */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9cuda_grayPhiiS_i
.globl _Z9cuda_grayPhiiS_i
.p2align 8
.type _Z9cuda_grayPhiiS_i,@function
_Z9cuda_grayPhiiS_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[0:1]
s_mul_hi_i32 s2, s4, 0x55555556
s_lshr_b32 s6, s2, 31
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshl_add_u32 v1, v2, 1, v2
v_add3_u32 v0, s2, s6, v2
v_cmp_gt_u32_e32 vcc_lo, s5, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s3, v0
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b64 s[2:3], s[0:1], 0x0
v_add_nc_u32_e32 v1, s4, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v2, 31, v1
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, gray_value@rel32@lo+8
s_addc_u32 s3, s3, gray_value@rel32@hi+16
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, gray_value@rel32@lo+4
s_addc_u32 s5, s5, gray_value@rel32@hi+12
s_load_b32 s6, s[2:3], 0x0
s_clause 0x2
global_load_u8 v3, v[1:2], off offset:1
global_load_u8 v4, v[1:2], off
global_load_u8 v1, v[1:2], off offset:2
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, gray_value@rel32@lo+12
s_addc_u32 s3, s3, gray_value@rel32@hi+20
s_clause 0x1
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s2, s[2:3], 0x0
s_waitcnt vmcnt(2)
v_cvt_f32_ubyte0_e32 v2, v3
s_waitcnt vmcnt(1)
v_cvt_f32_ubyte0_e32 v3, v4
s_waitcnt vmcnt(0)
v_cvt_f32_ubyte0_e32 v1, v1
s_waitcnt lgkmcnt(0)
v_mul_f32_e32 v2, s6, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v2, s4, v3
v_fmac_f32_e32 v2, s2, v1
v_ashrrev_i32_e32 v1, 31, v0
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cvt_i32_f32_e32 v2, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b8 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9cuda_grayPhiiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9cuda_grayPhiiS_i, .Lfunc_end0-_Z9cuda_grayPhiiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected gray_value
.type gray_value,@object
.data
.globl gray_value
.p2align 2, 0x0
gray_value:
.long 0x3e99999a
.long 0x3f147ae1
.long 0x3de147ae
.size gray_value, 12
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym gray_value
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9cuda_grayPhiiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9cuda_grayPhiiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f499b_00000000-6_cuda_gray.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i
.type _Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i, @function
_Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9cuda_grayPhiiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i, .-_Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i
.globl _Z9cuda_grayPhiiS_i
.type _Z9cuda_grayPhiiS_i, @function
_Z9cuda_grayPhiiS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z9cuda_grayPhiiS_iPhiiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9cuda_grayPhiiS_i, .-_Z9cuda_grayPhiiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9cuda_grayPhiiS_i"
.LC1:
.string "gray_value"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9cuda_grayPhiiS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $12, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10gray_value(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL10gray_value
.comm _ZL10gray_value,12,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_gray.hip"
.globl _Z24__device_stub__cuda_grayPhiiS_i # -- Begin function _Z24__device_stub__cuda_grayPhiiS_i
.p2align 4, 0x90
.type _Z24__device_stub__cuda_grayPhiiS_i,@function
_Z24__device_stub__cuda_grayPhiiS_i: # @_Z24__device_stub__cuda_grayPhiiS_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9cuda_grayPhiiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__cuda_grayPhiiS_i, .Lfunc_end0-_Z24__device_stub__cuda_grayPhiiS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9cuda_grayPhiiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $gray_value, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $12, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type gray_value,@object # @gray_value
.local gray_value
.comm gray_value,12,4
.type _Z9cuda_grayPhiiS_i,@object # @_Z9cuda_grayPhiiS_i
.section .rodata,"a",@progbits
.globl _Z9cuda_grayPhiiS_i
.p2align 3, 0x0
_Z9cuda_grayPhiiS_i:
.quad _Z24__device_stub__cuda_grayPhiiS_i
.size _Z9cuda_grayPhiiS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9cuda_grayPhiiS_i"
.size .L__unnamed_1, 20
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "gray_value"
.size .L__unnamed_2, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__cuda_grayPhiiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym gray_value
.addrsig_sym _Z9cuda_grayPhiiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Version %d.%d\n", prop.major, prop.minor);
printf(" Compute Mode: %d\n", prop.computeMode);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Multi Processor Count: %d\n\n", prop.multiProcessorCount);
printf(" TCC Driver: %d\n\n", prop.tccDriver);
printf(" Total Global Mem: %d\n\n", prop.totalGlobalMem);
printf(" Shared Mem Per Block: %d\n\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d\n\n", prop.regsPerBlock);
printf(" Warpsize: %d\n\n", prop.warpSize);
printf(" MemPitch: %d\n\n", prop.memPitch);
printf(" MaxThreadsPerBlock: %d\n\n", prop.maxThreadsPerBlock);
printf(" Can Map Host Memory: %d\n\n", prop.canMapHostMemory);
}
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Version %d.%d\n", prop.major, prop.minor);
printf(" Compute Mode: %d\n", prop.computeMode);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Multi Processor Count: %d\n\n", prop.multiProcessorCount);
printf(" TCC Driver: %d\n\n", prop.tccDriver);
printf(" Total Global Mem: %d\n\n", prop.totalGlobalMem);
printf(" Shared Mem Per Block: %d\n\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d\n\n", prop.regsPerBlock);
printf(" Warpsize: %d\n\n", prop.warpSize);
printf(" MemPitch: %d\n\n", prop.memPitch);
printf(" MaxThreadsPerBlock: %d\n\n", prop.maxThreadsPerBlock);
printf(" Can Map Host Memory: %d\n\n", prop.canMapHostMemory);
}
} | .file "tmpxft_001631ba_00000000-6_cuda_properties.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Device Number: %d\n"
.LC1:
.string " Device name: %s\n"
.LC2:
.string " Version %d.%d\n"
.LC3:
.string " Compute Mode: %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string " Memory Clock Rate (KHz): %d\n"
.align 8
.LC5:
.string " Memory Bus Width (bits): %d\n"
.align 8
.LC7:
.string " Peak Memory Bandwidth (GB/s): %f\n\n"
.section .rodata.str1.1
.LC8:
.string " Multi Processor Count: %d\n\n"
.LC9:
.string " TCC Driver: %d\n\n"
.LC10:
.string " Total Global Mem: %d\n\n"
.LC11:
.string " Shared Mem Per Block: %d\n\n"
.LC12:
.string " Registers Per Block: %d\n\n"
.LC13:
.string " Warpsize: %d\n\n"
.LC14:
.string " MemPitch: %d\n\n"
.LC15:
.string " MaxThreadsPerBlock: %d\n\n"
.LC16:
.string " Can Map Host Memory: %d\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $1056, %rsp
.cfi_def_cfa_offset 1104
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq .LC0(%rip), %r13
leaq .LC1(%rip), %r12
leaq .LC2(%rip), %rbp
.L5:
leaq 16(%rsp), %r14
movl %ebx, %esi
movq %r14, %rdi
call cudaGetDeviceProperties_v2@PLT
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rdx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 420(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 624(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 628(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdl 624(%rsp), %xmm0
addsd %xmm0, %xmm0
movl 628(%rsp), %edx
leal 7(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $3, %eax
pxor %xmm1, %xmm1
cvtsi2sdl %eax, %xmm1
mulsd %xmm1, %xmm0
divsd .LC6(%rip), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 612(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 304(%rsp), %rdx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 312(%rsp), %rdx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 320(%rsp), %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 324(%rsp), %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 328(%rsp), %rdx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 416(%rsp), %edx
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L5
.L4:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L9
movl $0, %eax
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Version %d.%d\n", prop.major, prop.minor);
printf(" Compute Mode: %d\n", prop.computeMode);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Multi Processor Count: %d\n\n", prop.multiProcessorCount);
printf(" TCC Driver: %d\n\n", prop.tccDriver);
printf(" Total Global Mem: %d\n\n", prop.totalGlobalMem);
printf(" Shared Mem Per Block: %d\n\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d\n\n", prop.regsPerBlock);
printf(" Warpsize: %d\n\n", prop.warpSize);
printf(" MemPitch: %d\n\n", prop.memPitch);
printf(" MaxThreadsPerBlock: %d\n\n", prop.maxThreadsPerBlock);
printf(" Can Map Host Memory: %d\n\n", prop.canMapHostMemory);
}
} | #include <hip/hip_runtime.h>
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Version %d.%d\n", prop.major, prop.minor);
printf(" Compute Mode: %d\n", prop.computeMode);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Multi Processor Count: %d\n\n", prop.multiProcessorCount);
printf(" TCC Driver: %d\n\n", prop.tccDriver);
printf(" Total Global Mem: %d\n\n", prop.totalGlobalMem);
printf(" Shared Mem Per Block: %d\n\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d\n\n", prop.regsPerBlock);
printf(" Warpsize: %d\n\n", prop.warpSize);
printf(" MemPitch: %d\n\n", prop.memPitch);
printf(" MaxThreadsPerBlock: %d\n\n", prop.maxThreadsPerBlock);
printf(" Can Map Host Memory: %d\n\n", prop.canMapHostMemory);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Version %d.%d\n", prop.major, prop.minor);
printf(" Compute Mode: %d\n", prop.computeMode);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Multi Processor Count: %d\n\n", prop.multiProcessorCount);
printf(" TCC Driver: %d\n\n", prop.tccDriver);
printf(" Total Global Mem: %d\n\n", prop.totalGlobalMem);
printf(" Shared Mem Per Block: %d\n\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d\n\n", prop.regsPerBlock);
printf(" Warpsize: %d\n\n", prop.warpSize);
printf(" MemPitch: %d\n\n", prop.memPitch);
printf(" MaxThreadsPerBlock: %d\n\n", prop.maxThreadsPerBlock);
printf(" Can Map Host Memory: %d\n\n", prop.canMapHostMemory);
}
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Version %d.%d\n", prop.major, prop.minor);
printf(" Compute Mode: %d\n", prop.computeMode);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Multi Processor Count: %d\n\n", prop.multiProcessorCount);
printf(" TCC Driver: %d\n\n", prop.tccDriver);
printf(" Total Global Mem: %d\n\n", prop.totalGlobalMem);
printf(" Shared Mem Per Block: %d\n\n", prop.sharedMemPerBlock);
printf(" Registers Per Block: %d\n\n", prop.regsPerBlock);
printf(" Warpsize: %d\n\n", prop.warpSize);
printf(" MemPitch: %d\n\n", prop.memPitch);
printf(" MaxThreadsPerBlock: %d\n\n", prop.maxThreadsPerBlock);
printf(" Can Map Host Memory: %d\n\n", prop.canMapHostMemory);
}
} | .text
.file "cuda_properties.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 4(%rsp)
jle .LBB0_3
# %bb.1: # %.lr.ph
leaq 8(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 368(%rsp), %esi
movl 372(%rsp), %edx
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 412(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 616(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl 620(%rsp), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
cvtsi2sdl 616(%rsp), %xmm1
addsd %xmm1, %xmm1
movl 620(%rsp), %eax
leal 7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $3, %ecx
cvtsi2sd %ecx, %xmm0
mulsd %xmm1, %xmm0
divsd .LCPI0_0(%rip), %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
movl 396(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl 604(%rsp), %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movq 296(%rsp), %rsi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movq 304(%rsp), %rsi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 312(%rsp), %esi
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movl 316(%rsp), %esi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
movq 320(%rsp), %rsi
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
movl 328(%rsp), %esi
movl $.L.str.14, %edi
xorl %eax, %eax
callq printf
movl 408(%rsp), %esi
movl $.L.str.15, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB0_2
.LBB0_3: # %._crit_edge
xorl %eax, %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Device Number: %d\n"
.size .L.str, 19
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " Device name: %s\n"
.size .L.str.1, 19
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " Version %d.%d\n"
.size .L.str.2, 17
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " Compute Mode: %d\n"
.size .L.str.3, 20
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " Memory Clock Rate (KHz): %d\n"
.size .L.str.4, 31
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz " Memory Bus Width (bits): %d\n"
.size .L.str.5, 31
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz " Peak Memory Bandwidth (GB/s): %f\n\n"
.size .L.str.6, 37
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz " Multi Processor Count: %d\n\n"
.size .L.str.7, 30
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz " TCC Driver: %d\n\n"
.size .L.str.8, 19
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz " Total Global Mem: %d\n\n"
.size .L.str.9, 25
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz " Shared Mem Per Block: %d\n\n"
.size .L.str.10, 29
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz " Registers Per Block: %d\n\n"
.size .L.str.11, 28
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz " Warpsize: %d\n\n"
.size .L.str.12, 17
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz " MemPitch: %d\n\n"
.size .L.str.13, 17
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz " MaxThreadsPerBlock: %d\n\n"
.size .L.str.14, 27
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz " Can Map Host Memory: %d\n\n"
.size .L.str.15, 28
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001631ba_00000000-6_cuda_properties.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Device Number: %d\n"
.LC1:
.string " Device name: %s\n"
.LC2:
.string " Version %d.%d\n"
.LC3:
.string " Compute Mode: %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string " Memory Clock Rate (KHz): %d\n"
.align 8
.LC5:
.string " Memory Bus Width (bits): %d\n"
.align 8
.LC7:
.string " Peak Memory Bandwidth (GB/s): %f\n\n"
.section .rodata.str1.1
.LC8:
.string " Multi Processor Count: %d\n\n"
.LC9:
.string " TCC Driver: %d\n\n"
.LC10:
.string " Total Global Mem: %d\n\n"
.LC11:
.string " Shared Mem Per Block: %d\n\n"
.LC12:
.string " Registers Per Block: %d\n\n"
.LC13:
.string " Warpsize: %d\n\n"
.LC14:
.string " MemPitch: %d\n\n"
.LC15:
.string " MaxThreadsPerBlock: %d\n\n"
.LC16:
.string " Can Map Host Memory: %d\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $1056, %rsp
.cfi_def_cfa_offset 1104
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq .LC0(%rip), %r13
leaq .LC1(%rip), %r12
leaq .LC2(%rip), %rbp
.L5:
leaq 16(%rsp), %r14
movl %ebx, %esi
movq %r14, %rdi
call cudaGetDeviceProperties_v2@PLT
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r14, %rdx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %ecx
movl 376(%rsp), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 420(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 624(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 628(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdl 624(%rsp), %xmm0
addsd %xmm0, %xmm0
movl 628(%rsp), %edx
leal 7(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $3, %eax
pxor %xmm1, %xmm1
cvtsi2sdl %eax, %xmm1
mulsd %xmm1, %xmm0
divsd .LC6(%rip), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 612(%rsp), %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 304(%rsp), %rdx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 312(%rsp), %rdx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 320(%rsp), %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 324(%rsp), %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 328(%rsp), %rdx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 416(%rsp), %edx
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L5
.L4:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L9
movl $0, %eax
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_properties.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 4(%rsp)
jle .LBB0_3
# %bb.1: # %.lr.ph
leaq 8(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 368(%rsp), %esi
movl 372(%rsp), %edx
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 412(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 616(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl 620(%rsp), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
cvtsi2sdl 616(%rsp), %xmm1
addsd %xmm1, %xmm1
movl 620(%rsp), %eax
leal 7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $3, %ecx
cvtsi2sd %ecx, %xmm0
mulsd %xmm1, %xmm0
divsd .LCPI0_0(%rip), %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
movl 396(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl 604(%rsp), %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movq 296(%rsp), %rsi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movq 304(%rsp), %rsi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 312(%rsp), %esi
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movl 316(%rsp), %esi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
movq 320(%rsp), %rsi
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
movl 328(%rsp), %esi
movl $.L.str.14, %edi
xorl %eax, %eax
callq printf
movl 408(%rsp), %esi
movl $.L.str.15, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB0_2
.LBB0_3: # %._crit_edge
xorl %eax, %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Device Number: %d\n"
.size .L.str, 19
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " Device name: %s\n"
.size .L.str.1, 19
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " Version %d.%d\n"
.size .L.str.2, 17
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " Compute Mode: %d\n"
.size .L.str.3, 20
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " Memory Clock Rate (KHz): %d\n"
.size .L.str.4, 31
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz " Memory Bus Width (bits): %d\n"
.size .L.str.5, 31
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz " Peak Memory Bandwidth (GB/s): %f\n\n"
.size .L.str.6, 37
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz " Multi Processor Count: %d\n\n"
.size .L.str.7, 30
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz " TCC Driver: %d\n\n"
.size .L.str.8, 19
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz " Total Global Mem: %d\n\n"
.size .L.str.9, 25
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz " Shared Mem Per Block: %d\n\n"
.size .L.str.10, 29
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz " Registers Per Block: %d\n\n"
.size .L.str.11, 28
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz " Warpsize: %d\n\n"
.size .L.str.12, 17
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz " MemPitch: %d\n\n"
.size .L.str.13, 17
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz " MaxThreadsPerBlock: %d\n\n"
.size .L.str.14, 27
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz " Can Map Host Memory: %d\n\n"
.size .L.str.15, 28
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Copyright 2020 Christopher Khan
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Description of GPU_kernels_single_precision.cu:
// This file contains the CUDA code that allows for performing the computations
// for GENRE on a GPU using single precision
// Define the GPU kernel that performs predictor normalization
__global__ void predictor_normalization(float * X_matrix_d, float * scaling_factors_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be normalized
int start_ind = 0;
// This if statement makes sure to not normalize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Normalize each predictor column so that the sum of the square of each predictor column is equal to 1
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
// Declare and initialize the variable that stores the sum of the square of the predictor column
float sum_squared = 0.0f;
// Calculate the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
sum_squared = sum_squared + (X_value * X_value);
}
// Calculate the square root of the sum of the square of the predictor column
float square_root_sum_squared = sqrtf(sum_squared);
// Store the square root of the sum of the square of the predictor column
scaling_factors_d[predictor_thread_stride + predictor_column] = square_root_sum_squared;
// Normalize the predictor column by dividing each observation in the predictor column by the square root of the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] / square_root_sum_squared;
}
}
// This if statement stores a scaling factor of 1 for the predictor column if it corresponds to an intercept term
if (intercept_flag == 1) {
scaling_factors_d[predictor_thread_stride] = 1.0f;
}
}
}
// Define the GPU kernel that performs predictor standardization
__global__ void predictor_standardization(float * X_matrix_d, float * scaling_factors_d, float * mean_X_matrix_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be standardized
int start_ind = 0;
// This if statement makes sure to not standardize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Standardize each predictor column by subtracting the mean of the predictor column from each observation and diving each observation by the standard deviation of the predictor column
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
// Declare and initialize the variable that stores the sum of the predictor column
float sum_value = 0.0f;
// Calculate the sum of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
sum_value = sum_value + X_value;
}
// Calculate the mean of the predictor column
float mean_value = sum_value / (float)num_observations;
// Store the mean of the predictor column
mean_X_matrix_d[predictor_thread_stride + predictor_column] = mean_value;
// Declare and initialize the variable that stores the sum of the square of the demeaned predictor column
float sum_squared = 0.0f;
// Normalize the predictor column by dividing each observation in the predictor column by the square root of the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value_demeaned = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value;
sum_squared = sum_squared + (X_value_demeaned * X_value_demeaned);
}
// Calculate the standard deviation of the demeaned predictor column
float std = sqrtf(sum_squared / (float)num_observations);
// Store the standard deviation of the demeaned predictor column
scaling_factors_d[predictor_thread_stride + predictor_column] = std;
// Standardize the predictor column by subtracting its mean and dividing by its standard deviation
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = (X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value) / std;
}
}
// This if statement stores a scaling factor of 1 and a mean of 1 for the first column if it corresponds to an intercept term
if (intercept_flag == 1) {
scaling_factors_d[predictor_thread_stride] = 1.0f;
mean_X_matrix_d[predictor_thread_stride] = 1.0f;
}
}
}
// Define the GPU kernel that calculates the standard deviations for each portion of the y_d array, standardizes the y_d array, and calculates the standardized lambda values
// Kernel: per-fit preparation for the elastic-net model fits.
// For each fit it computes the (population) standard deviation of that fit's
// segment of y_d, stores it in y_std_d, and — when the deviation is nonzero —
// sets the model-fit flag to 1, rescales the fit's lambda by 1/std, and writes
// the standardized y values into both y_d and residual_y_d.
// Launch layout: 1-D grid, one thread per fit; the last block may contain
// fewer active fits, which num_threads_last_block accounts for. Each thread
// touches only its own slices of the arrays, so no synchronization is needed.
__global__ void model_fit_preparation(float * y_d, float * residual_y_d, float * model_fit_flag_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * observation_thread_stride_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    const int blk = blockIdx.x;
    const int tid = threadIdx.x;
    // Global index of the fit handled by this thread
    const int fit_ind = (blk * num_threads_per_block) + tid;
    // The final block may carry fewer active fits than the others
    const int active_threads = (blk == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Guard: surplus threads in the last block do no work
    if (tid >= active_threads) {
        return;
    }
    // Per-fit observation count and offset into the observation arrays
    const int n_obs = (int)num_observations_d[fit_ind];
    const int obs_stride = (int)observation_thread_stride_d[fit_ind];
    // First pass: accumulate the sum of this fit's y values
    float running_sum = 0.0f;
    for (int obs = 0; obs < n_obs; obs++) {
        running_sum += y_d[obs_stride + obs];
    }
    const float mean = running_sum / (float)n_obs;
    // Second pass: accumulate the sum of squared deviations from the mean
    float squared_dev_sum = 0.0f;
    for (int obs = 0; obs < n_obs; obs++) {
        const float deviation = y_d[obs_stride + obs] - mean;
        squared_dev_sum += deviation * deviation;
    }
    // Population standard deviation of this fit's y segment
    const float std = sqrtf(squared_dev_sum / (float)n_obs);
    y_std_d[fit_ind] = std;
    // Only fits whose y has nonzero variance are standardized and fitted
    if (std != 0.0f) {
        // Mark this fit as one that should be performed
        model_fit_flag_d[fit_ind] = 1.0f;
        // Rescale the fit's lambda to match the standardized y
        standardized_lambda_values_d[fit_ind] = standardized_lambda_values_d[fit_ind] / std;
        // Standardize y in place and seed the residuals with the same values
        for (int obs = 0; obs < n_obs; obs++) {
            const float standardized = y_d[obs_stride + obs] / std;
            y_d[obs_stride + obs] = standardized;
            residual_y_d[obs_stride + obs] = standardized;
        }
    }
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data
__global__ void model_fit(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// NOTE(review): 1-D launch with one thread per model fit; every read and write
// below touches only this thread's own slices of the global arrays, so no
// synchronization is needed anywhere in this kernel.
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
// (1E12 is a double literal narrowed to float at initialization; it simply acts as an "always above tolerance" sentinel so the loop runs at least once.)
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficent value before it's updated
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value
float residual_y_value = residual_y_d[observation_thread_stride + observation_row] + (X_value * B_j);
// Store the updated residual value back into the residual_y_d array
residual_y_d[observation_thread_stride + observation_row] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = residual_y_d[observation_thread_stride + observation_row];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column
// NOTE(review): transformation_flag appears to encode how the predictor columns were scaled upstream (1/3: unit mean square; 2/4: scaled so the mean square is 1/num_observations) — TODO confirm against the host-side transformation code.
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statemet accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the residual_y_d array
residual_y_d[observation_thread_stride + observation_row] = residual_y_d[observation_thread_stride + observation_row] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation
// (model_fit_preparation standardized y by y_std_d[fit_ind]; multiplying the coefficients by the same factor restores the original y scale.)
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data
__global__ void model_fit_shared_memory(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Define the shared memory array that stores the residual values of the model fits within one block (the amount of bytes is declared in the GPU kernel call)
// NOTE(review): sdata holds the residual vectors of every fit in this block,
// interleaved by thread: element (observation_row * num_threads_per_block) +
// block_thread_ind belongs to this thread. Adjacent threads therefore access
// adjacent shared-memory words, and each thread only ever touches its own
// interleaved slots, so no __syncthreads() is required in this kernel. The
// launch must supply at least num_threads_per_block * (largest
// num_observations in the block) floats of dynamic shared memory — TODO
// confirm against the host-side launch configuration.
extern __shared__ float sdata[];
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
// (1E12 is a double literal narrowed to float at initialization; it simply acts as an "always above tolerance" sentinel so the loop runs at least once.)
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Store the residual values for the fit into the shared memory array
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
int store_ind = (observation_row * num_threads_per_block) + block_thread_ind;
sdata[store_ind] = residual_y_d[observation_thread_stride + observation_row];
}
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficent value before it's updated
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind] + (X_value * B_j);
// Store the updated residual value back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column
// NOTE(review): transformation_flag appears to encode how the predictor columns were scaled upstream (1/3: unit mean square; 2/4: scaled so the mean square is 1/num_observations) — TODO confirm against the host-side transformation code.
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statemet accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = sdata[(observation_row * num_threads_per_block) + block_thread_ind] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation
// (model_fit_preparation standardized y by y_std_d[fit_ind]; multiplying the coefficients by the same factor restores the original y scale. Note the final residuals are left in shared memory and are not written back to residual_y_d.)
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
// Define the GPU kernel that performs predictor coefficient unnormalization
// Kernel: undoes the per-column normalization of the fitted coefficients.
// Each (non-intercept) coefficient is divided by the scaling factor that was
// applied to its predictor column before fitting; the intercept column was
// never normalized, so it is skipped when intercept_flag is 1. Fits whose
// model-fit flag is not 1 are left untouched.
// Launch layout: 1-D grid, one thread per fit; the last block may contain
// fewer active fits (num_threads_last_block). X_matrix_d and
// X_matrix_thread_stride_d are unused here but kept for a uniform signature.
__global__ void predictor_coefficient_unnormalization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    const int blk = blockIdx.x;
    const int tid = threadIdx.x;
    // Global index of the fit handled by this thread
    const int fit_ind = (blk * num_threads_per_block) + tid;
    // The final block may carry fewer active fits than the others
    const int active_threads = (blk == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Guard: surplus threads in the last block do no work
    if (tid >= active_threads) {
        return;
    }
    // Only unnormalize coefficients of fits that were actually performed
    if ((int)model_fit_flag_d[fit_ind] != 1) {
        return;
    }
    const int coeff_stride = (int)B_thread_stride_d[fit_ind];
    const int n_predictors = (int)num_predictors_d[fit_ind];
    // Column 0 is the (unnormalized) intercept when intercept_flag is 1
    const int first_col = ((int)intercept_flag_d[fit_ind] == 1) ? 1 : 0;
    // Divide each remaining coefficient by its column's scaling factor
    for (int col = first_col; col < n_predictors; col++) {
        B_d[coeff_stride + col] = B_d[coeff_stride + col] / scaling_factors_d[coeff_stride + col];
    }
}
// Define the GPU kernel that performs predictor coefficient unstandardization
// Kernel: converts the fitted coefficients back to the original predictor
// scale. Each (non-intercept) coefficient is divided by its column's scaling
// factor and written back; when an intercept is present, it is additionally
// shifted by the accumulated sum of (unstandardized coefficient * column
// mean). Fits whose model-fit flag is not 1 are left untouched.
// Launch layout: 1-D grid, one thread per fit; the last block may contain
// fewer active fits (num_threads_last_block). X_matrix_d and
// X_matrix_thread_stride_d are unused here but kept for a uniform signature.
__global__ void predictor_coefficient_unstandardization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, float * mean_X_matrix_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    const int blk = blockIdx.x;
    const int tid = threadIdx.x;
    // Global index of the fit handled by this thread
    const int fit_ind = (blk * num_threads_per_block) + tid;
    // The final block may carry fewer active fits than the others
    const int active_threads = (blk == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Guard: surplus threads in the last block do no work
    if (tid >= active_threads) {
        return;
    }
    // Only unstandardize coefficients of fits that were actually performed
    if ((int)model_fit_flag_d[fit_ind] != 1) {
        return;
    }
    const int coeff_stride = (int)B_thread_stride_d[fit_ind];
    const int n_predictors = (int)num_predictors_d[fit_ind];
    const int has_intercept = ((int)intercept_flag_d[fit_ind] == 1);
    // Column 0 is the intercept when present; it was not standardized
    const int first_col = has_intercept ? 1 : 0;
    // Running sum of (unstandardized coefficient * column mean), used below to
    // adjust the intercept term
    float mean_contribution = 0.0f;
    for (int col = first_col; col < n_predictors; col++) {
        const float unstandardized = B_d[coeff_stride + col] / scaling_factors_d[coeff_stride + col];
        B_d[coeff_stride + col] = unstandardized;
        mean_contribution = mean_contribution + (unstandardized * mean_X_matrix_d[coeff_stride + col]);
    }
    // Shift the intercept to compensate for the removed column means
    if (has_intercept) {
        B_d[coeff_stride] = B_d[coeff_stride] - mean_contribution;
    }
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z61__device_stub__Z23predictor_normalizationPfS_PdS0_S0_S0_S_iiiPfS_PdS0_S0_S0_S_iii
.type _Z61__device_stub__Z23predictor_normalizationPfS_PdS0_S0_S0_S_iiiPfS_PdS0_S0_S0_S_iii, @function
_Z61__device_stub__Z23predictor_normalizationPfS_PdS0_S0_S0_S_iiiPfS_PdS0_S0_S0_S_iii:
.LFB2051:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movq %r9, 16(%rsp)
movq 240(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
leaq 256(%rsp), %rax
movq %rax, 192(%rsp)
leaq 264(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z61__device_stub__Z23predictor_normalizationPfS_PdS0_S0_S0_S_iiiPfS_PdS0_S0_S0_S_iii, .-_Z61__device_stub__Z23predictor_normalizationPfS_PdS0_S0_S0_S_iiiPfS_PdS0_S0_S0_S_iii
.globl _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii
.type _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii, @function
_Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z61__device_stub__Z23predictor_normalizationPfS_PdS0_S0_S0_S_iiiPfS_PdS0_S0_S0_S_iii
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii, .-_Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii
.globl _Z65__device_stub__Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iiiPfS_S_PdS0_S0_S0_S_iii
.type _Z65__device_stub__Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iiiPfS_S_PdS0_S0_S0_S_iii, @function
_Z65__device_stub__Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iiiPfS_S_PdS0_S0_S0_S_iii:
.LFB2053:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movq %r9, 16(%rsp)
movq 240(%rsp), %rax
movq %rax, 8(%rsp)
movq 248(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
movq %rsp, %rax
movq %rax, 184(%rsp)
leaq 256(%rsp), %rax
movq %rax, 192(%rsp)
leaq 264(%rsp), %rax
movq %rax, 200(%rsp)
leaq 272(%rsp), %rax
movq %rax, 208(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z65__device_stub__Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iiiPfS_S_PdS0_S0_S0_S_iii, .-_Z65__device_stub__Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iiiPfS_S_PdS0_S0_S0_S_iii
.globl _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.type _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii, @function
_Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii:
.LFB2054:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z65__device_stub__Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iiiPfS_S_PdS0_S0_S0_S_iii
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii, .-_Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.globl _Z57__device_stub__Z21model_fit_preparationPfS_S_S_S_PdS0_iiiPfS_S_S_S_PdS0_iii
.type _Z57__device_stub__Z21model_fit_preparationPfS_S_S_S_PdS0_iiiPfS_S_S_S_PdS0_iii, @function
_Z57__device_stub__Z21model_fit_preparationPfS_S_S_S_PdS0_iiiPfS_S_S_S_PdS0_iii:
.LFB2055:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movq %r9, 16(%rsp)
movq 240(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
leaq 256(%rsp), %rax
movq %rax, 192(%rsp)
leaq 264(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z21model_fit_preparationPfS_S_S_S_PdS0_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z57__device_stub__Z21model_fit_preparationPfS_S_S_S_PdS0_iiiPfS_S_S_S_PdS0_iii, .-_Z57__device_stub__Z21model_fit_preparationPfS_S_S_S_PdS0_iiiPfS_S_S_S_PdS0_iii
.globl _Z21model_fit_preparationPfS_S_S_S_PdS0_iii
.type _Z21model_fit_preparationPfS_S_S_S_PdS0_iii, @function
_Z21model_fit_preparationPfS_S_S_S_PdS0_iii:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
pushq 40(%rsp)
.cfi_def_cfa_offset 48
call _Z57__device_stub__Z21model_fit_preparationPfS_S_S_S_PdS0_iiiPfS_S_S_S_PdS0_iii
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _Z21model_fit_preparationPfS_S_S_S_PdS0_iii, .-_Z21model_fit_preparationPfS_S_S_S_PdS0_iii
.globl _Z64__device_stub__Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.type _Z64__device_stub__Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, @function
_Z64__device_stub__Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii:
.LFB2057:
.cfi_startproc
endbr64
subq $360, %rsp
.cfi_def_cfa_offset 368
movq %rdi, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rcx, 96(%rsp)
movq %r8, 88(%rsp)
movq %r9, 80(%rsp)
movq 368(%rsp), %rax
movq %rax, 72(%rsp)
movq 376(%rsp), %rax
movq %rax, 64(%rsp)
movq 384(%rsp), %rax
movq %rax, 56(%rsp)
movq 392(%rsp), %rax
movq %rax, 48(%rsp)
movq 400(%rsp), %rax
movq %rax, 40(%rsp)
movq 408(%rsp), %rax
movq %rax, 32(%rsp)
movq 416(%rsp), %rax
movq %rax, 24(%rsp)
movq 424(%rsp), %rax
movq %rax, 16(%rsp)
movq 432(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 344(%rsp)
xorl %eax, %eax
leaq 120(%rsp), %rax
movq %rax, 192(%rsp)
leaq 112(%rsp), %rax
movq %rax, 200(%rsp)
leaq 104(%rsp), %rax
movq %rax, 208(%rsp)
leaq 96(%rsp), %rax
movq %rax, 216(%rsp)
leaq 88(%rsp), %rax
movq %rax, 224(%rsp)
leaq 80(%rsp), %rax
movq %rax, 232(%rsp)
leaq 72(%rsp), %rax
movq %rax, 240(%rsp)
leaq 64(%rsp), %rax
movq %rax, 248(%rsp)
leaq 56(%rsp), %rax
movq %rax, 256(%rsp)
leaq 48(%rsp), %rax
movq %rax, 264(%rsp)
leaq 40(%rsp), %rax
movq %rax, 272(%rsp)
leaq 32(%rsp), %rax
movq %rax, 280(%rsp)
leaq 24(%rsp), %rax
movq %rax, 288(%rsp)
leaq 16(%rsp), %rax
movq %rax, 296(%rsp)
leaq 8(%rsp), %rax
movq %rax, 304(%rsp)
leaq 440(%rsp), %rax
movq %rax, 312(%rsp)
leaq 448(%rsp), %rax
movq %rax, 320(%rsp)
leaq 456(%rsp), %rax
movq %rax, 328(%rsp)
leaq 464(%rsp), %rax
movq %rax, 336(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl $1, 160(%rsp)
movl $1, 164(%rsp)
leaq 136(%rsp), %rcx
leaq 128(%rsp), %rdx
leaq 156(%rsp), %rsi
leaq 144(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 344(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $360, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 136(%rsp)
.cfi_def_cfa_offset 376
pushq 136(%rsp)
.cfi_def_cfa_offset 384
leaq 208(%rsp), %r9
movq 172(%rsp), %rcx
movl 180(%rsp), %r8d
movq 160(%rsp), %rsi
movl 168(%rsp), %edx
leaq _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 368
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z64__device_stub__Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, .-_Z64__device_stub__Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.globl _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.type _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, @function
_Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii:
.LFB2058:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
pushq 120(%rsp)
.cfi_def_cfa_offset 64
pushq 120(%rsp)
.cfi_def_cfa_offset 72
pushq 120(%rsp)
.cfi_def_cfa_offset 80
pushq 120(%rsp)
.cfi_def_cfa_offset 88
pushq 120(%rsp)
.cfi_def_cfa_offset 96
pushq 120(%rsp)
.cfi_def_cfa_offset 104
pushq 120(%rsp)
.cfi_def_cfa_offset 112
pushq 120(%rsp)
.cfi_def_cfa_offset 120
pushq 120(%rsp)
.cfi_def_cfa_offset 128
call _Z64__device_stub__Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, .-_Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.globl _Z79__device_stub__Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.type _Z79__device_stub__Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, @function
_Z79__device_stub__Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii:
.LFB2059:
.cfi_startproc
endbr64
subq $360, %rsp
.cfi_def_cfa_offset 368
movq %rdi, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rcx, 96(%rsp)
movq %r8, 88(%rsp)
movq %r9, 80(%rsp)
movq 368(%rsp), %rax
movq %rax, 72(%rsp)
movq 376(%rsp), %rax
movq %rax, 64(%rsp)
movq 384(%rsp), %rax
movq %rax, 56(%rsp)
movq 392(%rsp), %rax
movq %rax, 48(%rsp)
movq 400(%rsp), %rax
movq %rax, 40(%rsp)
movq 408(%rsp), %rax
movq %rax, 32(%rsp)
movq 416(%rsp), %rax
movq %rax, 24(%rsp)
movq 424(%rsp), %rax
movq %rax, 16(%rsp)
movq 432(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 344(%rsp)
xorl %eax, %eax
leaq 120(%rsp), %rax
movq %rax, 192(%rsp)
leaq 112(%rsp), %rax
movq %rax, 200(%rsp)
leaq 104(%rsp), %rax
movq %rax, 208(%rsp)
leaq 96(%rsp), %rax
movq %rax, 216(%rsp)
leaq 88(%rsp), %rax
movq %rax, 224(%rsp)
leaq 80(%rsp), %rax
movq %rax, 232(%rsp)
leaq 72(%rsp), %rax
movq %rax, 240(%rsp)
leaq 64(%rsp), %rax
movq %rax, 248(%rsp)
leaq 56(%rsp), %rax
movq %rax, 256(%rsp)
leaq 48(%rsp), %rax
movq %rax, 264(%rsp)
leaq 40(%rsp), %rax
movq %rax, 272(%rsp)
leaq 32(%rsp), %rax
movq %rax, 280(%rsp)
leaq 24(%rsp), %rax
movq %rax, 288(%rsp)
leaq 16(%rsp), %rax
movq %rax, 296(%rsp)
leaq 8(%rsp), %rax
movq %rax, 304(%rsp)
leaq 440(%rsp), %rax
movq %rax, 312(%rsp)
leaq 448(%rsp), %rax
movq %rax, 320(%rsp)
leaq 456(%rsp), %rax
movq %rax, 328(%rsp)
leaq 464(%rsp), %rax
movq %rax, 336(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
movl $1, 152(%rsp)
movl $1, 156(%rsp)
movl $1, 160(%rsp)
movl $1, 164(%rsp)
leaq 136(%rsp), %rcx
leaq 128(%rsp), %rdx
leaq 156(%rsp), %rsi
leaq 144(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 344(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $360, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 136(%rsp)
.cfi_def_cfa_offset 376
pushq 136(%rsp)
.cfi_def_cfa_offset 384
leaq 208(%rsp), %r9
movq 172(%rsp), %rcx
movl 180(%rsp), %r8d
movq 160(%rsp), %rsi
movl 168(%rsp), %edx
leaq _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 368
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z79__device_stub__Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, .-_Z79__device_stub__Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.globl _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.type _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, @function
_Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii:
.LFB2060:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
pushq 120(%rsp)
.cfi_def_cfa_offset 64
pushq 120(%rsp)
.cfi_def_cfa_offset 72
pushq 120(%rsp)
.cfi_def_cfa_offset 80
pushq 120(%rsp)
.cfi_def_cfa_offset 88
pushq 120(%rsp)
.cfi_def_cfa_offset 96
pushq 120(%rsp)
.cfi_def_cfa_offset 104
pushq 120(%rsp)
.cfi_def_cfa_offset 112
pushq 120(%rsp)
.cfi_def_cfa_offset 120
pushq 120(%rsp)
.cfi_def_cfa_offset 128
call _Z79__device_stub__Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiiiPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, .-_Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.globl _Z76__device_stub__Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iiiPfPdS_S_S0_S_S0_S_iii
.type _Z76__device_stub__Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iiiPfPdS_S_S0_S_S0_S_iii, @function
_Z76__device_stub__Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iiiPfPdS_S_S0_S_S0_S_iii:
.LFB2061:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
movq %rdx, 40(%rsp)
movq %rcx, 32(%rsp)
movq %r8, 24(%rsp)
movq %r9, 16(%rsp)
movq 240(%rsp), %rax
movq %rax, 8(%rsp)
movq 248(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 32(%rsp), %rax
movq %rax, 152(%rsp)
leaq 24(%rsp), %rax
movq %rax, 160(%rsp)
leaq 16(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
movq %rsp, %rax
movq %rax, 184(%rsp)
leaq 256(%rsp), %rax
movq %rax, 192(%rsp)
leaq 264(%rsp), %rax
movq %rax, 200(%rsp)
leaq 272(%rsp), %rax
movq %rax, 208(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z76__device_stub__Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iiiPfPdS_S_S0_S_S0_S_iii, .-_Z76__device_stub__Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iiiPfPdS_S_S0_S_S0_S_iii
.globl _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.type _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii, @function
_Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii:
.LFB2062:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z76__device_stub__Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iiiPfPdS_S_S0_S_S0_S_iii
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii, .-_Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.globl _Z80__device_stub__Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iiiPfPdS_S_S0_S_S_S0_S_iii
.type _Z80__device_stub__Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iiiPfPdS_S_S0_S_S_S0_S_iii, @function
_Z80__device_stub__Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iiiPfPdS_S_S0_S_S_S0_S_iii:
.LFB2063:
.cfi_startproc
endbr64
subq $264, %rsp
.cfi_def_cfa_offset 272
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
movq %r8, 40(%rsp)
movq %r9, 32(%rsp)
movq 272(%rsp), %rax
movq %rax, 24(%rsp)
movq 280(%rsp), %rax
movq %rax, 16(%rsp)
movq 288(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
leaq 72(%rsp), %rax
movq %rax, 144(%rsp)
leaq 64(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rax
movq %rax, 160(%rsp)
leaq 48(%rsp), %rax
movq %rax, 168(%rsp)
leaq 40(%rsp), %rax
movq %rax, 176(%rsp)
leaq 32(%rsp), %rax
movq %rax, 184(%rsp)
leaq 24(%rsp), %rax
movq %rax, 192(%rsp)
leaq 16(%rsp), %rax
movq %rax, 200(%rsp)
leaq 8(%rsp), %rax
movq %rax, 208(%rsp)
leaq 296(%rsp), %rax
movq %rax, 216(%rsp)
leaq 304(%rsp), %rax
movq %rax, 224(%rsp)
leaq 312(%rsp), %rax
movq %rax, 232(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 280
pushq 88(%rsp)
.cfi_def_cfa_offset 288
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 272
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z80__device_stub__Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iiiPfPdS_S_S0_S_S_S0_S_iii, .-_Z80__device_stub__Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iiiPfPdS_S_S0_S_S_S0_S_iii
.globl _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.type _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii, @function
_Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 56(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
pushq 56(%rsp)
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z80__device_stub__Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iiiPfPdS_S_S0_S_S_S0_S_iii
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii, .-_Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii"
.align 8
.LC1:
.string "_Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii"
.align 8
.LC2:
.string "_Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii"
.align 8
.LC3:
.string "_Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii"
.align 8
.LC4:
.string "_Z21model_fit_preparationPfS_S_S_S_PdS0_iii"
.align 8
.LC5:
.string "_Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii"
.align 8
.LC6:
.string "_Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2066:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z21model_fit_preparationPfS_S_S_S_PdS0_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Copyright 2020 Christopher Khan
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Description of GPU_kernels_single_precision.cu:
// This file contains the CUDA code that allows for performing the computations
// for GENRE on a GPU using single precision
// Define the GPU kernel that performs predictor normalization
__global__ void predictor_normalization(float * X_matrix_d, float * scaling_factors_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be normalized
int start_ind = 0;
// This if statement makes sure to not normalize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Normalize each predictor column so that the sum of the square of each predictor column is equal to 1
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
// Declare and initialize the variable that stores the sum of the square of the predictor column
float sum_squared = 0.0f;
// Calculate the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
sum_squared = sum_squared + (X_value * X_value);
}
// Calculate the square root of the sum of the square of the predictor column
float square_root_sum_squared = sqrtf(sum_squared);
// Store the square root of the sum of the square of the predictor column
scaling_factors_d[predictor_thread_stride + predictor_column] = square_root_sum_squared;
// Normalize the predictor column by dividing each observation in the predictor column by the square root of the sum of the square of the predictor column
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] / square_root_sum_squared;
}
}
// This if statement stores a scaling factor of 1 for the predictor column if it corresponds to an intercept term
if (intercept_flag == 1) {
scaling_factors_d[predictor_thread_stride] = 1.0f;
}
}
}
// GPU kernel that standardizes each predictor column of each fit's model matrix in place.
// One active thread handles one complete fit. For every (non-intercept) column it computes
// the column mean and population standard deviation, stores them in mean_X_matrix_d and
// scaling_factors_d, and rewrites the column as (value - mean) / std.
// NOTE(review): a constant column (std == 0) would produce a division by zero — confirm the
// host guarantees non-constant predictor columns.
__global__ void predictor_standardization(float * X_matrix_d, float * scaling_factors_d, float * mean_X_matrix_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Block index and thread index within the block
    int block_idx = blockIdx.x;
    int thread_idx = threadIdx.x;
    // Global index of the fit handled by this thread
    int fit_idx = (block_idx * num_threads_per_block) + thread_idx;
    // The last block may contain fewer active threads than the other blocks
    int active_threads = (block_idx == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Extra threads in the last block have no fit to process
    if (thread_idx >= active_threads) {
        return;
    }
    // Per-fit metadata loaded from the lookup arrays
    int predictor_thread_stride = (int)B_thread_stride_d[fit_idx];
    int num_observations = (int)num_observations_d[fit_idx];
    int num_predictors = (int)num_predictors_d[fit_idx];
    int X_thread_stride = (int)X_matrix_thread_stride_d[fit_idx];
    // Nonzero when the first column of the model matrix is an all-ones intercept column
    int intercept_flag = (int)intercept_flag_d[fit_idx];
    // An intercept column is not standardized, so start at column 1 when one is present
    int first_column = (intercept_flag == 1) ? 1 : 0;
    // Standardize every remaining predictor column in place
    for (int column = first_column; column < num_predictors; column++) {
        // Base offset of this column within the fit's model matrix (column-major layout)
        int column_offset = X_thread_stride + (column * num_observations);
        // Mean of the column
        float column_sum = 0.0f;
        for (int row = 0; row < num_observations; row++) {
            column_sum = column_sum + X_matrix_d[column_offset + row];
        }
        float column_mean = column_sum / (float)num_observations;
        mean_X_matrix_d[predictor_thread_stride + column] = column_mean;
        // Sum of squares of the demeaned column
        float demeaned_sum_squares = 0.0f;
        for (int row = 0; row < num_observations; row++) {
            float demeaned = X_matrix_d[column_offset + row] - column_mean;
            demeaned_sum_squares = demeaned_sum_squares + (demeaned * demeaned);
        }
        // Population standard deviation of the column (divide by N, not N - 1)
        float column_std = sqrtf(demeaned_sum_squares / (float)num_observations);
        scaling_factors_d[predictor_thread_stride + column] = column_std;
        // Standardize the column: subtract its mean and divide by its standard deviation
        for (int row = 0; row < num_observations; row++) {
            X_matrix_d[column_offset + row] = (X_matrix_d[column_offset + row] - column_mean) / column_std;
        }
    }
    // An intercept column is recorded with a scaling factor of 1 and a mean of 1
    if (intercept_flag == 1) {
        scaling_factors_d[predictor_thread_stride] = 1.0f;
        mean_X_matrix_d[predictor_thread_stride] = 1.0f;
    }
}
// GPU kernel that prepares each fit for model fitting. One active thread handles one fit:
// it computes the population standard deviation of the fit's portion of y_d and stores it
// in y_std_d. When that standard deviation is nonzero, it sets the fit's flag in
// model_fit_flag_d to 1, divides the fit's lambda value by the standard deviation, and
// standardizes the fit's portion of y_d, copying the standardized values into residual_y_d
// as the initial residuals. A zero standard deviation (constant y) leaves the flag unset
// so that no fit is attempted.
__global__ void model_fit_preparation(float * y_d, float * residual_y_d, float * model_fit_flag_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * observation_thread_stride_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Block index and thread index within the block
    int block_idx = blockIdx.x;
    int thread_idx = threadIdx.x;
    // Global index of the fit handled by this thread
    int fit_idx = (block_idx * num_threads_per_block) + thread_idx;
    // The last block may contain fewer active threads than the other blocks
    int active_threads = (block_idx == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Extra threads in the last block have no fit to process
    if (thread_idx >= active_threads) {
        return;
    }
    // Number of observations and the offset of this fit's observations in y_d
    int num_observations = (int)num_observations_d[fit_idx];
    int observation_thread_stride = (int)observation_thread_stride_d[fit_idx];
    // Mean of this fit's portion of y
    float running_sum = 0.0f;
    for (int row = 0; row < num_observations; row++) {
        running_sum += y_d[observation_thread_stride + row];
    }
    float mean = running_sum / (float)num_observations;
    // Population standard deviation of this fit's portion of y
    float std = 0.0f;
    for (int row = 0; row < num_observations; row++) {
        float deviation = y_d[observation_thread_stride + row] - mean;
        std += (deviation * deviation);
    }
    std = sqrtf(std / (float)num_observations);
    y_std_d[fit_idx] = std;
    // Only standardize when the standard deviation is nonzero; otherwise the fit is skipped
    if (std != 0.0f) {
        // Mark this fit as one that should be performed
        model_fit_flag_d[fit_idx] = 1.0f;
        // Put lambda on the same scale as the standardized y
        standardized_lambda_values_d[fit_idx] = standardized_lambda_values_d[fit_idx] / std;
        // Standardize y in place and initialize the residuals to the standardized y
        for (int row = 0; row < num_observations; row++) {
            float standardized = y_d[observation_thread_stride + row] / std;
            y_d[observation_thread_stride + row] = standardized;
            residual_y_d[observation_thread_stride + row] = standardized;
        }
    }
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data.
// One active thread performs one complete model fit serially. residual_y_d must contain the
// standardized y values on entry (as written by model_fit_preparation) and is updated in place
// so that it always holds y minus the current fitted values.
// transformation_flag selects the value of the mean squared predictor column used in the
// coordinate update: 1 or 3 -> 1.0f, 2 or 4 -> 1/num_observations (presumably matching which
// preprocessing kernel was applied to the predictor columns — TODO confirm against the host code).
__global__ void model_fit(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1 (set by model_fit_preparation when the fit's y has a nonzero standard deviation)
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value (elastic-net mixing parameter between L1 and L2) for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value (regularization strength) for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit (convergence threshold on the maximum coefficient change measure)
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
// NOTE(review): 1E12 is a double literal; harmless here because it is immediately converted to float
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficent value before it's updated (used below for the change measure)
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor (column-major layout: column j times num_observations plus the row)
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value (residual + X*B_j leaves predictor j out)
float residual_y_value = residual_y_d[observation_thread_stride + observation_row] + (X_value * B_j);
// Store the updated residual value back into the residual_y_d array
residual_y_d[observation_thread_stride + observation_row] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = residual_y_d[observation_thread_stride + observation_row];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization: shrink p_j toward zero by gamma, or zero it out entirely
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column (value is determined by which preprocessing was applied, encoded in transformation_flag)
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statemet accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Re-introduce the contribution of the current predictor into the residual values (residual - X*B_j) using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the residual_y_d array
residual_y_d[observation_thread_stride + observation_row] = residual_y_d[observation_thread_stride + observation_row] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
// Track the largest change across all predictors for this iteration
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation: rescale the fitted coefficients back to the original y scale
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data.
// Identical in algorithm to model_fit, except that each thread's residual values are kept in
// shared memory (sdata) instead of global memory for faster repeated access. Residuals for the
// block are interleaved as sdata[(observation_row * num_threads_per_block) + block_thread_ind],
// so the dynamic shared-memory allocation supplied at launch must hold at least
// num_threads_per_block * num_observations floats (for the largest num_observations in the
// block) — confirm the host computes the size accordingly.
// NOTE(review): the converged residuals are left in shared memory and never written back to
// residual_y_d — confirm that nothing downstream reads residual_y_d after this kernel.
__global__ void model_fit_shared_memory(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Define the shared memory array that stores the residual values of the model fits within one block (the amount of bytes is declared in the GPU kernel call)
extern __shared__ float sdata[];
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1 (set by model_fit_preparation when the fit's y has a nonzero standard deviation)
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value (elastic-net mixing parameter between L1 and L2) for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value (regularization strength) for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit (convergence threshold on the maximum coefficient change measure)
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
// NOTE(review): 1E12 is a double literal; harmless here because it is immediately converted to float
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Stage this fit's residual values from global memory into the shared memory array; the interleaved layout makes adjacent threads access adjacent shared addresses for a given observation row
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
int store_ind = (observation_row * num_threads_per_block) + block_thread_ind;
sdata[store_ind] = residual_y_d[observation_thread_stride + observation_row];
}
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficent value before it's updated (used below for the change measure)
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor (column-major layout: column j times num_observations plus the row)
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value (residual + X*B_j leaves predictor j out)
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind] + (X_value * B_j);
// Store the updated residual value back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization: shrink p_j toward zero by gamma, or zero it out entirely
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column (value is determined by which preprocessing was applied, encoded in transformation_flag)
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statemet accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Re-introduce the contribution of the current predictor into the residual values (residual - X*B_j) using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = sdata[(observation_row * num_threads_per_block) + block_thread_ind] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
// Track the largest change across all predictors for this iteration
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation: rescale the fitted coefficients back to the original y scale
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
// GPU kernel that undoes predictor-column normalization on the fitted coefficients.
// One active thread handles one fit: each (non-intercept) coefficient is divided by the
// scaling factor that its predictor column was divided by during normalization. Fits whose
// model_fit_flag is not 1 are left untouched.
__global__ void predictor_coefficient_unnormalization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Block index and thread index within the block
    int block_idx = blockIdx.x;
    int thread_idx = threadIdx.x;
    // Global index of the fit handled by this thread
    int fit_idx = (block_idx * num_threads_per_block) + thread_idx;
    // The last block may contain fewer active threads than the other blocks
    int active_threads = (block_idx == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Extra threads in the last block have no fit to process
    if (thread_idx >= active_threads) {
        return;
    }
    // Only unnormalize coefficients for fits that were actually performed
    if ((int)model_fit_flag_d[fit_idx] != 1) {
        return;
    }
    // Per-fit metadata loaded from the lookup arrays
    int predictor_thread_stride = (int)B_thread_stride_d[fit_idx];
    int num_predictors = (int)num_predictors_d[fit_idx];
    int intercept_flag = (int)intercept_flag_d[fit_idx];
    // An intercept column was never normalized, so its coefficient is skipped
    int first_column = (intercept_flag == 1) ? 1 : 0;
    // Divide each coefficient by its column's scaling factor to restore the original predictor scale
    for (int column = first_column; column < num_predictors; column++) {
        int idx = predictor_thread_stride + column;
        B_d[idx] = B_d[idx] / scaling_factors_d[idx];
    }
}
// GPU kernel that undoes predictor-column standardization on the fitted coefficients.
// One active thread handles one fit: each (non-intercept) coefficient is divided by its
// column's stored standard deviation, and — when an intercept is present — the intercept is
// reduced by sum(B_j * mean_j) to compensate for the mean subtraction applied to the columns.
// Fits whose model_fit_flag is not 1 are left untouched.
__global__ void predictor_coefficient_unstandardization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, float * mean_X_matrix_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Block index and thread index within the block
    int block_idx = blockIdx.x;
    int thread_idx = threadIdx.x;
    // Global index of the fit handled by this thread
    int fit_idx = (block_idx * num_threads_per_block) + thread_idx;
    // The last block may contain fewer active threads than the other blocks
    int active_threads = (block_idx == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Extra threads in the last block have no fit to process
    if (thread_idx >= active_threads) {
        return;
    }
    // Only unstandardize coefficients for fits that were actually performed
    if ((int)model_fit_flag_d[fit_idx] != 1) {
        return;
    }
    // Per-fit metadata loaded from the lookup arrays
    int predictor_thread_stride = (int)B_thread_stride_d[fit_idx];
    int num_predictors = (int)num_predictors_d[fit_idx];
    int intercept_flag = (int)intercept_flag_d[fit_idx];
    // An intercept column was never standardized, so its coefficient is skipped in the loop
    int first_column = (intercept_flag == 1) ? 1 : 0;
    // Accumulates sum(B_j * mean_j), used below to adjust the intercept term
    float mean_contribution = 0.0f;
    for (int column = first_column; column < num_predictors; column++) {
        int idx = predictor_thread_stride + column;
        // Undo the division by the column's standard deviation
        float unstandardized = B_d[idx] / scaling_factors_d[idx];
        B_d[idx] = unstandardized;
        mean_contribution = mean_contribution + (unstandardized * mean_X_matrix_d[idx]);
    }
    // Undo the column mean subtraction by folding it into the intercept term
    if (intercept_flag == 1) {
        B_d[predictor_thread_stride] = B_d[predictor_thread_stride] - mean_contribution;
    }
}
// Copyright 2020 Christopher Khan
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Description of GPU_kernels_single_precision.cu:
// This file contains the CUDA code that allows for performing the computations
// for GENRE on a GPU using single precision
// GPU kernel: normalizes each predictor column of every per-fit model matrix so
// that the sum of squares of each column equals 1, and records each column's
// Euclidean norm in scaling_factors_d so coefficients can be unnormalized later.
// Launch layout: 1-D grid with one thread per model fit; the last block may
// carry fewer active threads (num_threads_last_block) than the others.
// Per-fit strides/sizes arrive as doubles and are truncated to int on read.
__global__ void predictor_normalization(float * X_matrix_d, float * scaling_factors_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
	// Identify which fit this thread is responsible for
	int bid = blockIdx.x;
	int tid = threadIdx.x;
	int fit_ind = (bid * num_threads_per_block) + tid;
	// The last block may contain fewer active fits than the other blocks
	int active_threads = (bid == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
	// Surplus threads in the last block perform no work
	if (tid < active_threads) {
		// Per-fit offsets into the coefficient and model-matrix arrays
		int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
		int num_observations = (int)num_observations_d[fit_ind];
		int num_predictors = (int)num_predictors_d[fit_ind];
		int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
		// Non-zero when the first predictor column is the all-ones intercept column
		int intercept_flag = (int)intercept_flag_d[fit_ind];
		// Skip the intercept column (if present) when normalizing
		int start_ind = (intercept_flag == 1) ? 1 : 0;
		// Normalize every remaining predictor column to unit sum of squares
		for (int col = start_ind; col < num_predictors; col++) {
			// Accumulate the sum of squares of this column
			float sum_squared = 0.0f;
			for (int row = 0; row < num_observations; row++) {
				float X_value = X_matrix_d[X_thread_stride + (col * num_observations) + row];
				sum_squared = sum_squared + (X_value * X_value);
			}
			// The column's Euclidean norm becomes its scaling factor
			float column_norm = sqrtf(sum_squared);
			scaling_factors_d[predictor_thread_stride + col] = column_norm;
			// Divide each observation in the column by the norm
			for (int row = 0; row < num_observations; row++) {
				X_matrix_d[X_thread_stride + (col * num_observations) + row] = X_matrix_d[X_thread_stride + (col * num_observations) + row] / column_norm;
			}
		}
		// The untouched intercept column gets a unit scaling factor
		if (intercept_flag == 1) {
			scaling_factors_d[predictor_thread_stride] = 1.0f;
		}
	}
}
// Define the GPU kernel that performs predictor standardization.
// Each predictor column of every per-fit model matrix is shifted to zero mean and
// scaled to unit (population) standard deviation; column means are stored in
// mean_X_matrix_d and standard deviations in scaling_factors_d so that fitted
// coefficients can be unstandardized later.
// Launch layout: 1-D grid with one thread per model fit; the last block may
// contain fewer active threads (num_threads_last_block) than the others.
// Per-fit strides/sizes arrive as doubles and are truncated to int on read.
__global__ void predictor_standardization(float * X_matrix_d, float * scaling_factors_d, float * mean_X_matrix_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
	// Obtain the index of the block
	int block_ind = blockIdx.x;
	// Obtain the thread index within one block
	int block_thread_ind = threadIdx.x;
	// Calculate the fit index
	int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
	// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
	int num_threads_per_block_2 = num_threads_per_block;
	if (block_ind == (num_blocks - 1)) {
		num_threads_per_block_2 = num_threads_last_block;
	}
	// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer fits to perform
	if (block_thread_ind < num_threads_per_block_2) {
		// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
		int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
		// Obtain the number of observations for the fit
		int num_observations = (int)num_observations_d[fit_ind];
		// Obtain the number of predictors for the fit
		int num_predictors = (int)num_predictors_d[fit_ind];
		// Obtain the thread stride that is used to obtain the correct model matrix for the fit
		int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
		// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
		int intercept_flag = (int)intercept_flag_d[fit_ind];
		// Declare and initialize the variable that stores the number of the first predictor column to be standardized
		int start_ind = 0;
		// This if statement makes sure to not standardize the first predictor column if it corresponds to the intercept term
		if (intercept_flag == 1) {
			start_ind = 1;
		}
		// Standardize each predictor column by subtracting the mean of the predictor column from each observation and dividing each observation by the standard deviation of the predictor column
		for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
			// Declare and initialize the variable that stores the sum of the predictor column
			float sum_value = 0.0f;
			// Calculate the sum of the predictor column
			for (int observation_row = 0; observation_row < num_observations; observation_row++) {
				float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
				sum_value = sum_value + X_value;
			}
			// Calculate the mean of the predictor column
			float mean_value = sum_value / (float)num_observations;
			// Store the mean of the predictor column
			mean_X_matrix_d[predictor_thread_stride + predictor_column] = mean_value;
			// Declare and initialize the variable that stores the sum of the square of the demeaned predictor column
			float sum_squared = 0.0f;
			// Accumulate the sum of the squares of the demeaned predictor column (second pass over the column)
			for (int observation_row = 0; observation_row < num_observations; observation_row++) {
				float X_value_demeaned = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value;
				sum_squared = sum_squared + (X_value_demeaned * X_value_demeaned);
			}
			// Calculate the (population) standard deviation of the predictor column
			float std = sqrtf(sum_squared / (float)num_observations);
			// Store the standard deviation of the predictor column
			scaling_factors_d[predictor_thread_stride + predictor_column] = std;
			// Standardize the predictor column by subtracting its mean and dividing by its standard deviation
			for (int observation_row = 0; observation_row < num_observations; observation_row++) {
				X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = (X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value) / std;
			}
		}
		// This if statement stores a scaling factor of 1 and a mean of 1 for the first column if it corresponds to an intercept term
		if (intercept_flag == 1) {
			scaling_factors_d[predictor_thread_stride] = 1.0f;
			mean_X_matrix_d[predictor_thread_stride] = 1.0f;
		}
	}
}
// GPU kernel: for each fit, computes the (population) standard deviation of its
// y segment, standardizes y (and its residual copy) by that deviation, and
// rescales the fit's lambda accordingly. When the segment has zero standard
// deviation, nothing is standardized and the model-fit flag is left unset so
// downstream kernels skip the fit.
// Launch layout: 1-D grid with one thread per model fit; the last block may
// carry fewer active threads (num_threads_last_block) than the others.
__global__ void model_fit_preparation(float * y_d, float * residual_y_d, float * model_fit_flag_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * observation_thread_stride_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
	// Identify which fit this thread is responsible for
	int bid = blockIdx.x;
	int tid = threadIdx.x;
	int fit_ind = (bid * num_threads_per_block) + tid;
	// The last block may contain fewer active fits than the other blocks
	int active_threads = (bid == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
	// Surplus threads in the last block perform no work
	if (tid < active_threads) {
		// Per-fit segment length and offset into the observation arrays
		int num_observations = (int)num_observations_d[fit_ind];
		int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
		// First pass: running sum of this fit's y segment
		float sum_value = 0.0f;
		for (int observation = 0; observation < num_observations; observation++) {
			float value = y_d[observation_thread_stride + observation];
			sum_value += value;
		}
		// Mean of the segment
		float mean = sum_value / (float)num_observations;
		// Second pass: population standard deviation of the segment
		float std = 0.0f;
		for (int observation = 0; observation < num_observations; observation++) {
			float value_2 = y_d[observation_thread_stride + observation];
			std += ((value_2 - mean) * (value_2 - mean));
		}
		std = sqrtf(std / (float)num_observations);
		// Record the standard deviation so coefficients can be rescaled after fitting
		y_std_d[fit_ind] = std;
		// Only standardize when the deviation is non-zero (avoids division by zero)
		if (std != 0.0f) {
			// Mark this fit as one that should be performed
			model_fit_flag_d[fit_ind] = 1.0f;
			// Rescale the fit's lambda to the standardized y
			standardized_lambda_values_d[fit_ind] = standardized_lambda_values_d[fit_ind] / std;
			// Standardize y in place and seed the residual array with the same values
			for (int observation = 0; observation < num_observations; observation++) {
				float standardized_value = y_d[observation_thread_stride + observation] / std;
				y_d[observation_thread_stride + observation] = standardized_value;
				residual_y_d[observation_thread_stride + observation] = standardized_value;
			}
		}
	}
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data.
// Launch layout: 1-D grid with one thread per model fit; each thread runs its own
// complete coordinate-descent solve, reading and writing residuals directly in
// global memory (see model_fit_shared_memory for the shared-memory variant).
// Only fits whose model_fit_flag_d entry is 1 (set by model_fit_preparation) are solved.
// transformation_flag selects how the predictors were preprocessed: 1/3 imply unit
// mean-square columns, 2/4 imply columns whose mean square is 1/num_observations.
__global__ void model_fit(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
	// Obtain the index of the block
	int block_ind = blockIdx.x;
	// Obtain the thread index within one block
	int block_thread_ind = threadIdx.x;
	// Calculate the fit index
	int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
	// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
	int num_threads_per_block_2 = num_threads_per_block;
	if (block_ind == (num_blocks - 1)) {
		num_threads_per_block_2 = num_threads_last_block;
	}
	// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer threads
	if (block_thread_ind < num_threads_per_block_2) {
		// Obtain the flag that determines whether to perform a model fit or not
		int model_fit_flag = (int)model_fit_flag_d[fit_ind];
		// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
		if (model_fit_flag == 1) {
			// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
			int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
			// Obtain the thread stride that is used to obtain the correct model matrix for the fit
			int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
			// Obtain the thread stride that is used to obtain the correct set of observations for the fit
			int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
			// Obtain the alpha value (L1/L2 mixing parameter) for the fit
			float alpha = alpha_values_d[fit_ind];
			// Obtain the standardized lambda value for the fit
			float lambda = standardized_lambda_values_d[fit_ind];
			// Obtain the tolerance value for the fit
			float tolerance = tolerance_values_d[fit_ind];
			// Obtain the max iterations value for the fit (stored as float, truncated to int)
			int max_iterations = (int)max_iterations_values_d[fit_ind];
			// Obtain the number of observations for the fit
			int num_observations = (int)num_observations_d[fit_ind];
			// Obtain the number of predictors for the fit
			int num_predictors = (int)num_predictors_d[fit_ind];
			// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
			int intercept_flag = (int)intercept_flag_d[fit_ind];
			// Declare the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent; initialized large so at least one iteration runs
			float global_max_change = 1E12;
			// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
			int iteration_count = 0;
			// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
			while (global_max_change >= tolerance && iteration_count < max_iterations) {
				// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
				float max_change = 0.0f;
				// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
				float change = 0.0f;
				// Cycle through all of the predictors for one iteration of cyclic coordinate descent
				for (int j = 0; j < num_predictors; j++) {
					// Obtain the predictor coefficient value for the current predictor
					float B_j = B_d[predictor_thread_stride + j];
					// Store the predictor coefficient value before it's updated
					float previous_B_j = B_j;
					// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
					float p_j = 0.0f;
					// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
					// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
					// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
					if (B_j != 0.0f) {
						for (int observation_row = 0; observation_row < num_observations; observation_row++) {
							// Obtain the correct value from the model matrix for the current predictor
							float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
							// Remove the contribution of the current predictor from the current residual value
							float residual_y_value = residual_y_d[observation_thread_stride + observation_row] + (X_value * B_j);
							// Store the updated residual value back into the residual_y_d array
							residual_y_d[observation_thread_stride + observation_row] = residual_y_value;
							// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
							// The correlation is computed as a running sum
							p_j = p_j + (X_value * residual_y_value);
						}
					} else {
						for (int observation_row = 0; observation_row < num_observations; observation_row++) {
							// Obtain the correct value from the model matrix for the current predictor
							float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
							// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
							float residual_y_value = residual_y_d[observation_thread_stride + observation_row];
							// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
							// The correlation is computed as a running sum
							p_j = p_j + (X_value * residual_y_value);
						}
					}
					// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
					p_j = (1.0f / (float)num_observations) * p_j;
					// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
					float gamma = lambda * alpha;
					if (p_j > 0.0f && gamma < fabsf(p_j)) {
						B_j = p_j - gamma;
					} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
						B_j = p_j + gamma;
					} else {
						B_j = 0.0f;
					}
					// Declare and initialize the mean of the square of the predictor column
					float mean_squared_predictor_value = 0.0f;
					// Obtain the mean of the square of the predictor column (value depends on which preprocessing transformation was applied)
					if (transformation_flag == 1 || transformation_flag == 3) {
						mean_squared_predictor_value = 1.0f;
					} else if (transformation_flag == 2 || transformation_flag == 4) {
						mean_squared_predictor_value = 1.0f / (float)num_observations;
					}
					// This if-else statement accounts for the fact that regularization is not applied to the intercept term if one is included
					if (intercept_flag == 1 && j == 0) {
						// Use the computed correlation value as the updated predictor coefficient
						B_j = p_j;
					} else {
						// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
						// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
						B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
					}
					// Store the updated predictor coefficient value into the B_d array
					B_d[predictor_thread_stride + j] = B_j;
					// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
					// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
					if (B_j != 0.0f) {
						for (int observation_row = 0; observation_row < num_observations; observation_row++) {
							// Store the updated residual back into the residual_y_d array
							residual_y_d[observation_thread_stride + observation_row] = residual_y_d[observation_thread_stride + observation_row] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
						}
					}
					// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
					change = (previous_B_j - B_j) * (previous_B_j - B_j);
					if (transformation_flag == 2 || transformation_flag == 4) {
						// Scale the change for transformations whose columns have mean square 1/num_observations; the intercept column (if present) is excluded from the scaling
						if (intercept_flag == 1 && j > 0) {
							change = (1.0f / (float)num_observations) * change;
						} else if (intercept_flag == 0) {
							change = (1.0f / (float)num_observations) * change;
						}
					}
					if (change > max_change) {
						max_change = change;
					}
				}
				// Update the global_max_change variable
				global_max_change = max_change;
				// Update the iteration count variable
				iteration_count = iteration_count + 1;
			}
			// Account for the fact that the y in the model fit was divided by its standard deviation
			float std_y = y_std_d[fit_ind];
			for (int j = 0; j < num_predictors; j++) {
				B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
			}
		}
	}
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data.
// Shared-memory variant of model_fit: each thread's residual vector is staged in
// dynamic shared memory (size in bytes supplied as the third kernel-launch argument)
// instead of global memory. Residuals are laid out interleaved as
// sdata[(observation_row * num_threads_per_block) + block_thread_ind], so each thread
// owns one "column" of the shared array; no cross-thread sharing or __syncthreads() is needed.
// NOTE(review): the final residuals are left in shared memory and are NOT written back
// to residual_y_d before the kernel ends — confirm callers do not rely on residual_y_d
// being up to date after this kernel.
__global__ void model_fit_shared_memory(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
	// Define the shared memory array that stores the residual values of the model fits within one block (the amount of bytes is declared in the GPU kernel call)
	extern __shared__ float sdata[];
	// Obtain the index of the block
	int block_ind = blockIdx.x;
	// Obtain the thread index within one block
	int block_thread_ind = threadIdx.x;
	// Calculate the fit index
	int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
	// Determine how many threads are in the block (accounts for the fact that the last block may contain fewer active threads than the other blocks)
	int num_threads_per_block_2 = num_threads_per_block;
	if (block_ind == (num_blocks - 1)) {
		num_threads_per_block_2 = num_threads_last_block;
	}
	// This if statement makes sure that extra threads aren't doing data processing if the last block has fewer threads
	if (block_thread_ind < num_threads_per_block_2) {
		// Obtain the flag that determines whether to perform a model fit or not
		int model_fit_flag = (int)model_fit_flag_d[fit_ind];
		// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
		if (model_fit_flag == 1) {
			// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
			int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
			// Obtain the thread stride that is used to obtain the correct model matrix for the fit
			int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
			// Obtain the thread stride that is used to obtain the correct set of observations for the fit
			int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
			// Obtain the alpha value (L1/L2 mixing parameter) for the fit
			float alpha = alpha_values_d[fit_ind];
			// Obtain the standardized lambda value for the fit
			float lambda = standardized_lambda_values_d[fit_ind];
			// Obtain the tolerance value for the fit
			float tolerance = tolerance_values_d[fit_ind];
			// Obtain the max iterations value for the fit (stored as float, truncated to int)
			int max_iterations = (int)max_iterations_values_d[fit_ind];
			// Obtain the number of observations for the fit
			int num_observations = (int)num_observations_d[fit_ind];
			// Obtain the number of predictors for the fit
			int num_predictors = (int)num_predictors_d[fit_ind];
			// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
			int intercept_flag = (int)intercept_flag_d[fit_ind];
			// Declare the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent; initialized large so at least one iteration runs
			float global_max_change = 1E12;
			// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
			int iteration_count = 0;
			// Stage the residual values for the fit into the shared memory array (interleaved layout: one slot per thread per observation row)
			for (int observation_row = 0; observation_row < num_observations; observation_row++) {
				int store_ind = (observation_row * num_threads_per_block) + block_thread_ind;
				sdata[store_ind] = residual_y_d[observation_thread_stride + observation_row];
			}
			// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
			while (global_max_change >= tolerance && iteration_count < max_iterations) {
				// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
				float max_change = 0.0f;
				// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
				float change = 0.0f;
				// Cycle through all of the predictors for one iteration of cyclic coordinate descent
				for (int j = 0; j < num_predictors; j++) {
					// Obtain the predictor coefficient value for the current predictor
					float B_j = B_d[predictor_thread_stride + j];
					// Store the predictor coefficient value before it's updated
					float previous_B_j = B_j;
					// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
					float p_j = 0.0f;
					// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
					// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
					// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
					if (B_j != 0.0f) {
						for (int observation_row = 0; observation_row < num_observations; observation_row++) {
							// Obtain the correct value from the model matrix for the current predictor
							float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
							// Remove the contribution of the current predictor from the current residual value
							float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind] + (X_value * B_j);
							// Store the updated residual value back into the shared memory array
							sdata[(observation_row * num_threads_per_block) + block_thread_ind] = residual_y_value;
							// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
							// The correlation is computed as a running sum
							p_j = p_j + (X_value * residual_y_value);
						}
					} else {
						for (int observation_row = 0; observation_row < num_observations; observation_row++) {
							// Obtain the correct value from the model matrix for the current predictor
							float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
							// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
							float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind];
							// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
							// The correlation is computed as a running sum
							p_j = p_j + (X_value * residual_y_value);
						}
					}
					// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
					p_j = (1.0f / (float)num_observations) * p_j;
					// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
					float gamma = lambda * alpha;
					if (p_j > 0.0f && gamma < fabsf(p_j)) {
						B_j = p_j - gamma;
					} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
						B_j = p_j + gamma;
					} else {
						B_j = 0.0f;
					}
					// Declare and initialize the mean of the square of the predictor column
					float mean_squared_predictor_value = 0.0f;
					// Obtain the mean of the square of the predictor column (value depends on which preprocessing transformation was applied)
					if (transformation_flag == 1 || transformation_flag == 3) {
						mean_squared_predictor_value = 1.0f;
					} else if (transformation_flag == 2 || transformation_flag == 4) {
						mean_squared_predictor_value = 1.0f / (float)num_observations;
					}
					// This if-else statement accounts for the fact that regularization is not applied to the intercept term if one is included
					if (intercept_flag == 1 && j == 0) {
						// Use the computed correlation value as the updated predictor coefficient
						B_j = p_j;
					} else {
						// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
						// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
						B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
					}
					// Store the updated predictor coefficient value into the B_d array
					B_d[predictor_thread_stride + j] = B_j;
					// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
					// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
					if (B_j != 0.0f) {
						for (int observation_row = 0; observation_row < num_observations; observation_row++) {
							// Store the updated residual back into the shared memory array
							sdata[(observation_row * num_threads_per_block) + block_thread_ind] = sdata[(observation_row * num_threads_per_block) + block_thread_ind] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
						}
					}
					// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
					change = (previous_B_j - B_j) * (previous_B_j - B_j);
					if (transformation_flag == 2 || transformation_flag == 4) {
						// Scale the change for transformations whose columns have mean square 1/num_observations; the intercept column (if present) is excluded from the scaling
						if (intercept_flag == 1 && j > 0) {
							change = (1.0f / (float)num_observations) * change;
						} else if (intercept_flag == 0) {
							change = (1.0f / (float)num_observations) * change;
						}
					}
					if (change > max_change) {
						max_change = change;
					}
				}
				// Update the global_max_change variable
				global_max_change = max_change;
				// Update the iteration count variable
				iteration_count = iteration_count + 1;
			}
			// Account for the fact that the y in the model fit was divided by its standard deviation
			float std_y = y_std_d[fit_ind];
			for (int j = 0; j < num_predictors; j++) {
				B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
			}
		}
	}
}
// Define the GPU kernel that performs predictor coefficient unnormalization
// Unnormalizes the fitted predictor coefficients by dividing each coefficient
// (except an intercept term, if one is included) by the scaling factor that was
// used to normalize its predictor column before the model fit was performed.
// Grid layout: one thread per fit; the last block may contain fewer active threads.
__global__ void predictor_coefficient_unnormalization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Identify which fit this thread is responsible for
    const int block_ind = blockIdx.x;
    const int block_thread_ind = threadIdx.x;
    const int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
    // The last block may be partially filled, so determine its active thread count
    const int active_threads = (block_ind == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Guard clause: surplus threads in the last block do no work
    if (block_thread_ind >= active_threads) {
        return;
    }
    // Only unnormalize coefficients for fits that were actually performed
    if ((int)model_fit_flag_d[fit_ind] != 1) {
        return;
    }
    // Per-fit bookkeeping: coefficient offset, predictor count, and intercept flag
    const int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
    const int num_predictors = (int)num_predictors_d[fit_ind];
    const int intercept_flag = (int)intercept_flag_d[fit_ind];
    // Skip the first column when it is the all-ones intercept column
    const int start_ind = (intercept_flag == 1) ? 1 : 0;
    // Divide each coefficient by the scaling factor of its predictor column
    for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
        const int coeff_ind = predictor_thread_stride + predictor_column;
        B_d[coeff_ind] = B_d[coeff_ind] / scaling_factors_d[coeff_ind];
    }
}
// Define the GPU kernel that performs predictor coefficient unstandardization
// Unstandardizes the fitted predictor coefficients: each non-intercept
// coefficient is divided by the standard deviation of its predictor column,
// and the intercept term (if included) is adjusted by subtracting the sum of
// the unstandardized coefficients weighted by their column means.
// Grid layout: one thread per fit; the last block may contain fewer active threads.
__global__ void predictor_coefficient_unstandardization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, float * mean_X_matrix_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Identify which fit this thread is responsible for
    const int block_ind = blockIdx.x;
    const int block_thread_ind = threadIdx.x;
    const int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
    // The last block may be partially filled, so determine its active thread count
    const int active_threads = (block_ind == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Guard clause: surplus threads in the last block do no work
    if (block_thread_ind >= active_threads) {
        return;
    }
    // Only unstandardize coefficients for fits that were actually performed
    if ((int)model_fit_flag_d[fit_ind] != 1) {
        return;
    }
    // Per-fit bookkeeping: coefficient offset, predictor count, and intercept flag
    const int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
    const int num_predictors = (int)num_predictors_d[fit_ind];
    const int intercept_flag = (int)intercept_flag_d[fit_ind];
    // Skip the first column when it is the all-ones intercept column
    const int start_ind = (intercept_flag == 1) ? 1 : 0;
    // Running sum of (unstandardized coefficient * column mean), used to
    // adjust the intercept term afterwards
    float sum_value = 0.0f;
    for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
        const int coeff_ind = predictor_thread_stride + predictor_column;
        // Undo the standardization scaling for this coefficient
        const float B_unstandardized = B_d[coeff_ind] / scaling_factors_d[coeff_ind];
        B_d[coeff_ind] = B_unstandardized;
        sum_value = sum_value + (B_unstandardized * mean_X_matrix_d[coeff_ind]);
    }
    // Shift the intercept term to account for the removed column means
    if (intercept_flag == 1) {
        B_d[predictor_thread_stride] = B_d[predictor_thread_stride] - sum_value;
    }
}
// You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
// Copyright 2020 Christopher Khan
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the license at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Description of GPU_kernels_single_precision.cu:
// This file contains the CUDA code that allows for performing the computations
// for GENRE on a GPU using single precision
// Define the GPU kernel that performs predictor normalization
// Normalizes each predictor column of a fit's model matrix so that the sum of
// squares of the column equals 1, and records each column's scaling factor
// (its Euclidean norm) in scaling_factors_d. An intercept column of ones is
// left untouched and assigned a scaling factor of 1.
// Grid layout: one thread per fit; the last block may contain fewer active threads.
__global__ void predictor_normalization(float * X_matrix_d, float * scaling_factors_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Identify which fit this thread is responsible for
    const int block_ind = blockIdx.x;
    const int block_thread_ind = threadIdx.x;
    const int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
    // The last block may be partially filled, so determine its active thread count
    const int active_threads = (block_ind == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Guard clause: surplus threads in the last block do no work
    if (block_thread_ind >= active_threads) {
        return;
    }
    // Per-fit bookkeeping loaded from the stride/metadata arrays
    const int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
    const int num_observations = (int)num_observations_d[fit_ind];
    const int num_predictors = (int)num_predictors_d[fit_ind];
    const int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
    const int intercept_flag = (int)intercept_flag_d[fit_ind];
    // Do not normalize the first column when it is the intercept column of ones
    const int start_ind = (intercept_flag == 1) ? 1 : 0;
    for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
        // Offset of the first observation of this predictor column
        // (the model matrix is stored column-major within each fit's stride)
        const int column_offset = X_thread_stride + (predictor_column * num_observations);
        // Accumulate the sum of squares of the column
        float sum_squared = 0.0f;
        for (int observation_row = 0; observation_row < num_observations; observation_row++) {
            const float X_value = X_matrix_d[column_offset + observation_row];
            sum_squared = sum_squared + (X_value * X_value);
        }
        // The column's Euclidean norm is its scaling factor
        const float square_root_sum_squared = sqrtf(sum_squared);
        scaling_factors_d[predictor_thread_stride + predictor_column] = square_root_sum_squared;
        // Divide every observation in the column by the norm
        for (int observation_row = 0; observation_row < num_observations; observation_row++) {
            X_matrix_d[column_offset + observation_row] = X_matrix_d[column_offset + observation_row] / square_root_sum_squared;
        }
    }
    // The intercept column is defined to have a scaling factor of 1
    if (intercept_flag == 1) {
        scaling_factors_d[predictor_thread_stride] = 1.0f;
    }
}
// Define the GPU kernel that performs predictor standardization
// Standardizes each predictor column of a fit's model matrix in place:
// subtracts the column mean from every observation and divides by the column's
// (population) standard deviation. The per-column means and standard deviations
// are recorded in mean_X_matrix_d and scaling_factors_d so the coefficients can
// be unstandardized later. An intercept column of ones is left untouched and
// given a mean of 1 and a scaling factor of 1.
// Grid layout: one thread per fit; the last block may contain fewer active threads.
// NOTE(review): a constant (zero-variance) column yields std == 0 and a division
// by zero below — presumably the inputs exclude constant non-intercept columns; confirm.
__global__ void predictor_standardization(float * X_matrix_d, float * scaling_factors_d, float * mean_X_matrix_d, double * X_matrix_thread_stride_d, double * B_thread_stride_d, double * num_observations_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Obtain the index of the block
    int block_ind = blockIdx.x;
    // Obtain the thread index within one block
    int block_thread_ind = threadIdx.x;
    // Calculate the fit index
    int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
    // Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
    int num_threads_per_block_2 = num_threads_per_block;
    if (block_ind == (num_blocks - 1)) {
        num_threads_per_block_2 = num_threads_last_block;
    }
    // This if statement makes sure that extra threads aren't doing data processing if the last block has less fits to perform
    if (block_thread_ind < num_threads_per_block_2) {
        // Obtain the thread stride that is used to obtain the correct set of predictors for the fit
        int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
        // Obtain the number of observations for the fit
        int num_observations = (int)num_observations_d[fit_ind];
        // Obtain the number of predictors for the fit
        int num_predictors = (int)num_predictors_d[fit_ind];
        // Obtain the thread stride that is used to obtain the correct model matrix for the fit
        int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
        // Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
        int intercept_flag = (int)intercept_flag_d[fit_ind];
        // Declare and initialize the variable that stores the number of the first predictor column to be standardized
        int start_ind = 0;
        // This if statement makes sure to not standardize the first predictor column if it corresponds to the intercept term
        if (intercept_flag == 1) {
            start_ind = 1;
        }
        // Standardize each predictor column by subtracting the mean of the predictor column from each observation and diving each observation by the standard deviation of the predictor column
        for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
            // Declare and initialize the variable that stores the sum of the predictor column
            float sum_value = 0.0f;
            // Calculate the sum of the predictor column
            for (int observation_row = 0; observation_row < num_observations; observation_row++) {
                float X_value = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row];
                sum_value = sum_value + X_value;
            }
            // Calculate the mean of the predictor column
            float mean_value = sum_value / (float)num_observations;
            // Store the mean of the predictor column
            mean_X_matrix_d[predictor_thread_stride + predictor_column] = mean_value;
            // Declare and initialize the variable that stores the sum of the square of the demeaned predictor column
            float sum_squared = 0.0f;
            // Calculate the sum of the square of the demeaned predictor column
            for (int observation_row = 0; observation_row < num_observations; observation_row++) {
                float X_value_demeaned = X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value;
                sum_squared = sum_squared + (X_value_demeaned * X_value_demeaned);
            }
            // Calculate the standard deviation of the demeaned predictor column (population form: divides by N)
            float std = sqrtf(sum_squared / (float)num_observations);
            // Store the standard deviation of the demeaned predictor column
            scaling_factors_d[predictor_thread_stride + predictor_column] = std;
            // Standardize the predictor column by subtracting its mean and dividing by its standard deviation
            for (int observation_row = 0; observation_row < num_observations; observation_row++) {
                X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] = (X_matrix_d[X_thread_stride + (predictor_column * num_observations) + observation_row] - mean_value) / std;
            }
        }
        // This if statement stores a scaling factor of 1 and a mean of 1 for the first column if it corresponds to an intercept term
        if (intercept_flag == 1) {
            scaling_factors_d[predictor_thread_stride] = 1.0f;
            mean_X_matrix_d[predictor_thread_stride] = 1.0f;
        }
    }
}
// Define the GPU kernel that calculates the standard deviations for each portion of the y_d array, standardizes the y_d array, and calculates the standardized lambda values
// Prepares each fit for the coordinate-descent solver: computes the (population)
// standard deviation of the fit's y data and stores it in y_std_d. When the
// standard deviation is nonzero, the fit is flagged as runnable, the lambda
// value is standardized by dividing it by the standard deviation, and y is
// standardized in place and copied into residual_y_d as the initial residuals.
// Grid layout: one thread per fit; the last block may contain fewer active threads.
__global__ void model_fit_preparation(float * y_d, float * residual_y_d, float * model_fit_flag_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * observation_thread_stride_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Identify which fit this thread is responsible for
    const int block_ind = blockIdx.x;
    const int block_thread_ind = threadIdx.x;
    const int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
    // The last block may be partially filled, so determine its active thread count
    const int active_threads = (block_ind == (num_blocks - 1)) ? num_threads_last_block : num_threads_per_block;
    // Guard clause: surplus threads in the last block do no work
    if (block_thread_ind >= active_threads) {
        return;
    }
    // Per-fit metadata: observation count and offset into the y arrays
    const int num_observations = (int)num_observations_d[fit_ind];
    const int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
    // First pass: running sum of y for this fit
    float sum_value = 0.0f;
    for (int observation = 0; observation < num_observations; observation++) {
        const float value = y_d[observation_thread_stride + observation];
        sum_value += value;
    }
    // Mean of y for the fit
    const float mean = sum_value / (float)num_observations;
    // Second pass: population standard deviation of y (divides by N)
    float std = 0.0f;
    for (int observation = 0; observation < num_observations; observation++) {
        const float value_2 = y_d[observation_thread_stride + observation];
        std += ((value_2 - mean) * (value_2 - mean));
    }
    std = sqrtf(std / (float)num_observations);
    // Record the standard deviation so coefficients can be rescaled after the fit
    y_std_d[fit_ind] = std;
    // A zero standard deviation means y is constant, so no model fit is performed
    // and the flag / lambda / y data are left untouched
    if (std != 0.0f) {
        // Mark this fit as runnable
        model_fit_flag_d[fit_ind] = 1.0f;
        // Standardize the lambda value to match the standardized y scale
        standardized_lambda_values_d[fit_ind] = standardized_lambda_values_d[fit_ind] / std;
        // Standardize y in place and seed the residuals with the standardized values
        for (int observation = 0; observation < num_observations; observation++) {
            const float standardized_value = y_d[observation_thread_stride + observation] / std;
            y_d[observation_thread_stride + observation] = standardized_value;
            residual_y_d[observation_thread_stride + observation] = standardized_value;
        }
    }
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data
// Fits one elastic-net regularized least-squares model per thread using cyclic
// coordinate descent, operating entirely out of global memory.
// Grid layout: one thread per fit; the last block may contain fewer active threads.
// Inputs are located per fit via the *_thread_stride_d arrays. residual_y_d is
// expected to already hold the standardized y values (seeded by
// model_fit_preparation elsewhere in this file). On exit, B_d holds the fitted
// coefficients rescaled by the fit's y standard deviation, and residual_y_d holds
// the final residuals (still on the standardized scale).
// NOTE(review): transformation_flag in {1,3} vs {2,4} selects the assumed mean
// square of each predictor column (1 vs 1/N) — presumably matching whether the
// columns were normalized or standardized upstream; confirm against callers.
__global__ void model_fit(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Obtain the index of the block
    int block_ind = blockIdx.x;
    // Obtain the thread index within one block
    int block_thread_ind = threadIdx.x;
    // Calculate the fit index
    int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
    // Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
    int num_threads_per_block_2 = num_threads_per_block;
    if (block_ind == (num_blocks - 1)) {
        num_threads_per_block_2 = num_threads_last_block;
    }
    // This if statement makes sure that extra threads aren't doing data processing if the last block has less threads
    if (block_thread_ind < num_threads_per_block_2) {
        // Obtain the flag that determines whether to perform a model fit or not
        int model_fit_flag = (int)model_fit_flag_d[fit_ind];
        // This if statement is to ensure that a model fit is performed only if the model fit flag is 1
        if (model_fit_flag == 1) {
            // Obtain the thread stride that is used to obtain the correct set of predictors for the fit
            int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
            // Obtain the thread stride that is used to obtain the correct model matrix for the fit
            int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
            // Obtain the thread stride that is used to obtain the correct set of observations for the fit
            int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
            // Obtain the alpha value for the fit (elastic-net mixing parameter)
            float alpha = alpha_values_d[fit_ind];
            // Obtain the standardized lambda value for the fit (regularization strength)
            float lambda = standardized_lambda_values_d[fit_ind];
            // Obtain the tolerance value for the fit (convergence threshold)
            float tolerance = tolerance_values_d[fit_ind];
            // Obtain the max iterations value for the fit
            int max_iterations = (int)max_iterations_values_d[fit_ind];
            // Obtain the number of observations for the fit
            int num_observations = (int)num_observations_d[fit_ind];
            // Obtain the number of predictors for the fit
            int num_predictors = (int)num_predictors_d[fit_ind];
            // Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
            int intercept_flag = (int)intercept_flag_d[fit_ind];
            // Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
            // (initialized large so the while loop always runs at least once; 1E12 is a double literal implicitly narrowed to float, which is harmless here)
            float global_max_change = 1E12;
            // Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
            int iteration_count = 0;
            // Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
            while (global_max_change >= tolerance && iteration_count < max_iterations) {
                // Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
                float max_change = 0.0f;
                // Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
                float change = 0.0f;
                // Cycle through all of the predictors for one iteration of cyclic coordinate descent
                for (int j = 0; j < num_predictors; j++) {
                    // Obtain the predictor coefficient value for the current predictor
                    float B_j = B_d[predictor_thread_stride + j];
                    // Store the predictor coefficent value before it's updated
                    float previous_B_j = B_j;
                    // Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
                    float p_j = 0.0f;
                    // Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
                    // This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
                    // This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
                    if (B_j != 0.0f) {
                        for (int observation_row = 0; observation_row < num_observations; observation_row++) {
                            // Obtain the correct value from the model matrix for the current predictor
                            float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
                            // Remove the contribution of the current predictor from the current residual value
                            float residual_y_value = residual_y_d[observation_thread_stride + observation_row] + (X_value * B_j);
                            // Store the updated residual value back into the residual_y_d array
                            residual_y_d[observation_thread_stride + observation_row] = residual_y_value;
                            // Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
                            // The correlation is computed as a running sum
                            p_j = p_j + (X_value * residual_y_value);
                        }
                    } else {
                        for (int observation_row = 0; observation_row < num_observations; observation_row++) {
                            // Obtain the correct value from the model matrix for the current predictor
                            float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
                            // Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
                            float residual_y_value = residual_y_d[observation_thread_stride + observation_row];
                            // Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
                            // The correlation is computed as a running sum
                            p_j = p_j + (X_value * residual_y_value);
                        }
                    }
                    // Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
                    p_j = (1.0f / (float)num_observations) * p_j;
                    // Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization:
                    // shrink p_j toward zero by gamma, and set the coefficient to zero when |p_j| <= gamma
                    float gamma = lambda * alpha;
                    if (p_j > 0.0f && gamma < fabsf(p_j)) {
                        B_j = p_j - gamma;
                    } else if (p_j < 0.0f && gamma < fabsf(p_j)) {
                        B_j = p_j + gamma;
                    } else {
                        B_j = 0.0f;
                    }
                    // Declare and initialize the mean of the square of the predictor column
                    float mean_squared_predictor_value = 0.0f;
                    // Obtain the mean of the square of the predictor column
                    if (transformation_flag == 1 || transformation_flag == 3) {
                        mean_squared_predictor_value = 1.0f;
                    } else if (transformation_flag == 2 || transformation_flag == 4) {
                        mean_squared_predictor_value = 1.0f / (float)num_observations;
                    }
                    // This if-else statemet accounts for the fact that regularization is not applied to the intercept term if one is included
                    if (intercept_flag == 1 && j == 0) {
                        // Use the computed correlation value as the updated predictor coefficient
                        B_j = p_j;
                    } else {
                        // Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
                        // The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
                        B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
                    }
                    // Store the updated predictor coefficient value into the B_d array
                    B_d[predictor_thread_stride + j] = B_j;
                    // Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
                    // If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
                    if (B_j != 0.0f) {
                        for (int observation_row = 0; observation_row < num_observations; observation_row++) {
                            // Store the updated residual back into the residual_y_d array
                            residual_y_d[observation_thread_stride + observation_row] = residual_y_d[observation_thread_stride + observation_row] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
                        }
                    }
                    // Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
                    change = (previous_B_j - B_j) * (previous_B_j - B_j);
                    if (transformation_flag == 2 || transformation_flag == 4) {
                        // For the standardized transformations, scale the change by 1/N for every non-intercept coefficient
                        if (intercept_flag == 1 && j > 0) {
                            change = (1.0f / (float)num_observations) * change;
                        } else if (intercept_flag == 0) {
                            change = (1.0f / (float)num_observations) * change;
                        }
                    }
                    // Track the largest per-coefficient change seen in this sweep
                    if (change > max_change) {
                        max_change = change;
                    }
                }
                // Update the global_max_change variable
                global_max_change = max_change;
                // Update the iteration count variable
                iteration_count = iteration_count + 1;
            }
            // Account for the fact that the y in the model fit was divided by its standard deviation
            float std_y = y_std_d[fit_ind];
            for (int j = 0; j < num_predictors; j++) {
                B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
            }
        }
    }
}
// Define the GPU kernel that performs least-squares regression with elastic-net regularization using the cyclic coordinate descent optimization algorithm in order to fit the model matrices to the data
__global__ void model_fit_shared_memory(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, double * observation_thread_stride_d, float * residual_y_d, float * y_std_d, float * standardized_lambda_values_d, double * num_observations_d, double * num_predictors_d, float * alpha_values_d, float * tolerance_values_d, float * max_iterations_values_d, float * intercept_flag_d, int transformation_flag, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Define the shared memory array that stores the residual values of the model fits within one block (the amount of bytes is declared in the GPU kernel call)
extern __shared__ float sdata[];
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less threads
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether to perform a model fit or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that a model fit is performed only if the model fit flag is 1
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct model matrix for the fit
int X_thread_stride = (int)X_matrix_thread_stride_d[fit_ind];
// Obtain the thread stride that is used to obtain the correct set of observations for the fit
int observation_thread_stride = (int)observation_thread_stride_d[fit_ind];
// Obtain the alpha value for the fit
float alpha = alpha_values_d[fit_ind];
// Obtain the standardized lambda value for the fit
float lambda = standardized_lambda_values_d[fit_ind];
// Obtain the tolerance value for the fit
float tolerance = tolerance_values_d[fit_ind];
// Obtain the max iterations value for the fit
int max_iterations = (int)max_iterations_values_d[fit_ind];
// Obtain the number of observations for the fit
int num_observations = (int)num_observations_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float global_max_change = 1E12;
// Declare and initialize the variable that counts how many iterations of cyclic coordinate descent have been performed
int iteration_count = 0;
// Store the residual values for the fit into the shared memory array
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
int store_ind = (observation_row * num_threads_per_block) + block_thread_ind;
sdata[store_ind] = residual_y_d[observation_thread_stride + observation_row];
}
// Perform cyclic coordinate descent until either the maximum number of iterations is reached or the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values becomes less than the tolerance
while (global_max_change >= tolerance && iteration_count < max_iterations) {
// Declare and initialize the variable that stores the maximum weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values for one iteration of cyclic coordinate descent
float max_change = 0.0f;
// Declare and initialize the variable that stores the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values that are due to the current predictor coefficient value being updated using cyclic coordinate descent
float change = 0.0f;
// Cycle through all of the predictors for one iteration of cyclic coordinate descent
for (int j = 0; j < num_predictors; j++) {
// Obtain the predictor coefficient value for the current predictor
float B_j = B_d[predictor_thread_stride + j];
// Store the predictor coefficent value before it's updated
float previous_B_j = B_j;
// Declare and initialize the variable that stores the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
float p_j = 0.0f;
// Calculate the residual values leaving the current predictor out (the predictor coefficients are initialized to zero, so the residual values are going to initially be y)
// This if-else statement accounts for the fact that the contribution of the current predictor only needs to be removed from the residual values if the predictor coefficient is not zero
// This is due to the fact that if the predictor coefficient is already zero, then the predictor contribution to the residual is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Remove the contribution of the current predictor from the current residual value
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind] + (X_value * B_j);
// Store the updated residual value back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = residual_y_value;
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
} else {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Obtain the correct value from the model matrix for the current predictor
float X_value = X_matrix_d[X_thread_stride + (j * num_observations) + observation_row];
// Obtain the residual value (this is essentially the residual value leaving the current predictor out because the predictor coefficient value is zero)
float residual_y_value = sdata[(observation_row * num_threads_per_block) + block_thread_ind];
// Compute the correlation between the current predictor column and the residual values that are obtained leaving the current predictor out
// The correlation is computed as a running sum
p_j = p_j + (X_value * residual_y_value);
}
}
// Divide the computed correlation by the total number of observations in y (also the total number of observations in one predictor column)
p_j = (1.0f / (float)num_observations) * p_j;
// Apply the soft-thresholding function that is associated with the L1-regularization component of elastic-net regularization
float gamma = lambda * alpha;
if (p_j > 0.0f && gamma < fabsf(p_j)) {
B_j = p_j - gamma;
} else if (p_j < 0.0f && gamma < fabsf(p_j)) {
B_j = p_j + gamma;
} else {
B_j = 0.0f;
}
// Declare and initialize the mean of the square of the predictor column
float mean_squared_predictor_value = 0.0f;
// Obtain the mean of the square of the predictor column
if (transformation_flag == 1 || transformation_flag == 3) {
mean_squared_predictor_value = 1.0f;
} else if (transformation_flag == 2 || transformation_flag == 4) {
mean_squared_predictor_value = 1.0f / (float)num_observations;
}
// This if-else statemet accounts for the fact that regularization is not applied to the intercept term if one is included
if (intercept_flag == 1 && j == 0) {
// Use the computed correlation value as the updated predictor coefficient
B_j = p_j;
} else {
// Calculate the updated predictor coefficient value by applying the component of elastic-net regularization that is associated with L2-regularization
// The mean_squared_predictor_value term comes from the derivation of the coordinate descent update for a predictor coefficient
B_j = B_j / (mean_squared_predictor_value + (lambda * (1.0f - alpha)));
}
// Store the updated predictor coefficient value into the B_d array
B_d[predictor_thread_stride + j] = B_j;
// Update the residual values to include the contribution of the current predictor using the updated predictor coefficient value
// If the updated predictor coefficient value is 0, then its contribution to the residual values is zero
if (B_j != 0.0f) {
for (int observation_row = 0; observation_row < num_observations; observation_row++) {
// Store the updated residual back into the shared memory array
sdata[(observation_row * num_threads_per_block) + block_thread_ind] = sdata[(observation_row * num_threads_per_block) + block_thread_ind] - (X_matrix_d[X_thread_stride + (j * num_observations) + observation_row] * B_j);
}
}
// Compute the weighted (observation weights are all 1 in this case) sum of squares of the changes in the fitted values (this is used for the tolerance convergence criterion)
change = (previous_B_j - B_j) * (previous_B_j - B_j);
if (transformation_flag == 2 || transformation_flag == 4) {
if (intercept_flag == 1 && j > 0) {
change = (1.0f / (float)num_observations) * change;
} else if (intercept_flag == 0) {
change = (1.0f / (float)num_observations) * change;
}
}
if (change > max_change) {
max_change = change;
}
}
// Update the global_max_change variable
global_max_change = max_change;
// Update the iteration count variable
iteration_count = iteration_count + 1;
}
// Account for the fact that the y in the model fit was divided by its standard deviation
float std_y = y_std_d[fit_ind];
for (int j = 0; j < num_predictors; j++) {
B_d[predictor_thread_stride + j] = B_d[predictor_thread_stride + j] * std_y;
}
}
}
}
// Define the GPU kernel that performs predictor coefficient unnormalization
// Kernel: rescale fitted coefficients back to the original predictor scale by
// dividing each (non-intercept) coefficient by its normalization factor.
// Launch: 1D grid, one thread per model fit; the last block may hold fewer
// active fits (num_threads_last_block) than the other blocks.
__global__ void predictor_coefficient_unnormalization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
    // Global fit index derived from the launch geometry
    const int fit_ind = (blockIdx.x * num_threads_per_block) + threadIdx.x;
    // The final block may carry fewer active fits than the others
    const int active_threads = (blockIdx.x == num_blocks - 1) ? num_threads_last_block : num_threads_per_block;
    // Surplus threads in the final block do no work
    if (threadIdx.x >= active_threads) {
        return;
    }
    // Only unnormalize coefficients for fits that were actually performed
    if ((int)model_fit_flag_d[fit_ind] != 1) {
        return;
    }
    // Offset into B_d / scaling_factors_d for this fit's predictor set
    const int coeff_offset = (int)B_thread_stride_d[fit_ind];
    const int predictor_count = (int)num_predictors_d[fit_ind];
    // Skip the first column when it is the intercept (a column of ones that was never normalized)
    const int first_column = ((int)intercept_flag_d[fit_ind] == 1) ? 1 : 0;
    // Divide each coefficient by the scaling factor that was applied to its predictor column
    for (int column = first_column; column < predictor_count; column++) {
        B_d[coeff_offset + column] /= scaling_factors_d[coeff_offset + column];
    }
}
// Define the GPU kernel that performs predictor coefficient unstandardization
__global__ void predictor_coefficient_unstandardization(float * B_d, double * B_thread_stride_d, float * model_fit_flag_d, float * X_matrix_d, double * X_matrix_thread_stride_d, float * scaling_factors_d, float * mean_X_matrix_d, double * num_predictors_d, float * intercept_flag_d, int num_threads_per_block, int num_blocks, int num_threads_last_block) {
// Obtain the index of the block
int block_ind = blockIdx.x;
// Obtain the thread index within one block
int block_thread_ind = threadIdx.x;
// Calculate the fit index
int fit_ind = (block_ind * num_threads_per_block) + block_thread_ind;
// Determine how many threads are in the block (accounts for the fact that the last block may contain less active threads than the other blocks)
int num_threads_per_block_2 = num_threads_per_block;
if (block_ind == (num_blocks - 1)) {
num_threads_per_block_2 = num_threads_last_block;
}
// This if statement makes sure that extra threads aren't doing data processing if the last block has less fits to perform
if (block_thread_ind < num_threads_per_block_2) {
// Obtain the flag that determines whether a model fit was performed or not
int model_fit_flag = (int)model_fit_flag_d[fit_ind];
// This if statement is to ensure that the coefficients are unstandardized only if a model fit was performed
if (model_fit_flag == 1) {
// Obtain the thread stride that is used to obtain the correct set of predictors for the fit
int predictor_thread_stride = (int)B_thread_stride_d[fit_ind];
// Obtain the number of predictors for the fit
int num_predictors = (int)num_predictors_d[fit_ind];
// Obtain the flag that determines whether the first predictor column is a column of ones for the intercept term or not
int intercept_flag = (int)intercept_flag_d[fit_ind];
// Declare and initialize the variable that stores the number of the first predictor column to be standardized
int start_ind = 0;
// This if statement makes sure to not standardize the first predictor column if it corresponds to the intercept term
if (intercept_flag == 1) {
start_ind = 1;
}
// Declare and initialize the variable that is used to adjust the intercept term if it is included
float sum_value = 0.0f;
// Perform predictor coefficient unstandardization
for (int predictor_column = start_ind; predictor_column < num_predictors; predictor_column++) {
float B_unstandardized = B_d[predictor_thread_stride + predictor_column] / scaling_factors_d[predictor_thread_stride + predictor_column];
B_d[predictor_thread_stride + predictor_column] = B_unstandardized;
sum_value = sum_value + (B_unstandardized * mean_X_matrix_d[predictor_thread_stride + predictor_column]);
}
// Adjust the intercept term if it is included
if (intercept_flag == 1) {
B_d[predictor_thread_stride] = B_d[predictor_thread_stride] - sum_value;
}
}
}
} | .text
.file "GPU_kernels_single_precision.hip"
.globl _Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii # -- Begin function _Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii
.p2align 4, 0x90
.type _Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii,@function
_Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii: # @_Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end0:
.size _Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii, .Lfunc_end0-_Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii
.cfi_endproc
# -- End function
.globl _Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii # -- Begin function _Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.p2align 4, 0x90
.type _Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii,@function
_Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii: # @_Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
leaq 224(%rsp), %rax
movq %rax, 176(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end1:
.size _Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii, .Lfunc_end1-_Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.cfi_endproc
# -- End function
.globl _Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii # -- Begin function _Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii
.p2align 4, 0x90
.type _Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii,@function
_Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii: # @_Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21model_fit_preparationPfS_S_S_S_PdS0_iii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end2:
.size _Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii, .Lfunc_end2-_Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii
.cfi_endproc
# -- End function
.globl _Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii # -- Begin function _Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.p2align 4, 0x90
.type _Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii,@function
_Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii: # @_Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.cfi_startproc
# %bb.0:
subq $248, %rsp
.cfi_def_cfa_offset 256
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 256(%rsp), %rax
movq %rax, 144(%rsp)
leaq 264(%rsp), %rax
movq %rax, 152(%rsp)
leaq 272(%rsp), %rax
movq %rax, 160(%rsp)
leaq 280(%rsp), %rax
movq %rax, 168(%rsp)
leaq 288(%rsp), %rax
movq %rax, 176(%rsp)
leaq 296(%rsp), %rax
movq %rax, 184(%rsp)
leaq 304(%rsp), %rax
movq %rax, 192(%rsp)
leaq 312(%rsp), %rax
movq %rax, 200(%rsp)
leaq 320(%rsp), %rax
movq %rax, 208(%rsp)
leaq 328(%rsp), %rax
movq %rax, 216(%rsp)
leaq 336(%rsp), %rax
movq %rax, 224(%rsp)
leaq 344(%rsp), %rax
movq %rax, 232(%rsp)
leaq 352(%rsp), %rax
movq %rax, 240(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $264, %rsp # imm = 0x108
.cfi_adjust_cfa_offset -264
retq
.Lfunc_end3:
.size _Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, .Lfunc_end3-_Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.cfi_endproc
# -- End function
.globl _Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii # -- Begin function _Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.p2align 4, 0x90
.type _Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii,@function
_Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii: # @_Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.cfi_startproc
# %bb.0:
subq $248, %rsp
.cfi_def_cfa_offset 256
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 256(%rsp), %rax
movq %rax, 144(%rsp)
leaq 264(%rsp), %rax
movq %rax, 152(%rsp)
leaq 272(%rsp), %rax
movq %rax, 160(%rsp)
leaq 280(%rsp), %rax
movq %rax, 168(%rsp)
leaq 288(%rsp), %rax
movq %rax, 176(%rsp)
leaq 296(%rsp), %rax
movq %rax, 184(%rsp)
leaq 304(%rsp), %rax
movq %rax, 192(%rsp)
leaq 312(%rsp), %rax
movq %rax, 200(%rsp)
leaq 320(%rsp), %rax
movq %rax, 208(%rsp)
leaq 328(%rsp), %rax
movq %rax, 216(%rsp)
leaq 336(%rsp), %rax
movq %rax, 224(%rsp)
leaq 344(%rsp), %rax
movq %rax, 232(%rsp)
leaq 352(%rsp), %rax
movq %rax, 240(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $264, %rsp # imm = 0x108
.cfi_adjust_cfa_offset -264
retq
.Lfunc_end4:
.size _Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, .Lfunc_end4-_Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.cfi_endproc
# -- End function
.globl _Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii # -- Begin function _Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.p2align 4, 0x90
.type _Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii,@function
_Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii: # @_Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 216(%rsp), %rax
movq %rax, 168(%rsp)
leaq 224(%rsp), %rax
movq %rax, 176(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end5:
.size _Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii, .Lfunc_end5-_Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.cfi_endproc
# -- End function
.globl _Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii # -- Begin function _Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.p2align 4, 0x90
.type _Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii,@function
_Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii: # @_Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.cfi_startproc
# %bb.0:
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 208(%rsp), %rax
movq %rax, 144(%rsp)
leaq 216(%rsp), %rax
movq %rax, 152(%rsp)
leaq 224(%rsp), %rax
movq %rax, 160(%rsp)
leaq 232(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $216, %rsp
.cfi_adjust_cfa_offset -216
retq
.Lfunc_end6:
.size _Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii, .Lfunc_end6-_Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21model_fit_preparationPfS_S_S_S_PdS0_iii, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii,@object # @_Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii
.section .rodata,"a",@progbits
.globl _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii
.p2align 3, 0x0
_Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii:
.quad _Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii
.size _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii, 8
.type _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii,@object # @_Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.globl _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.p2align 3, 0x0
_Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii:
.quad _Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.size _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii, 8
.type _Z21model_fit_preparationPfS_S_S_S_PdS0_iii,@object # @_Z21model_fit_preparationPfS_S_S_S_PdS0_iii
.globl _Z21model_fit_preparationPfS_S_S_S_PdS0_iii
.p2align 3, 0x0
_Z21model_fit_preparationPfS_S_S_S_PdS0_iii:
.quad _Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii
.size _Z21model_fit_preparationPfS_S_S_S_PdS0_iii, 8
.type _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii,@object # @_Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.globl _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.p2align 3, 0x0
_Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii:
.quad _Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.size _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, 8
.type _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii,@object # @_Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.globl _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.p2align 3, 0x0
_Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii:
.quad _Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.size _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii, 8
.type _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii,@object # @_Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.globl _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.p2align 3, 0x0
_Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii:
.quad _Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.size _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii, 8
.type _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii,@object # @_Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.globl _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.p2align 3, 0x0
_Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii:
.quad _Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.size _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii"
.size .L__unnamed_1, 48
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii"
.size .L__unnamed_2, 52
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z21model_fit_preparationPfS_S_S_S_PdS0_iii"
.size .L__unnamed_3, 44
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii"
.size .L__unnamed_4, 51
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii"
.size .L__unnamed_5, 66
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "_Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii"
.size .L__unnamed_6, 63
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "_Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii"
.size .L__unnamed_7, 67
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__predictor_normalizationPfS_PdS0_S0_S0_S_iii
.addrsig_sym _Z40__device_stub__predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.addrsig_sym _Z36__device_stub__model_fit_preparationPfS_S_S_S_PdS0_iii
.addrsig_sym _Z24__device_stub__model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.addrsig_sym _Z38__device_stub__model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.addrsig_sym _Z52__device_stub__predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.addrsig_sym _Z54__device_stub__predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23predictor_normalizationPfS_PdS0_S0_S0_S_iii
.addrsig_sym _Z25predictor_standardizationPfS_S_PdS0_S0_S0_S_iii
.addrsig_sym _Z21model_fit_preparationPfS_S_S_S_PdS0_iii
.addrsig_sym _Z9model_fitPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.addrsig_sym _Z23model_fit_shared_memoryPfPdS_S_S0_S0_S_S_S_S0_S0_S_S_S_S_iiii
.addrsig_sym _Z37predictor_coefficient_unnormalizationPfPdS_S_S0_S_S0_S_iii
.addrsig_sym _Z39predictor_coefficient_unstandardizationPfPdS_S_S0_S_S_S0_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// Naive dense matrix multiply: out_tabC = in_tabA * in_tabB for square
// matrices of width outTabWidth (row-major). Launch with a 2D grid/block;
// thread (x, y) computes the single output element at (row, col).
// NOTE(review): threadIdx.x selects the ROW here, so consecutive lanes of a
// warp access A and C with a stride of outTabWidth — confirm whether this
// access pattern is intended, since it is not coalesced.
__global__ void MatrixMultiplication__CudaKernel(int* in_tabA, int* in_tabB, int* out_tabC, int outTabWidth)
{
// Row/column of the output element this thread is responsible for
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
// bounds guard: extra threads past the matrix edge do no work
if (row < outTabWidth && col < outTabWidth)
{
// Accumulator for the dot product of A's row with B's column
int tmp_sum = 0;
//#pragma unroll
for (int i = 0; i < outTabWidth; i++)
{
tmp_sum += in_tabA[row * outTabWidth + i] * in_tabB[i * outTabWidth + col];
}
// Write the fully-computed output element
out_tabC[row * outTabWidth + col] = tmp_sum;
}
} | code for sm_80
Function : _Z32MatrixMultiplication__CudaKernelPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e280000002200 */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e680000002500 */
/*0040*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R3, c[0x0][0x0], R2 ; /* 0x0000000003037a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R28, -RZ, RZ, 0, 0 ; /* 0x00000000ff1c7435 */
/* 0x000fe200000001ff */
/*00d0*/ IMAD R3, R3, c[0x0][0x178], RZ ; /* 0x00005e0003037a24 */
/* 0x000fe200078e02ff */
/*00e0*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f06270 */
/*00f0*/ @!P0 BRA 0xbf0 ; /* 0x00000af000008947 */
/* 0x000fea0003800000 */
/*0100*/ IADD3 R4, R2.reuse, -0x1, RZ ; /* 0xffffffff02047810 */
/* 0x040fe40007ffe0ff */
/*0110*/ LOP3.LUT R5, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302057812 */
/* 0x000fe400078ec0ff */
/*0120*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe40003f06070 */
/*0130*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fe40000000f00 */
/*0140*/ MOV R28, RZ ; /* 0x000000ff001c7202 */
/* 0x000fd20000000f00 */
/*0150*/ @!P0 BRA 0xaf0 ; /* 0x0000099000008947 */
/* 0x000fea0003800000 */
/*0160*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */
/* 0x000fe20007ffe1ff */
/*0170*/ HFMA2.MMA R25, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff197435 */
/* 0x000fe200000001ff */
/*0180*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0190*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fe40000000f00 */
/*01a0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fcc0003f04270 */
/*01b0*/ IMAD.WIDE R24, R0, R25, c[0x0][0x168] ; /* 0x00005a0000187625 */
/* 0x000fce00078e0219 */
/*01c0*/ @!P0 BRA 0x960 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01d0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01f0*/ @!P1 BRA 0x6a0 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0000a2000c1e1900 */
/*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0240*/ IMAD.WIDE R12, R3, 0x4, R12 ; /* 0x00000004030c7825 */
/* 0x000fca00078e020c */
/*0250*/ LDG.E R27, [R12.64] ; /* 0x000000040c1b7981 */
/* 0x000ea2000c1e1900 */
/*0260*/ IMAD.WIDE R10, R2, 0x4, R24 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0218 */
/*0270*/ LDG.E R17, [R12.64+0x4] ; /* 0x000004040c117981 */
/* 0x000ee6000c1e1900 */
/*0280*/ IMAD.WIDE R18, R2.reuse, 0x4, R10 ; /* 0x0000000402127825 */
/* 0x040fe200078e020a */
/*0290*/ LDG.E R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x0002e8000c1e1900 */
/*02a0*/ LDG.E R7, [R12.64+0xc] ; /* 0x00000c040c077981 */
/* 0x000f22000c1e1900 */
/*02b0*/ IMAD.WIDE R14, R2, 0x4, R18 ; /* 0x00000004020e7825 */
/* 0x000fc600078e0212 */
/*02c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000b26000c1e1900 */
/*02d0*/ IMAD.WIDE R20, R2.reuse, 0x4, R14 ; /* 0x0000000402147825 */
/* 0x040fe200078e020e */
/*02e0*/ LDG.E R26, [R14.64] ; /* 0x000000040e1a7981 */
/* 0x000128000c1e1900 */
/*02f0*/ LDG.E R9, [R12.64+0x10] ; /* 0x000010040c097981 */
/* 0x000f28000c1e1900 */
/*0300*/ LDG.E R19, [R12.64+0x8] ; /* 0x000008040c137981 */
/* 0x020f22000c1e1900 */
/*0310*/ IMAD.WIDE R14, R2, 0x4, R20 ; /* 0x00000004020e7825 */
/* 0x001fc600078e0214 */
/*0320*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000166000c1e1900 */
/*0330*/ IMAD.WIDE R22, R2.reuse, 0x4, R14 ; /* 0x0000000402167825 */
/* 0x040fe200078e020e */
/*0340*/ LDG.E R8, [R14.64] ; /* 0x000000040e087981 */
/* 0x000168000c1e1900 */
/*0350*/ LDG.E R11, [R12.64+0x14] ; /* 0x000014040c0b7981 */
/* 0x002f62000c1e1900 */
/*0360*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x000fc600078e0216 */
/*0370*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */
/* 0x000368000c1e1900 */
/*0380*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */
/* 0x001f62000c1e1900 */
/*0390*/ IMAD R29, R29, R27, R28 ; /* 0x0000001b1d1d7224 */
/* 0x004fc600078e021c */
/*03a0*/ LDG.E R27, [R12.64+0x1c] ; /* 0x00001c040c1b7981 */
/* 0x000ea8000c1e1900 */
/*03b0*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */
/* 0x0000a2000c1e1900 */
/*03c0*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fc800078e0218 */
/*03d0*/ IMAD R29, R16, R17, R29 ; /* 0x00000011101d7224 */
/* 0x008fe400078e021d */
/*03e0*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */
/* 0x000fe400078e020e */
/*03f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0006a4000c1e1900 */
/*0400*/ IMAD R29, R18, R19, R29 ; /* 0x00000013121d7224 */
/* 0x010fe400078e021d */
/*0410*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fe400078e0210 */
/*0420*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0008a4000c1e1900 */
/*0430*/ IMAD R26, R26, R7, R29 ; /* 0x000000071a1a7224 */
/* 0x000fc400078e021d */
/*0440*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */
/* 0x042fe200078e0212 */
/*0450*/ LDG.E R7, [R12.64+0x20] ; /* 0x000020040c077981 */
/* 0x000ea8000c1e1900 */
/*0460*/ LDG.E R29, [R12.64+0x24] ; /* 0x000024040c1d7981 */
/* 0x000ea2000c1e1900 */
/*0470*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x001fc600078e0216 */
/*0480*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0000a2000c1e1900 */
/*0490*/ IMAD R9, R20, R9, R26 ; /* 0x0000000914097224 */
/* 0x020fc600078e021a */
/*04a0*/ LDG.E R26, [R12.64+0x28] ; /* 0x000028040c1a7981 */
/* 0x000f62000c1e1900 */
/*04b0*/ IMAD R11, R8, R11, R9 ; /* 0x0000000b080b7224 */
/* 0x000fe400078e0209 */
/*04c0*/ IMAD.WIDE R8, R2, 0x4, R24 ; /* 0x0000000402087825 */
/* 0x000fe200078e0218 */
/*04d0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000368000c1e1900 */
/*04e0*/ LDG.E R17, [R12.64+0x2c] ; /* 0x00002c040c117981 */
/* 0x010f22000c1e1900 */
/*04f0*/ IMAD R21, R10, R21, R11 ; /* 0x000000150a157224 */
/* 0x000fc600078e020b */
/*0500*/ LDG.E R15, [R24.64] ; /* 0x00000004180f7981 */
/* 0x008722000c1e1900 */
/*0510*/ IMAD.WIDE R10, R2, 0x4, R8 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0208 */
/*0520*/ LDG.E R19, [R8.64] ; /* 0x0000000408137981 */
/* 0x001128000c1e1900 */
/*0530*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */
/* 0x002f28000c1e1900 */
/*0540*/ LDG.E R24, [R12.64+0x30] ; /* 0x000030040c187981 */
/* 0x008ee8000c1e1900 */
/*0550*/ LDG.E R25, [R12.64+0x38] ; /* 0x000038040c197981 */
/* 0x000ee8000c1e1900 */
/*0560*/ LDG.E R8, [R12.64+0x3c] ; /* 0x00003c040c087981 */
/* 0x001ee2000c1e1900 */
/*0570*/ IMAD R9, R28, R27, R21 ; /* 0x0000001b1c097224 */
/* 0x004fc600078e0215 */
/*0580*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0590*/ IMAD.WIDE R20, R2, 0x4, R10 ; /* 0x0000000402147825 */
/* 0x000fca00078e020a */
/*05a0*/ LDG.E R27, [R20.64] ; /* 0x00000004141b7981 */
/* 0x000ea2000c1e1900 */
/*05b0*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fc80007ffe0ff */
/*05c0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe20003f24270 */
/*05d0*/ IMAD R7, R14, R7, R9 ; /* 0x000000070e077224 */
/* 0x000fc800078e0209 */
/*05e0*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x000fc800078e0207 */
/*05f0*/ IMAD R7, R18, R26, R7 ; /* 0x0000001a12077224 */
/* 0x020fc800078e0207 */
/*0600*/ IMAD R7, R22, R17, R7 ; /* 0x0000001116077224 */
/* 0x010fe200078e0207 */
/*0610*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0620*/ IADD3 R4, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x000fc60007ffe0ff */
/*0630*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0640*/ IMAD R7, R15, R24, R7 ; /* 0x000000180f077224 */
/* 0x008fc800078e0207 */
/*0650*/ IMAD R28, R19, R28, R7 ; /* 0x0000001c131c7224 */
/* 0x004fc800078e0207 */
/*0660*/ IMAD R28, R23, R25, R28 ; /* 0x00000019171c7224 */
/* 0x000fe400078e021c */
/*0670*/ IMAD.WIDE R24, R2, 0x4, R20 ; /* 0x0000000402187825 */
/* 0x000fc800078e0214 */
/*0680*/ IMAD R28, R27, R8, R28 ; /* 0x000000081b1c7224 */
/* 0x000fe200078e021c */
/*0690*/ @P1 BRA 0x210 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*06a0*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*06b0*/ @!P1 BRA 0x940 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06c0*/ IMAD.WIDE R16, R2, 0x4, R24 ; /* 0x0000000402107825 */
/* 0x000fe200078e0218 */
/*06d0*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*06e0*/ LDG.E R7, [R24.64] ; /* 0x0000000418077981 */
/* 0x0000a2000c1e1900 */
/*06f0*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fc60008000f00 */
/*0700*/ IMAD.WIDE R12, R2.reuse, 0x4, R16 ; /* 0x00000004020c7825 */
/* 0x040fe200078e0210 */
/*0710*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */
/* 0x0002e6000c1e1900 */
/*0720*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fe200078e0208 */
/*0730*/ LDG.E R23, [R12.64] ; /* 0x000000040c177981 */
/* 0x000966000c1e1900 */
/*0740*/ IMAD.WIDE R14, R2.reuse, 0x4, R12 ; /* 0x00000004020e7825 */
/* 0x040fe200078e020c */
/*0750*/ LDG.E R20, [R8.64] ; /* 0x0000000408147981 */
/* 0x000ea8000c1e1900 */
/*0760*/ LDG.E R22, [R8.64+0x4] ; /* 0x0000040408167981 */
/* 0x000ee2000c1e1900 */
/*0770*/ IMAD.WIDE R10, R2, 0x4, R14 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020e */
/*0780*/ LDG.E R26, [R8.64+0x8] ; /* 0x00000804081a7981 */
/* 0x000f66000c1e1900 */
/*0790*/ IMAD.WIDE R16, R2.reuse, 0x4, R10 ; /* 0x0000000402107825 */
/* 0x042fe200078e020a */
/*07a0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07b0*/ LDG.E R27, [R8.64+0xc] ; /* 0x00000c04081b7981 */
/* 0x000f62000c1e1900 */
/*07c0*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fc600078e0210 */
/*07d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000368000c1e1900 */
/*07e0*/ LDG.E R25, [R8.64+0x10] ; /* 0x0000100408197981 */
/* 0x001f62000c1e1900 */
/*07f0*/ IMAD.WIDE R12, R2, 0x4, R18 ; /* 0x00000004020c7825 */
/* 0x010fc600078e0212 */
/*0800*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000f28000c1e1900 */
/*0810*/ LDG.E R29, [R8.64+0x14] ; /* 0x00001404081d7981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000128000c1e1900 */
/*0830*/ LDG.E R11, [R8.64+0x18] ; /* 0x00001804080b7981 */
/* 0x002f28000c1e1900 */
/*0840*/ LDG.E R15, [R12.64] ; /* 0x000000040c0f7981 */
/* 0x000f28000c1e1900 */
/*0850*/ LDG.E R18, [R8.64+0x1c] ; /* 0x00001c0408127981 */
/* 0x001f22000c1e1900 */
/*0860*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0880*/ IADD3 R4, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe20007ffe0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ IMAD R7, R7, R20, R28 ; /* 0x0000001407077224 */
/* 0x004fc800078e021c */
/*08c0*/ IMAD R7, R21, R22, R7 ; /* 0x0000001615077224 */
/* 0x008fc800078e0207 */
/*08d0*/ IMAD R7, R23, R26, R7 ; /* 0x0000001a17077224 */
/* 0x020fc800078e0207 */
/*08e0*/ IMAD R7, R14, R27, R7 ; /* 0x0000001b0e077224 */
/* 0x000fc800078e0207 */
/*08f0*/ IMAD R7, R10, R25, R7 ; /* 0x000000190a077224 */
/* 0x000fc800078e0207 */
/*0900*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x010fc800078e0207 */
/*0910*/ IMAD R7, R24, R11, R7 ; /* 0x0000000b18077224 */
/* 0x000fe400078e0207 */
/*0920*/ IMAD.WIDE R24, R2, 0x4, R12 ; /* 0x0000000402187825 */
/* 0x000fc800078e020c */
/*0930*/ IMAD R28, R15, R18, R7 ; /* 0x000000120f1c7224 */
/* 0x000fe400078e0207 */
/*0940*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0950*/ @!P0 BRA 0xaf0 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0960*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*0970*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fe200078e0218 */
/*0980*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fe20008000f00 */
/*0990*/ LDG.E R25, [R24.64] ; /* 0x0000000418197981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fc800078e0208 */
/*09b0*/ IMAD.WIDE R12, R2.reuse, 0x4, R14 ; /* 0x00000004020c7825 */
/* 0x040fe200078e020e */
/*09c0*/ LDG.E R7, [R8.64] ; /* 0x0000000408077981 */
/* 0x000ea8000c1e1900 */
/*09d0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee2000c1e1900 */
/*09e0*/ IMAD.WIDE R10, R2, 0x4, R12 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020c */
/*09f0*/ LDG.E R16, [R8.64+0x4] ; /* 0x0000040408107981 */
/* 0x000ee8000c1e1900 */
/*0a00*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000f28000c1e1900 */
/*0a10*/ LDG.E R17, [R8.64+0x8] ; /* 0x0000080408117981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R19, [R8.64+0xc] ; /* 0x00000c0408137981 */
/* 0x000f68000c1e1900 */
/*0a30*/ LDG.E R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000f62000c1e1900 */
/*0a40*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fc80007ffe0ff */
/*0a50*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0a60*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a70*/ IADD3 R4, R4, 0x4, RZ ; /* 0x0000000404047810 */
/* 0x000fc60007ffe0ff */
/*0a80*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0a90*/ IMAD R7, R25, R7, R28 ; /* 0x0000000719077224 */
/* 0x004fc800078e021c */
/*0aa0*/ IMAD R7, R14, R16, R7 ; /* 0x000000100e077224 */
/* 0x008fe400078e0207 */
/*0ab0*/ IMAD.WIDE R24, R2, 0x4, R10 ; /* 0x0000000402187825 */
/* 0x000fc800078e020a */
/*0ac0*/ IMAD R7, R18, R17, R7 ; /* 0x0000001112077224 */
/* 0x010fc800078e0207 */
/*0ad0*/ IMAD R28, R20, R19, R7 ; /* 0x00000013141c7224 */
/* 0x020fe200078e0207 */
/*0ae0*/ @P0 BRA 0x960 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0af0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0b00*/ @!P0 BRA 0xbf0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0b10*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b20*/ IADD3 R6, R3, R4, RZ ; /* 0x0000000403067210 */
/* 0x000fe20007ffe0ff */
/*0b30*/ IMAD R4, R4, c[0x0][0x178], R0 ; /* 0x00005e0004047a24 */
/* 0x000fd000078e0200 */
/*0b40*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0b50*/ IMAD.WIDE R8, R4, R9, c[0x0][0x168] ; /* 0x00005a0004087625 */
/* 0x000fca00078e0209 */
/*0b60*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0000a8000c1e1900 */
/*0b70*/ LDG.E R4, [R6.64] ; /* 0x0000000406047981 */
/* 0x0002a2000c1e1900 */
/*0b80*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fc80007ffe0ff */
/*0b90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0ba0*/ IMAD.WIDE R8, R2, 0x4, R8 ; /* 0x0000000402087825 */
/* 0x001fe200078e0208 */
/*0bb0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0bc0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0bd0*/ IMAD R28, R11, R4, R28 ; /* 0x000000040b1c7224 */
/* 0x004fcc00078e021c */
/*0be0*/ @P0 BRA 0xb60 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0bf0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*0c00*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fca0000000f00 */
/*0c10*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0c20*/ STG.E [R2.64], R28 ; /* 0x0000001c02007986 */
/* 0x000fe2000c101904 */
/*0c30*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c40*/ BRA 0xc40; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void MatrixMultiplication__CudaKernel(int* in_tabA, int* in_tabB, int* out_tabC, int outTabWidth)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
//making sure that extra threads will do not any work
if (row < outTabWidth && col < outTabWidth)
{
int tmp_sum = 0;
//#pragma unroll
for (int i = 0; i < outTabWidth; i++)
{
tmp_sum += in_tabA[row * outTabWidth + i] * in_tabB[i * outTabWidth + col];
}
out_tabC[row * outTabWidth + col] = tmp_sum;
}
} | .file "tmpxft_0011c356_00000000-6_MatrixMultiplication__CudaKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i
.type _Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i, @function
_Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z32MatrixMultiplication__CudaKernelPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i, .-_Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i
.globl _Z32MatrixMultiplication__CudaKernelPiS_S_i
.type _Z32MatrixMultiplication__CudaKernelPiS_S_i, @function
_Z32MatrixMultiplication__CudaKernelPiS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z32MatrixMultiplication__CudaKernelPiS_S_i, .-_Z32MatrixMultiplication__CudaKernelPiS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z32MatrixMultiplication__CudaKernelPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z32MatrixMultiplication__CudaKernelPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void MatrixMultiplication__CudaKernel(int* in_tabA, int* in_tabB, int* out_tabC, int outTabWidth)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
//making sure that extra threads will do not any work
if (row < outTabWidth && col < outTabWidth)
{
int tmp_sum = 0;
//#pragma unroll
for (int i = 0; i < outTabWidth; i++)
{
tmp_sum += in_tabA[row * outTabWidth + i] * in_tabB[i * outTabWidth + col];
}
out_tabC[row * outTabWidth + col] = tmp_sum;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void MatrixMultiplication__CudaKernel(int* in_tabA, int* in_tabB, int* out_tabC, int outTabWidth)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
//making sure that extra threads will do not any work
if (row < outTabWidth && col < outTabWidth)
{
int tmp_sum = 0;
//#pragma unroll
for (int i = 0; i < outTabWidth; i++)
{
tmp_sum += in_tabA[row * outTabWidth + i] * in_tabB[i * outTabWidth + col];
}
out_tabC[row * outTabWidth + col] = tmp_sum;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void MatrixMultiplication__CudaKernel(int* in_tabA, int* in_tabB, int* out_tabC, int outTabWidth)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
//making sure that extra threads will do not any work
if (row < outTabWidth && col < outTabWidth)
{
int tmp_sum = 0;
//#pragma unroll
for (int i = 0; i < outTabWidth; i++)
{
tmp_sum += in_tabA[row * outTabWidth + i] * in_tabB[i * outTabWidth + col];
}
out_tabC[row * outTabWidth + col] = tmp_sum;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z32MatrixMultiplication__CudaKernelPiS_S_i
.globl _Z32MatrixMultiplication__CudaKernelPiS_S_i
.p2align 8
.type _Z32MatrixMultiplication__CudaKernelPiS_S_i,@function
_Z32MatrixMultiplication__CudaKernelPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_6
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v0, s2
s_mov_b32 s3, s2
v_mov_b32_e32 v5, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v6, 31, v5
s_add_i32 s3, s3, -1
s_cmp_eq_u32 s3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[5:6]
v_add_co_u32 v6, vcc_lo, s6, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_load_b32 v8, v[3:4], off
global_load_b32 v9, v[6:7], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[6:7], null, v9, v8, v[2:3]
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v5, s2, v5
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, v0, s2, v[1:2]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z32MatrixMultiplication__CudaKernelPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z32MatrixMultiplication__CudaKernelPiS_S_i, .Lfunc_end0-_Z32MatrixMultiplication__CudaKernelPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z32MatrixMultiplication__CudaKernelPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z32MatrixMultiplication__CudaKernelPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void MatrixMultiplication__CudaKernel(int* in_tabA, int* in_tabB, int* out_tabC, int outTabWidth)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
//making sure that extra threads will do not any work
if (row < outTabWidth && col < outTabWidth)
{
int tmp_sum = 0;
//#pragma unroll
for (int i = 0; i < outTabWidth; i++)
{
tmp_sum += in_tabA[row * outTabWidth + i] * in_tabB[i * outTabWidth + col];
}
out_tabC[row * outTabWidth + col] = tmp_sum;
}
} | .text
.file "MatrixMultiplication__CudaKernel.hip"
.globl _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i # -- Begin function _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.p2align 4, 0x90
.type _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i,@function
_Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i: # @_Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z32MatrixMultiplication__CudaKernelPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i, .Lfunc_end0-_Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z32MatrixMultiplication__CudaKernelPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z32MatrixMultiplication__CudaKernelPiS_S_i,@object # @_Z32MatrixMultiplication__CudaKernelPiS_S_i
.section .rodata,"a",@progbits
.globl _Z32MatrixMultiplication__CudaKernelPiS_S_i
.p2align 3, 0x0
_Z32MatrixMultiplication__CudaKernelPiS_S_i:
.quad _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.size _Z32MatrixMultiplication__CudaKernelPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z32MatrixMultiplication__CudaKernelPiS_S_i"
.size .L__unnamed_1, 44
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z32MatrixMultiplication__CudaKernelPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z32MatrixMultiplication__CudaKernelPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e280000002200 */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e680000002500 */
/*0040*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R3, c[0x0][0x0], R2 ; /* 0x0000000003037a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R28, -RZ, RZ, 0, 0 ; /* 0x00000000ff1c7435 */
/* 0x000fe200000001ff */
/*00d0*/ IMAD R3, R3, c[0x0][0x178], RZ ; /* 0x00005e0003037a24 */
/* 0x000fe200078e02ff */
/*00e0*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f06270 */
/*00f0*/ @!P0 BRA 0xbf0 ; /* 0x00000af000008947 */
/* 0x000fea0003800000 */
/*0100*/ IADD3 R4, R2.reuse, -0x1, RZ ; /* 0xffffffff02047810 */
/* 0x040fe40007ffe0ff */
/*0110*/ LOP3.LUT R5, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302057812 */
/* 0x000fe400078ec0ff */
/*0120*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe40003f06070 */
/*0130*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fe40000000f00 */
/*0140*/ MOV R28, RZ ; /* 0x000000ff001c7202 */
/* 0x000fd20000000f00 */
/*0150*/ @!P0 BRA 0xaf0 ; /* 0x0000099000008947 */
/* 0x000fea0003800000 */
/*0160*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */
/* 0x000fe20007ffe1ff */
/*0170*/ HFMA2.MMA R25, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff197435 */
/* 0x000fe200000001ff */
/*0180*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0190*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fe40000000f00 */
/*01a0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fcc0003f04270 */
/*01b0*/ IMAD.WIDE R24, R0, R25, c[0x0][0x168] ; /* 0x00005a0000187625 */
/* 0x000fce00078e0219 */
/*01c0*/ @!P0 BRA 0x960 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01d0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01f0*/ @!P1 BRA 0x6a0 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0000a2000c1e1900 */
/*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0240*/ IMAD.WIDE R12, R3, 0x4, R12 ; /* 0x00000004030c7825 */
/* 0x000fca00078e020c */
/*0250*/ LDG.E R27, [R12.64] ; /* 0x000000040c1b7981 */
/* 0x000ea2000c1e1900 */
/*0260*/ IMAD.WIDE R10, R2, 0x4, R24 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0218 */
/*0270*/ LDG.E R17, [R12.64+0x4] ; /* 0x000004040c117981 */
/* 0x000ee6000c1e1900 */
/*0280*/ IMAD.WIDE R18, R2.reuse, 0x4, R10 ; /* 0x0000000402127825 */
/* 0x040fe200078e020a */
/*0290*/ LDG.E R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x0002e8000c1e1900 */
/*02a0*/ LDG.E R7, [R12.64+0xc] ; /* 0x00000c040c077981 */
/* 0x000f22000c1e1900 */
/*02b0*/ IMAD.WIDE R14, R2, 0x4, R18 ; /* 0x00000004020e7825 */
/* 0x000fc600078e0212 */
/*02c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000b26000c1e1900 */
/*02d0*/ IMAD.WIDE R20, R2.reuse, 0x4, R14 ; /* 0x0000000402147825 */
/* 0x040fe200078e020e */
/*02e0*/ LDG.E R26, [R14.64] ; /* 0x000000040e1a7981 */
/* 0x000128000c1e1900 */
/*02f0*/ LDG.E R9, [R12.64+0x10] ; /* 0x000010040c097981 */
/* 0x000f28000c1e1900 */
/*0300*/ LDG.E R19, [R12.64+0x8] ; /* 0x000008040c137981 */
/* 0x020f22000c1e1900 */
/*0310*/ IMAD.WIDE R14, R2, 0x4, R20 ; /* 0x00000004020e7825 */
/* 0x001fc600078e0214 */
/*0320*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000166000c1e1900 */
/*0330*/ IMAD.WIDE R22, R2.reuse, 0x4, R14 ; /* 0x0000000402167825 */
/* 0x040fe200078e020e */
/*0340*/ LDG.E R8, [R14.64] ; /* 0x000000040e087981 */
/* 0x000168000c1e1900 */
/*0350*/ LDG.E R11, [R12.64+0x14] ; /* 0x000014040c0b7981 */
/* 0x002f62000c1e1900 */
/*0360*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x000fc600078e0216 */
/*0370*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */
/* 0x000368000c1e1900 */
/*0380*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */
/* 0x001f62000c1e1900 */
/*0390*/ IMAD R29, R29, R27, R28 ; /* 0x0000001b1d1d7224 */
/* 0x004fc600078e021c */
/*03a0*/ LDG.E R27, [R12.64+0x1c] ; /* 0x00001c040c1b7981 */
/* 0x000ea8000c1e1900 */
/*03b0*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */
/* 0x0000a2000c1e1900 */
/*03c0*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fc800078e0218 */
/*03d0*/ IMAD R29, R16, R17, R29 ; /* 0x00000011101d7224 */
/* 0x008fe400078e021d */
/*03e0*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */
/* 0x000fe400078e020e */
/*03f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0006a4000c1e1900 */
/*0400*/ IMAD R29, R18, R19, R29 ; /* 0x00000013121d7224 */
/* 0x010fe400078e021d */
/*0410*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fe400078e0210 */
/*0420*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0008a4000c1e1900 */
/*0430*/ IMAD R26, R26, R7, R29 ; /* 0x000000071a1a7224 */
/* 0x000fc400078e021d */
/*0440*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */
/* 0x042fe200078e0212 */
/*0450*/ LDG.E R7, [R12.64+0x20] ; /* 0x000020040c077981 */
/* 0x000ea8000c1e1900 */
/*0460*/ LDG.E R29, [R12.64+0x24] ; /* 0x000024040c1d7981 */
/* 0x000ea2000c1e1900 */
/*0470*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x001fc600078e0216 */
/*0480*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0000a2000c1e1900 */
/*0490*/ IMAD R9, R20, R9, R26 ; /* 0x0000000914097224 */
/* 0x020fc600078e021a */
/*04a0*/ LDG.E R26, [R12.64+0x28] ; /* 0x000028040c1a7981 */
/* 0x000f62000c1e1900 */
/*04b0*/ IMAD R11, R8, R11, R9 ; /* 0x0000000b080b7224 */
/* 0x000fe400078e0209 */
/*04c0*/ IMAD.WIDE R8, R2, 0x4, R24 ; /* 0x0000000402087825 */
/* 0x000fe200078e0218 */
/*04d0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000368000c1e1900 */
/*04e0*/ LDG.E R17, [R12.64+0x2c] ; /* 0x00002c040c117981 */
/* 0x010f22000c1e1900 */
/*04f0*/ IMAD R21, R10, R21, R11 ; /* 0x000000150a157224 */
/* 0x000fc600078e020b */
/*0500*/ LDG.E R15, [R24.64] ; /* 0x00000004180f7981 */
/* 0x008722000c1e1900 */
/*0510*/ IMAD.WIDE R10, R2, 0x4, R8 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0208 */
/*0520*/ LDG.E R19, [R8.64] ; /* 0x0000000408137981 */
/* 0x001128000c1e1900 */
/*0530*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */
/* 0x002f28000c1e1900 */
/*0540*/ LDG.E R24, [R12.64+0x30] ; /* 0x000030040c187981 */
/* 0x008ee8000c1e1900 */
/*0550*/ LDG.E R25, [R12.64+0x38] ; /* 0x000038040c197981 */
/* 0x000ee8000c1e1900 */
/*0560*/ LDG.E R8, [R12.64+0x3c] ; /* 0x00003c040c087981 */
/* 0x001ee2000c1e1900 */
/*0570*/ IMAD R9, R28, R27, R21 ; /* 0x0000001b1c097224 */
/* 0x004fc600078e0215 */
/*0580*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0590*/ IMAD.WIDE R20, R2, 0x4, R10 ; /* 0x0000000402147825 */
/* 0x000fca00078e020a */
/*05a0*/ LDG.E R27, [R20.64] ; /* 0x00000004141b7981 */
/* 0x000ea2000c1e1900 */
/*05b0*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fc80007ffe0ff */
/*05c0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe20003f24270 */
/*05d0*/ IMAD R7, R14, R7, R9 ; /* 0x000000070e077224 */
/* 0x000fc800078e0209 */
/*05e0*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x000fc800078e0207 */
/*05f0*/ IMAD R7, R18, R26, R7 ; /* 0x0000001a12077224 */
/* 0x020fc800078e0207 */
/*0600*/ IMAD R7, R22, R17, R7 ; /* 0x0000001116077224 */
/* 0x010fe200078e0207 */
/*0610*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0620*/ IADD3 R4, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x000fc60007ffe0ff */
/*0630*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0640*/ IMAD R7, R15, R24, R7 ; /* 0x000000180f077224 */
/* 0x008fc800078e0207 */
/*0650*/ IMAD R28, R19, R28, R7 ; /* 0x0000001c131c7224 */
/* 0x004fc800078e0207 */
/*0660*/ IMAD R28, R23, R25, R28 ; /* 0x00000019171c7224 */
/* 0x000fe400078e021c */
/*0670*/ IMAD.WIDE R24, R2, 0x4, R20 ; /* 0x0000000402187825 */
/* 0x000fc800078e0214 */
/*0680*/ IMAD R28, R27, R8, R28 ; /* 0x000000081b1c7224 */
/* 0x000fe200078e021c */
/*0690*/ @P1 BRA 0x210 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*06a0*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*06b0*/ @!P1 BRA 0x940 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06c0*/ IMAD.WIDE R16, R2, 0x4, R24 ; /* 0x0000000402107825 */
/* 0x000fe200078e0218 */
/*06d0*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*06e0*/ LDG.E R7, [R24.64] ; /* 0x0000000418077981 */
/* 0x0000a2000c1e1900 */
/*06f0*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fc60008000f00 */
/*0700*/ IMAD.WIDE R12, R2.reuse, 0x4, R16 ; /* 0x00000004020c7825 */
/* 0x040fe200078e0210 */
/*0710*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */
/* 0x0002e6000c1e1900 */
/*0720*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fe200078e0208 */
/*0730*/ LDG.E R23, [R12.64] ; /* 0x000000040c177981 */
/* 0x000966000c1e1900 */
/*0740*/ IMAD.WIDE R14, R2.reuse, 0x4, R12 ; /* 0x00000004020e7825 */
/* 0x040fe200078e020c */
/*0750*/ LDG.E R20, [R8.64] ; /* 0x0000000408147981 */
/* 0x000ea8000c1e1900 */
/*0760*/ LDG.E R22, [R8.64+0x4] ; /* 0x0000040408167981 */
/* 0x000ee2000c1e1900 */
/*0770*/ IMAD.WIDE R10, R2, 0x4, R14 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020e */
/*0780*/ LDG.E R26, [R8.64+0x8] ; /* 0x00000804081a7981 */
/* 0x000f66000c1e1900 */
/*0790*/ IMAD.WIDE R16, R2.reuse, 0x4, R10 ; /* 0x0000000402107825 */
/* 0x042fe200078e020a */
/*07a0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07b0*/ LDG.E R27, [R8.64+0xc] ; /* 0x00000c04081b7981 */
/* 0x000f62000c1e1900 */
/*07c0*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fc600078e0210 */
/*07d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000368000c1e1900 */
/*07e0*/ LDG.E R25, [R8.64+0x10] ; /* 0x0000100408197981 */
/* 0x001f62000c1e1900 */
/*07f0*/ IMAD.WIDE R12, R2, 0x4, R18 ; /* 0x00000004020c7825 */
/* 0x010fc600078e0212 */
/*0800*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000f28000c1e1900 */
/*0810*/ LDG.E R29, [R8.64+0x14] ; /* 0x00001404081d7981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000128000c1e1900 */
/*0830*/ LDG.E R11, [R8.64+0x18] ; /* 0x00001804080b7981 */
/* 0x002f28000c1e1900 */
/*0840*/ LDG.E R15, [R12.64] ; /* 0x000000040c0f7981 */
/* 0x000f28000c1e1900 */
/*0850*/ LDG.E R18, [R8.64+0x1c] ; /* 0x00001c0408127981 */
/* 0x001f22000c1e1900 */
/*0860*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0880*/ IADD3 R4, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe20007ffe0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ IMAD R7, R7, R20, R28 ; /* 0x0000001407077224 */
/* 0x004fc800078e021c */
/*08c0*/ IMAD R7, R21, R22, R7 ; /* 0x0000001615077224 */
/* 0x008fc800078e0207 */
/*08d0*/ IMAD R7, R23, R26, R7 ; /* 0x0000001a17077224 */
/* 0x020fc800078e0207 */
/*08e0*/ IMAD R7, R14, R27, R7 ; /* 0x0000001b0e077224 */
/* 0x000fc800078e0207 */
/*08f0*/ IMAD R7, R10, R25, R7 ; /* 0x000000190a077224 */
/* 0x000fc800078e0207 */
/*0900*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x010fc800078e0207 */
/*0910*/ IMAD R7, R24, R11, R7 ; /* 0x0000000b18077224 */
/* 0x000fe400078e0207 */
/*0920*/ IMAD.WIDE R24, R2, 0x4, R12 ; /* 0x0000000402187825 */
/* 0x000fc800078e020c */
/*0930*/ IMAD R28, R15, R18, R7 ; /* 0x000000120f1c7224 */
/* 0x000fe400078e0207 */
/*0940*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0950*/ @!P0 BRA 0xaf0 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0960*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*0970*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fe200078e0218 */
/*0980*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fe20008000f00 */
/*0990*/ LDG.E R25, [R24.64] ; /* 0x0000000418197981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fc800078e0208 */
/*09b0*/ IMAD.WIDE R12, R2.reuse, 0x4, R14 ; /* 0x00000004020c7825 */
/* 0x040fe200078e020e */
/*09c0*/ LDG.E R7, [R8.64] ; /* 0x0000000408077981 */
/* 0x000ea8000c1e1900 */
/*09d0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee2000c1e1900 */
/*09e0*/ IMAD.WIDE R10, R2, 0x4, R12 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020c */
/*09f0*/ LDG.E R16, [R8.64+0x4] ; /* 0x0000040408107981 */
/* 0x000ee8000c1e1900 */
/*0a00*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000f28000c1e1900 */
/*0a10*/ LDG.E R17, [R8.64+0x8] ; /* 0x0000080408117981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R19, [R8.64+0xc] ; /* 0x00000c0408137981 */
/* 0x000f68000c1e1900 */
/*0a30*/ LDG.E R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000f62000c1e1900 */
/*0a40*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fc80007ffe0ff */
/*0a50*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0a60*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a70*/ IADD3 R4, R4, 0x4, RZ ; /* 0x0000000404047810 */
/* 0x000fc60007ffe0ff */
/*0a80*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0a90*/ IMAD R7, R25, R7, R28 ; /* 0x0000000719077224 */
/* 0x004fc800078e021c */
/*0aa0*/ IMAD R7, R14, R16, R7 ; /* 0x000000100e077224 */
/* 0x008fe400078e0207 */
/*0ab0*/ IMAD.WIDE R24, R2, 0x4, R10 ; /* 0x0000000402187825 */
/* 0x000fc800078e020a */
/*0ac0*/ IMAD R7, R18, R17, R7 ; /* 0x0000001112077224 */
/* 0x010fc800078e0207 */
/*0ad0*/ IMAD R28, R20, R19, R7 ; /* 0x00000013141c7224 */
/* 0x020fe200078e0207 */
/*0ae0*/ @P0 BRA 0x960 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0af0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0b00*/ @!P0 BRA 0xbf0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0b10*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b20*/ IADD3 R6, R3, R4, RZ ; /* 0x0000000403067210 */
/* 0x000fe20007ffe0ff */
/*0b30*/ IMAD R4, R4, c[0x0][0x178], R0 ; /* 0x00005e0004047a24 */
/* 0x000fd000078e0200 */
/*0b40*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0b50*/ IMAD.WIDE R8, R4, R9, c[0x0][0x168] ; /* 0x00005a0004087625 */
/* 0x000fca00078e0209 */
/*0b60*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0000a8000c1e1900 */
/*0b70*/ LDG.E R4, [R6.64] ; /* 0x0000000406047981 */
/* 0x0002a2000c1e1900 */
/*0b80*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fc80007ffe0ff */
/*0b90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0ba0*/ IMAD.WIDE R8, R2, 0x4, R8 ; /* 0x0000000402087825 */
/* 0x001fe200078e0208 */
/*0bb0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0bc0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0bd0*/ IMAD R28, R11, R4, R28 ; /* 0x000000040b1c7224 */
/* 0x004fcc00078e021c */
/*0be0*/ @P0 BRA 0xb60 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0bf0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*0c00*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fca0000000f00 */
/*0c10*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0c20*/ STG.E [R2.64], R28 ; /* 0x0000001c02007986 */
/* 0x000fe2000c101904 */
/*0c30*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c40*/ BRA 0xc40; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z32MatrixMultiplication__CudaKernelPiS_S_i
.globl _Z32MatrixMultiplication__CudaKernelPiS_S_i
.p2align 8
.type _Z32MatrixMultiplication__CudaKernelPiS_S_i,@function
_Z32MatrixMultiplication__CudaKernelPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s3, 0xffff
s_lshr_b32 s3, s3, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s14, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_6
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v2, v0, s2
s_mov_b32 s3, s2
v_mov_b32_e32 v5, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
.p2align 6
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v6, 31, v5
s_add_i32 s3, s3, -1
s_cmp_eq_u32 s3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[5:6]
v_add_co_u32 v6, vcc_lo, s6, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_load_b32 v8, v[3:4], off
global_load_b32 v9, v[6:7], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[6:7], null, v9, v8, v[2:3]
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_dual_mov_b32 v2, v6 :: v_dual_add_nc_u32 v5, s2, v5
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v2, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[3:4], null, v0, s2, v[1:2]
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z32MatrixMultiplication__CudaKernelPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z32MatrixMultiplication__CudaKernelPiS_S_i, .Lfunc_end0-_Z32MatrixMultiplication__CudaKernelPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z32MatrixMultiplication__CudaKernelPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z32MatrixMultiplication__CudaKernelPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0011c356_00000000-6_MatrixMultiplication__CudaKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i
.type _Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i, @function
_Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z32MatrixMultiplication__CudaKernelPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i, .-_Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i
.globl _Z32MatrixMultiplication__CudaKernelPiS_S_i
.type _Z32MatrixMultiplication__CudaKernelPiS_S_i, @function
_Z32MatrixMultiplication__CudaKernelPiS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z57__device_stub__Z32MatrixMultiplication__CudaKernelPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z32MatrixMultiplication__CudaKernelPiS_S_i, .-_Z32MatrixMultiplication__CudaKernelPiS_S_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z32MatrixMultiplication__CudaKernelPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z32MatrixMultiplication__CudaKernelPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "MatrixMultiplication__CudaKernel.hip"
.globl _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i # -- Begin function _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.p2align 4, 0x90
.type _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i,@function
_Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i: # @_Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z32MatrixMultiplication__CudaKernelPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i, .Lfunc_end0-_Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z32MatrixMultiplication__CudaKernelPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z32MatrixMultiplication__CudaKernelPiS_S_i,@object # @_Z32MatrixMultiplication__CudaKernelPiS_S_i
.section .rodata,"a",@progbits
.globl _Z32MatrixMultiplication__CudaKernelPiS_S_i
.p2align 3, 0x0
_Z32MatrixMultiplication__CudaKernelPiS_S_i:
.quad _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.size _Z32MatrixMultiplication__CudaKernelPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z32MatrixMultiplication__CudaKernelPiS_S_i"
.size .L__unnamed_1, 44
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z47__device_stub__MatrixMultiplication__CudaKernelPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z32MatrixMultiplication__CudaKernelPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define N 3 // dim of matrix
//Flattened matrix multiplication kernel. The matrices are stored row-major
//in 1-D ("flattened") arrays, so elements are addressed as [row*width+col];
//the kernel does not use 2-D x,y addressing into the arrays themselves.
//Each thread computes one output element d_mat3[row*width+col] as the dot
//product of row `row` of d_mat1 with column `col` of d_mat2.
//Assumes a 2-D launch whose grid covers at least width x width threads;
//threads outside the matrix exit via the bounds check below.
__global__ void mat_multiply(int* d_mat1, int* d_mat2, int* d_mat3, int width)
{
int k,sum=0; // sum accumulates the dot product for this output element
int col = blockDim.x * blockIdx.x + threadIdx.x; // global column index
int row = blockDim.y * blockIdx.y + threadIdx.y; // global row index
if(row<width && col<width) // guard: grid may be larger than the matrix
{
for(k=0;k<width;k++)
{
// row `row` of d_mat1 is contiguous; column `col` of d_mat2 is strided
sum += d_mat1[row*width+k] * d_mat2[k*width+col];
}
d_mat3[row*width+col] = sum;
}
}
// Host driver: builds two NxN all-ones matrices in managed memory,
// multiplies them on the GPU, and prints the result (every element = N).
int main()
{
int i,j; // i: loop counter; NOTE(review): j is declared but never used
int SIZE = N*N; // total element count; NOTE(review): currently unused
//int BYTES = SIZE*sizeof(int);
int *d_mat1, *d_mat2, *d_mat3;
// allocate unified (managed) memory, accessible from both host and device;
// return codes are not checked here
cudaMallocManaged(&d_mat1,N*N*sizeof(int));
cudaMallocManaged(&d_mat2,N*N*sizeof(int));
cudaMallocManaged(&d_mat3,N*N*sizeof(int));
// initialize on the host: inputs all 1s, output zeroed
for(i=0;i<N*N;i++) //linearize array
{
d_mat1[i] = 1;
d_mat2[i] = 1;
d_mat3[i] = 0;
}
dim3 dimGrid(1,1); // a single block suffices for a 3x3 matrix
dim3 dimBlock(N,N); // one thread per output element
// launch kernel
mat_multiply<<<dimGrid,dimBlock>>>(d_mat1,d_mat2,d_mat3,N);
cudaDeviceSynchronize(); // must sync before the host reads managed memory
// print the flattened result matrix
for(i=0;i<N*N;i++)
{
printf("%d ",d_mat3[i]);
// NOTE(review): this condition (i%N==0 && i>N) breaks lines unevenly;
// i%N==N-1 would break after each complete row — confirm intent.
if(i%N==0 && i>N)
printf("\n");
}
printf("\n");
}
Function : _Z12mat_multiplyPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R2, c[0x0][0x4], R5 ; /* 0x0000010002037a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */
/* 0x000fe20000000f00 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R28, -RZ, RZ, 0, 0 ; /* 0x00000000ff1c7435 */
/* 0x000fe200000001ff */
/*00d0*/ IMAD R3, R3, c[0x0][0x178], RZ ; /* 0x00005e0003037a24 */
/* 0x000fe200078e02ff */
/*00e0*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f06270 */
/*00f0*/ @!P0 BRA 0xbf0 ; /* 0x00000af000008947 */
/* 0x000fea0003800000 */
/*0100*/ IADD3 R4, R2.reuse, -0x1, RZ ; /* 0xffffffff02047810 */
/* 0x040fe40007ffe0ff */
/*0110*/ LOP3.LUT R5, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302057812 */
/* 0x000fe400078ec0ff */
/*0120*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe40003f06070 */
/*0130*/ MOV R28, RZ ; /* 0x000000ff001c7202 */
/* 0x000fe40000000f00 */
/*0140*/ MOV R4, RZ ; /* 0x000000ff00047202 */
/* 0x000fd20000000f00 */
/*0150*/ @!P0 BRA 0xaf0 ; /* 0x0000099000008947 */
/* 0x000fea0003800000 */
/*0160*/ IADD3 R6, -R5, c[0x0][0x178], RZ ; /* 0x00005e0005067a10 */
/* 0x000fe20007ffe1ff */
/*0170*/ HFMA2.MMA R25, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff197435 */
/* 0x000fe200000001ff */
/*0180*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0190*/ MOV R28, RZ ; /* 0x000000ff001c7202 */
/* 0x000fe40000000f00 */
/*01a0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fcc0003f04270 */
/*01b0*/ IMAD.WIDE R24, R0, R25, c[0x0][0x168] ; /* 0x00005a0000187625 */
/* 0x000fce00078e0219 */
/*01c0*/ @!P0 BRA 0x960 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01d0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01f0*/ @!P1 BRA 0x6a0 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*0200*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0210*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0000a2000c1e1900 */
/*0230*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0240*/ IMAD.WIDE R12, R3, 0x4, R12 ; /* 0x00000004030c7825 */
/* 0x000fca00078e020c */
/*0250*/ LDG.E R27, [R12.64] ; /* 0x000000040c1b7981 */
/* 0x000ea2000c1e1900 */
/*0260*/ IMAD.WIDE R10, R2, 0x4, R24 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0218 */
/*0270*/ LDG.E R17, [R12.64+0x4] ; /* 0x000004040c117981 */
/* 0x000ee6000c1e1900 */
/*0280*/ IMAD.WIDE R18, R2.reuse, 0x4, R10 ; /* 0x0000000402127825 */
/* 0x040fe200078e020a */
/*0290*/ LDG.E R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x0002e8000c1e1900 */
/*02a0*/ LDG.E R7, [R12.64+0xc] ; /* 0x00000c040c077981 */
/* 0x000f22000c1e1900 */
/*02b0*/ IMAD.WIDE R14, R2, 0x4, R18 ; /* 0x00000004020e7825 */
/* 0x000fc600078e0212 */
/*02c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000b26000c1e1900 */
/*02d0*/ IMAD.WIDE R20, R2.reuse, 0x4, R14 ; /* 0x0000000402147825 */
/* 0x040fe200078e020e */
/*02e0*/ LDG.E R26, [R14.64] ; /* 0x000000040e1a7981 */
/* 0x000128000c1e1900 */
/*02f0*/ LDG.E R9, [R12.64+0x10] ; /* 0x000010040c097981 */
/* 0x000f28000c1e1900 */
/*0300*/ LDG.E R19, [R12.64+0x8] ; /* 0x000008040c137981 */
/* 0x020f22000c1e1900 */
/*0310*/ IMAD.WIDE R14, R2, 0x4, R20 ; /* 0x00000004020e7825 */
/* 0x001fc600078e0214 */
/*0320*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000166000c1e1900 */
/*0330*/ IMAD.WIDE R22, R2.reuse, 0x4, R14 ; /* 0x0000000402167825 */
/* 0x040fe200078e020e */
/*0340*/ LDG.E R8, [R14.64] ; /* 0x000000040e087981 */
/* 0x000168000c1e1900 */
/*0350*/ LDG.E R11, [R12.64+0x14] ; /* 0x000014040c0b7981 */
/* 0x002f62000c1e1900 */
/*0360*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x000fc600078e0216 */
/*0370*/ LDG.E R10, [R22.64] ; /* 0x00000004160a7981 */
/* 0x000368000c1e1900 */
/*0380*/ LDG.E R21, [R12.64+0x18] ; /* 0x000018040c157981 */
/* 0x001f62000c1e1900 */
/*0390*/ IMAD R29, R29, R27, R28 ; /* 0x0000001b1d1d7224 */
/* 0x004fc600078e021c */
/*03a0*/ LDG.E R27, [R12.64+0x1c] ; /* 0x00001c040c1b7981 */
/* 0x000ea8000c1e1900 */
/*03b0*/ LDG.E R28, [R24.64] ; /* 0x00000004181c7981 */
/* 0x0000a2000c1e1900 */
/*03c0*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fc800078e0218 */
/*03d0*/ IMAD R29, R16, R17, R29 ; /* 0x00000011101d7224 */
/* 0x008fe400078e021d */
/*03e0*/ IMAD.WIDE R16, R2, 0x4, R14 ; /* 0x0000000402107825 */
/* 0x000fe400078e020e */
/*03f0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0006a4000c1e1900 */
/*0400*/ IMAD R29, R18, R19, R29 ; /* 0x00000013121d7224 */
/* 0x010fe400078e021d */
/*0410*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fe400078e0210 */
/*0420*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0008a4000c1e1900 */
/*0430*/ IMAD R26, R26, R7, R29 ; /* 0x000000071a1a7224 */
/* 0x000fc400078e021d */
/*0440*/ IMAD.WIDE R22, R2.reuse, 0x4, R18 ; /* 0x0000000402167825 */
/* 0x042fe200078e0212 */
/*0450*/ LDG.E R7, [R12.64+0x20] ; /* 0x000020040c077981 */
/* 0x000ea8000c1e1900 */
/*0460*/ LDG.E R29, [R12.64+0x24] ; /* 0x000024040c1d7981 */
/* 0x000ea2000c1e1900 */
/*0470*/ IMAD.WIDE R24, R2, 0x4, R22 ; /* 0x0000000402187825 */
/* 0x001fc600078e0216 */
/*0480*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0000a2000c1e1900 */
/*0490*/ IMAD R9, R20, R9, R26 ; /* 0x0000000914097224 */
/* 0x020fc600078e021a */
/*04a0*/ LDG.E R26, [R12.64+0x28] ; /* 0x000028040c1a7981 */
/* 0x000f62000c1e1900 */
/*04b0*/ IMAD R11, R8, R11, R9 ; /* 0x0000000b080b7224 */
/* 0x000fe400078e0209 */
/*04c0*/ IMAD.WIDE R8, R2, 0x4, R24 ; /* 0x0000000402087825 */
/* 0x000fe200078e0218 */
/*04d0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000368000c1e1900 */
/*04e0*/ LDG.E R17, [R12.64+0x2c] ; /* 0x00002c040c117981 */
/* 0x010f22000c1e1900 */
/*04f0*/ IMAD R21, R10, R21, R11 ; /* 0x000000150a157224 */
/* 0x000fc600078e020b */
/*0500*/ LDG.E R15, [R24.64] ; /* 0x00000004180f7981 */
/* 0x008722000c1e1900 */
/*0510*/ IMAD.WIDE R10, R2, 0x4, R8 ; /* 0x00000004020a7825 */
/* 0x000fc600078e0208 */
/*0520*/ LDG.E R19, [R8.64] ; /* 0x0000000408137981 */
/* 0x001128000c1e1900 */
/*0530*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */
/* 0x002f28000c1e1900 */
/*0540*/ LDG.E R24, [R12.64+0x30] ; /* 0x000030040c187981 */
/* 0x008ee8000c1e1900 */
/*0550*/ LDG.E R25, [R12.64+0x38] ; /* 0x000038040c197981 */
/* 0x000ee8000c1e1900 */
/*0560*/ LDG.E R8, [R12.64+0x3c] ; /* 0x00003c040c087981 */
/* 0x001ee2000c1e1900 */
/*0570*/ IMAD R9, R28, R27, R21 ; /* 0x0000001b1c097224 */
/* 0x004fc600078e0215 */
/*0580*/ LDG.E R28, [R12.64+0x34] ; /* 0x000034040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0590*/ IMAD.WIDE R20, R2, 0x4, R10 ; /* 0x0000000402147825 */
/* 0x000fca00078e020a */
/*05a0*/ LDG.E R27, [R20.64] ; /* 0x00000004141b7981 */
/* 0x000ea2000c1e1900 */
/*05b0*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fc80007ffe0ff */
/*05c0*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe20003f24270 */
/*05d0*/ IMAD R7, R14, R7, R9 ; /* 0x000000070e077224 */
/* 0x000fc800078e0209 */
/*05e0*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x000fc800078e0207 */
/*05f0*/ IMAD R7, R18, R26, R7 ; /* 0x0000001a12077224 */
/* 0x020fc800078e0207 */
/*0600*/ IMAD R7, R22, R17, R7 ; /* 0x0000001116077224 */
/* 0x010fe200078e0207 */
/*0610*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0620*/ IADD3 R4, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x000fc60007ffe0ff */
/*0630*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0640*/ IMAD R7, R15, R24, R7 ; /* 0x000000180f077224 */
/* 0x008fc800078e0207 */
/*0650*/ IMAD R28, R19, R28, R7 ; /* 0x0000001c131c7224 */
/* 0x004fc800078e0207 */
/*0660*/ IMAD R28, R23, R25, R28 ; /* 0x00000019171c7224 */
/* 0x000fe400078e021c */
/*0670*/ IMAD.WIDE R24, R2, 0x4, R20 ; /* 0x0000000402187825 */
/* 0x000fc800078e0214 */
/*0680*/ IMAD R28, R27, R8, R28 ; /* 0x000000081b1c7224 */
/* 0x000fe200078e021c */
/*0690*/ @P1 BRA 0x210 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*06a0*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*06b0*/ @!P1 BRA 0x940 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06c0*/ IMAD.WIDE R16, R2, 0x4, R24 ; /* 0x0000000402107825 */
/* 0x000fe200078e0218 */
/*06d0*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*06e0*/ LDG.E R7, [R24.64] ; /* 0x0000000418077981 */
/* 0x0000a2000c1e1900 */
/*06f0*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fc60008000f00 */
/*0700*/ IMAD.WIDE R12, R2.reuse, 0x4, R16 ; /* 0x00000004020c7825 */
/* 0x040fe200078e0210 */
/*0710*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */
/* 0x0002e6000c1e1900 */
/*0720*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fe200078e0208 */
/*0730*/ LDG.E R23, [R12.64] ; /* 0x000000040c177981 */
/* 0x000966000c1e1900 */
/*0740*/ IMAD.WIDE R14, R2.reuse, 0x4, R12 ; /* 0x00000004020e7825 */
/* 0x040fe200078e020c */
/*0750*/ LDG.E R20, [R8.64] ; /* 0x0000000408147981 */
/* 0x000ea8000c1e1900 */
/*0760*/ LDG.E R22, [R8.64+0x4] ; /* 0x0000040408167981 */
/* 0x000ee2000c1e1900 */
/*0770*/ IMAD.WIDE R10, R2, 0x4, R14 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020e */
/*0780*/ LDG.E R26, [R8.64+0x8] ; /* 0x00000804081a7981 */
/* 0x000f66000c1e1900 */
/*0790*/ IMAD.WIDE R16, R2.reuse, 0x4, R10 ; /* 0x0000000402107825 */
/* 0x042fe200078e020a */
/*07a0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07b0*/ LDG.E R27, [R8.64+0xc] ; /* 0x00000c04081b7981 */
/* 0x000f62000c1e1900 */
/*07c0*/ IMAD.WIDE R18, R2, 0x4, R16 ; /* 0x0000000402127825 */
/* 0x000fc600078e0210 */
/*07d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000368000c1e1900 */
/*07e0*/ LDG.E R25, [R8.64+0x10] ; /* 0x0000100408197981 */
/* 0x001f62000c1e1900 */
/*07f0*/ IMAD.WIDE R12, R2, 0x4, R18 ; /* 0x00000004020c7825 */
/* 0x010fc600078e0212 */
/*0800*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000f28000c1e1900 */
/*0810*/ LDG.E R29, [R8.64+0x14] ; /* 0x00001404081d7981 */
/* 0x000f28000c1e1900 */
/*0820*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000128000c1e1900 */
/*0830*/ LDG.E R11, [R8.64+0x18] ; /* 0x00001804080b7981 */
/* 0x002f28000c1e1900 */
/*0840*/ LDG.E R15, [R12.64] ; /* 0x000000040c0f7981 */
/* 0x000f28000c1e1900 */
/*0850*/ LDG.E R18, [R8.64+0x1c] ; /* 0x00001c0408127981 */
/* 0x001f22000c1e1900 */
/*0860*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0880*/ IADD3 R4, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe20007ffe0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ IMAD R7, R7, R20, R28 ; /* 0x0000001407077224 */
/* 0x004fc800078e021c */
/*08c0*/ IMAD R7, R21, R22, R7 ; /* 0x0000001615077224 */
/* 0x008fc800078e0207 */
/*08d0*/ IMAD R7, R23, R26, R7 ; /* 0x0000001a17077224 */
/* 0x020fc800078e0207 */
/*08e0*/ IMAD R7, R14, R27, R7 ; /* 0x0000001b0e077224 */
/* 0x000fc800078e0207 */
/*08f0*/ IMAD R7, R10, R25, R7 ; /* 0x000000190a077224 */
/* 0x000fc800078e0207 */
/*0900*/ IMAD R7, R16, R29, R7 ; /* 0x0000001d10077224 */
/* 0x010fc800078e0207 */
/*0910*/ IMAD R7, R24, R11, R7 ; /* 0x0000000b18077224 */
/* 0x000fe400078e0207 */
/*0920*/ IMAD.WIDE R24, R2, 0x4, R12 ; /* 0x0000000402187825 */
/* 0x000fc800078e020c */
/*0930*/ IMAD R28, R15, R18, R7 ; /* 0x000000120f1c7224 */
/* 0x000fe400078e0207 */
/*0940*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0950*/ @!P0 BRA 0xaf0 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0960*/ MOV R8, UR6 ; /* 0x0000000600087c02 */
/* 0x000fe20008000f00 */
/*0970*/ IMAD.WIDE R14, R2, 0x4, R24 ; /* 0x00000004020e7825 */
/* 0x000fe200078e0218 */
/*0980*/ MOV R9, UR7 ; /* 0x0000000700097c02 */
/* 0x000fe20008000f00 */
/*0990*/ LDG.E R25, [R24.64] ; /* 0x0000000418197981 */
/* 0x000ea8000c1e1900 */
/*09a0*/ IMAD.WIDE R8, R3, 0x4, R8 ; /* 0x0000000403087825 */
/* 0x000fc800078e0208 */
/*09b0*/ IMAD.WIDE R12, R2.reuse, 0x4, R14 ; /* 0x00000004020c7825 */
/* 0x040fe200078e020e */
/*09c0*/ LDG.E R7, [R8.64] ; /* 0x0000000408077981 */
/* 0x000ea8000c1e1900 */
/*09d0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee2000c1e1900 */
/*09e0*/ IMAD.WIDE R10, R2, 0x4, R12 ; /* 0x00000004020a7825 */
/* 0x000fc600078e020c */
/*09f0*/ LDG.E R16, [R8.64+0x4] ; /* 0x0000040408107981 */
/* 0x000ee8000c1e1900 */
/*0a00*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x000f28000c1e1900 */
/*0a10*/ LDG.E R17, [R8.64+0x8] ; /* 0x0000080408117981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R19, [R8.64+0xc] ; /* 0x00000c0408137981 */
/* 0x000f68000c1e1900 */
/*0a30*/ LDG.E R20, [R10.64] ; /* 0x000000040a147981 */
/* 0x000f62000c1e1900 */
/*0a40*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fc80007ffe0ff */
/*0a50*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0a60*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a70*/ IADD3 R4, R4, 0x4, RZ ; /* 0x0000000404047810 */
/* 0x000fc60007ffe0ff */
/*0a80*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0a90*/ IMAD R7, R25, R7, R28 ; /* 0x0000000719077224 */
/* 0x004fc800078e021c */
/*0aa0*/ IMAD R7, R14, R16, R7 ; /* 0x000000100e077224 */
/* 0x008fe400078e0207 */
/*0ab0*/ IMAD.WIDE R24, R2, 0x4, R10 ; /* 0x0000000402187825 */
/* 0x000fc800078e020a */
/*0ac0*/ IMAD R7, R18, R17, R7 ; /* 0x0000001112077224 */
/* 0x010fc800078e0207 */
/*0ad0*/ IMAD R28, R20, R19, R7 ; /* 0x00000013141c7224 */
/* 0x020fe200078e0207 */
/*0ae0*/ @P0 BRA 0x960 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0af0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0b00*/ @!P0 BRA 0xbf0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0b10*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b20*/ IADD3 R6, R3, R4, RZ ; /* 0x0000000403067210 */
/* 0x000fe20007ffe0ff */
/*0b30*/ IMAD R4, R4, c[0x0][0x178], R0 ; /* 0x00005e0004047a24 */
/* 0x000fd000078e0200 */
/*0b40*/ IMAD.WIDE R6, R6, R9, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fc800078e0209 */
/*0b50*/ IMAD.WIDE R8, R4, R9, c[0x0][0x168] ; /* 0x00005a0004087625 */
/* 0x000fca00078e0209 */
/*0b60*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0000a8000c1e1900 */
/*0b70*/ LDG.E R4, [R6.64] ; /* 0x0000000406047981 */
/* 0x0002a2000c1e1900 */
/*0b80*/ IADD3 R5, R5, -0x1, RZ ; /* 0xffffffff05057810 */
/* 0x000fc80007ffe0ff */
/*0b90*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0ba0*/ IMAD.WIDE R8, R2, 0x4, R8 ; /* 0x0000000402087825 */
/* 0x001fe200078e0208 */
/*0bb0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0bc0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0bd0*/ IMAD R28, R11, R4, R28 ; /* 0x000000040b1c7224 */
/* 0x004fcc00078e021c */
/*0be0*/ @P0 BRA 0xb60 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0bf0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*0c00*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fca0000000f00 */
/*0c10*/ IMAD.WIDE R2, R3, R2, c[0x0][0x170] ; /* 0x00005c0003027625 */
/* 0x000fca00078e0202 */
/*0c20*/ STG.E [R2.64], R28 ; /* 0x0000001c02007986 */
/* 0x000fe2000c101904 */
/*0c30*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c40*/ BRA 0xc40; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define N 3 // dim of matrix
//Flattened matrix multiplication kernel. The matrices are stored row-major
//in 1-D ("flattened") arrays, so elements are addressed as [row*width+col];
//the kernel does not use 2-D x,y addressing into the arrays themselves.
//Each thread computes one output element d_mat3[row*width+col] as the dot
//product of row `row` of d_mat1 with column `col` of d_mat2.
//Assumes a 2-D launch whose grid covers at least width x width threads;
//threads outside the matrix exit via the bounds check below.
__global__ void mat_multiply(int* d_mat1, int* d_mat2, int* d_mat3, int width)
{
int k,sum=0; // sum accumulates the dot product for this output element
int col = blockDim.x * blockIdx.x + threadIdx.x; // global column index
int row = blockDim.y * blockIdx.y + threadIdx.y; // global row index
if(row<width && col<width) // guard: grid may be larger than the matrix
{
for(k=0;k<width;k++)
{
// row `row` of d_mat1 is contiguous; column `col` of d_mat2 is strided
sum += d_mat1[row*width+k] * d_mat2[k*width+col];
}
d_mat3[row*width+col] = sum;
}
}
// Host driver: builds two NxN all-ones matrices in managed memory,
// multiplies them on the GPU, and prints the result (every element = N).
int main()
{
int i,j; // i: loop counter; NOTE(review): j is declared but never used
int SIZE = N*N; // total element count; NOTE(review): currently unused
//int BYTES = SIZE*sizeof(int);
int *d_mat1, *d_mat2, *d_mat3;
// allocate unified (managed) memory, accessible from both host and device;
// return codes are not checked here
cudaMallocManaged(&d_mat1,N*N*sizeof(int));
cudaMallocManaged(&d_mat2,N*N*sizeof(int));
cudaMallocManaged(&d_mat3,N*N*sizeof(int));
// initialize on the host: inputs all 1s, output zeroed
for(i=0;i<N*N;i++) //linearize array
{
d_mat1[i] = 1;
d_mat2[i] = 1;
d_mat3[i] = 0;
}
dim3 dimGrid(1,1); // a single block suffices for a 3x3 matrix
dim3 dimBlock(N,N); // one thread per output element
// launch kernel
mat_multiply<<<dimGrid,dimBlock>>>(d_mat1,d_mat2,d_mat3,N);
cudaDeviceSynchronize(); // must sync before the host reads managed memory
// print the flattened result matrix
for(i=0;i<N*N;i++)
{
printf("%d ",d_mat3[i]);
// NOTE(review): this condition (i%N==0 && i>N) breaks lines unevenly;
// i%N==N-1 would break after each complete row — confirm intent.
if(i%N==0 && i>N)
printf("\n");
}
printf("\n");
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z12mat_multiplyPiS_S_iPiS_S_i
.type _Z37__device_stub__Z12mat_multiplyPiS_S_iPiS_S_i, @function
_Z37__device_stub__Z12mat_multiplyPiS_S_iPiS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12mat_multiplyPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z37__device_stub__Z12mat_multiplyPiS_S_iPiS_S_i, .-_Z37__device_stub__Z12mat_multiplyPiS_S_iPiS_S_i
.globl _Z12mat_multiplyPiS_S_i
.type _Z12mat_multiplyPiS_S_i, @function
_Z12mat_multiplyPiS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z12mat_multiplyPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z12mat_multiplyPiS_S_i, .-_Z12mat_multiplyPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d "
.LC1:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $1, %edx
movl $36, %esi
call cudaMallocManaged@PLT
leaq 16(%rsp), %rdi
movl $1, %edx
movl $36, %esi
call cudaMallocManaged@PLT
leaq 24(%rsp), %rdi
movl $1, %edx
movl $36, %esi
call cudaMallocManaged@PLT
movl $0, %eax
.L12:
movq 8(%rsp), %rdx
movl $1, (%rdx,%rax)
movq 16(%rsp), %rdx
movl $1, (%rdx,%rax)
movq 24(%rsp), %rdx
movl $0, (%rdx,%rax)
addq $4, %rax
cmpq $36, %rax
jne .L12
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $3, 44(%rsp)
movl $3, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L13:
call cudaDeviceSynchronize@PLT
movl $0, %ebx
leaq .LC0(%rip), %rbp
leaq .LC1(%rip), %r12
jmp .L15
.L20:
movl $3, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z37__device_stub__Z12mat_multiplyPiS_S_iPiS_S_i
jmp .L13
.L14:
addq $1, %rbx
cmpq $9, %rbx
je .L21
.L15:
movq 24(%rsp), %rax
movl (%rax,%rbx,4), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq %ebx, %rax
imulq $1431655766, %rax, %rax
shrq $32, %rax
movl %ebx, %edx
sarl $31, %edx
subl %edx, %eax
leal (%rax,%rax,2), %eax
cmpl %ebx, %eax
jne .L14
cmpl $3, %ebx
jle .L14
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L21:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L22
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z12mat_multiplyPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z12mat_multiplyPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define N 3 // dim of matrix
//Fattened matrix multiplication . Kernel does not support x,y addressing
// Flattened (1D-indexed) matrix multiply: d_mat3 = d_mat1 * d_mat2, where all
// three are width x width row-major matrices stored as linear arrays.
// Expected launch: enough 2D threads to cover the matrix; out-of-range
// threads exit via the guard below.
__global__ void mat_multiply(int* d_mat1, int* d_mat2, int* d_mat3, int width)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;

    // Guard against threads beyond the matrix bounds.
    if (row >= width || col >= width)
        return;

    // Dot product of row `row` of d_mat1 with column `col` of d_mat2.
    int acc = 0;
    for (int idx = 0; idx < width; ++idx)
        acc += d_mat1[row * width + idx] * d_mat2[idx * width + col];

    d_mat3[row * width + col] = acc;
}
// Host driver: fills two NxN matrices with ones, multiplies them on the GPU,
// and prints the product (every entry should equal N). Returns 0 on success,
// 1 on any CUDA failure.
int main()
{
    int *d_mat1, *d_mat2, *d_mat3;
    const size_t bytes = (size_t)N * N * sizeof(int);

    // Managed (unified) memory is addressable from both host and device;
    // the original left all three allocations unchecked.
    if (cudaMallocManaged(&d_mat1, bytes) != cudaSuccess ||
        cudaMallocManaged(&d_mat2, bytes) != cudaSuccess ||
        cudaMallocManaged(&d_mat3, bytes) != cudaSuccess)
    {
        fprintf(stderr, "cudaMallocManaged failed\n");
        return 1;
    }

    // Initialize operands to all ones and clear the result.
    for (int i = 0; i < N * N; i++)
    {
        d_mat1[i] = 1;
        d_mat2[i] = 1;
        d_mat3[i] = 0;
    }

    // One block of NxN threads covers the whole (tiny) matrix.
    dim3 dimGrid(1, 1);
    dim3 dimBlock(N, N);
    mat_multiply<<<dimGrid, dimBlock>>>(d_mat1, d_mat2, d_mat3, N);

    // Surface launch-configuration errors, then wait for the kernel and
    // surface any asynchronous execution errors.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // Print N values per row. Fixes the original condition
    // `i%N==0 && i>N`, which broke rows at the wrong positions.
    for (int i = 0; i < N * N; i++)
    {
        printf("%d ", d_mat3[i]);
        if ((i + 1) % N == 0)
            printf("\n");
    }

    // Release the managed allocations (original leaked them).
    cudaFree(d_mat1);
    cudaFree(d_mat2);
    cudaFree(d_mat3);
    return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 3 // dim of matrix
//Fattened matrix multiplication . Kernel does not support x,y addressing
// Flattened (1D-indexed) matrix multiply: d_mat3 = d_mat1 * d_mat2, where all
// three are width x width row-major matrices stored as linear arrays.
// Expected launch: enough 2D threads to cover the matrix; out-of-range
// threads exit via the guard below.
__global__ void mat_multiply(int* d_mat1, int* d_mat2, int* d_mat3, int width)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;

    // Guard against threads beyond the matrix bounds.
    if (row >= width || col >= width)
        return;

    // Dot product of row `row` of d_mat1 with column `col` of d_mat2.
    int acc = 0;
    for (int idx = 0; idx < width; ++idx)
        acc += d_mat1[row * width + idx] * d_mat2[idx * width + col];

    d_mat3[row * width + col] = acc;
}
int main()
{
int i,j;
int SIZE = N*N;
//int BYTES = SIZE*sizeof(int);
int *d_mat1, *d_mat2, *d_mat3;
// allocate memory on the device
hipMallocManaged(&d_mat1,N*N*sizeof(int));
hipMallocManaged(&d_mat2,N*N*sizeof(int));
hipMallocManaged(&d_mat3,N*N*sizeof(int));
// generate matrix on host
for(i=0;i<N*N;i++) //linearize array
{
d_mat1[i] = 1;
d_mat2[i] = 1;
d_mat3[i] = 0;
}
dim3 dimGrid(1,1);
dim3 dimBlock(N,N);
// lauch kernel
mat_multiply<<<dimGrid,dimBlock>>>(d_mat1,d_mat2,d_mat3,N);
hipDeviceSynchronize();
for(i=0;i<N*N;i++)
{
printf("%d ",d_mat3[i]);
if(i%N==0 && i>N)
printf("\n");
}
printf("\n");
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.