system stringclasses 7
values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void gpuDot(float* dot, float* a, float* b, int N)
{
__shared__ float cache[THREADS_PER_BLOCK];
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int cacheIdx = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIdx]=temp;
__syncthreads();
int i = blockDim.x/2;
while (i != 0)
{
if (cacheIdx < i)
cache[cacheIdx] += cache[cacheIdx + i];
__syncthreads();
i /= 2;
}
if (cacheIdx == 0)
dot[blockIdx.x] = cache[0];
} | .file "tmpxft_000d4640_00000000-6_gpuDot.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i
.type _Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i, @function
_Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6gpuDotPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i, .-_Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i
.globl _Z6gpuDotPfS_S_i
.type _Z6gpuDotPfS_S_i, @function
_Z6gpuDotPfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z6gpuDotPfS_S_i, .-_Z6gpuDotPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6gpuDotPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6gpuDotPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void gpuDot(float* dot, float* a, float* b, int N)
{
__shared__ float cache[THREADS_PER_BLOCK];
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int cacheIdx = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIdx]=temp;
__syncthreads();
int i = blockDim.x/2;
while (i != 0)
{
if (cacheIdx < i)
cache[cacheIdx] += cache[cacheIdx + i];
__syncthreads();
i /= 2;
}
if (cacheIdx == 0)
dot[blockIdx.x] = cache[0];
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpuDot(float* dot, float* a, float* b, int N)
{
__shared__ float cache[THREADS_PER_BLOCK];
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int cacheIdx = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIdx]=temp;
__syncthreads();
int i = blockDim.x/2;
while (i != 0)
{
if (cacheIdx < i)
cache[cacheIdx] += cache[cacheIdx + i];
__syncthreads();
i /= 2;
}
if (cacheIdx == 0)
dot[blockIdx.x] = cache[0];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpuDot(float* dot, float* a, float* b, int N)
{
__shared__ float cache[THREADS_PER_BLOCK];
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int cacheIdx = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIdx]=temp;
__syncthreads();
int i = blockDim.x/2;
while (i != 0)
{
if (cacheIdx < i)
cache[cacheIdx] += cache[cacheIdx + i];
__syncthreads();
i /= 2;
}
if (cacheIdx == 0)
dot[blockIdx.x] = cache[0];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6gpuDotPfS_S_i
.globl _Z6gpuDotPfS_S_i
.p2align 8
.type _Z6gpuDotPfS_S_i,@function
_Z6gpuDotPfS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s4, s0, 32
s_mov_b32 s2, s15
s_addc_u32 s5, s1, 0
v_mov_b32_e32 v3, 0
s_mov_b32 s9, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_4
s_load_b32 s11, s[4:5], 0x0
s_load_b128 s[4:7], s[0:1], 0x8
v_mov_b32_e32 v3, 0
s_mov_b32 s10, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s11, s11, s3
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 2, v[1:2]
v_add_nc_u32_e32 v1, s11, v1
v_add_co_u32 v6, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s8, v1
global_load_b32 v2, v[6:7], off
global_load_b32 v4, v[4:5], off
s_or_b32 s10, vcc_lo, s10
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, v2, v4
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s10
.LBB0_4:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s9
v_lshlrev_b32_e32 v1, 2, v0
s_cmp_lt_u32 s3, 2
ds_store_b32 v1, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_9
.LBB0_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x0
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.p2align 6
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s5
s_cmp_gt_u32 s3, 3
s_mov_b32 s3, s4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_5
.LBB0_9:
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_8
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v2, v2, v3
ds_store_b32 v1, v2
s_branch .LBB0_8
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6gpuDotPfS_S_i
.amdhsa_group_segment_fixed_size 512
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6gpuDotPfS_S_i, .Lfunc_end0-_Z6gpuDotPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 512
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6gpuDotPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6gpuDotPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpuDot(float* dot, float* a, float* b, int N)
{
__shared__ float cache[THREADS_PER_BLOCK];
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int cacheIdx = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIdx]=temp;
__syncthreads();
int i = blockDim.x/2;
while (i != 0)
{
if (cacheIdx < i)
cache[cacheIdx] += cache[cacheIdx + i];
__syncthreads();
i /= 2;
}
if (cacheIdx == 0)
dot[blockIdx.x] = cache[0];
} | .text
.file "gpuDot.hip"
.globl _Z21__device_stub__gpuDotPfS_S_i # -- Begin function _Z21__device_stub__gpuDotPfS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__gpuDotPfS_S_i,@function
_Z21__device_stub__gpuDotPfS_S_i: # @_Z21__device_stub__gpuDotPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6gpuDotPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__gpuDotPfS_S_i, .Lfunc_end0-_Z21__device_stub__gpuDotPfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6gpuDotPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6gpuDotPfS_S_i,@object # @_Z6gpuDotPfS_S_i
.section .rodata,"a",@progbits
.globl _Z6gpuDotPfS_S_i
.p2align 3, 0x0
_Z6gpuDotPfS_S_i:
.quad _Z21__device_stub__gpuDotPfS_S_i
.size _Z6gpuDotPfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6gpuDotPfS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__gpuDotPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6gpuDotPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6gpuDotPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0030*/ BSSY B0, 0x150 ; /* 0x0000011000007945 */
/* 0x000fe20003800000 */
/*0040*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */
/* 0x000e240000002100 */
/*0060*/ IMAD R0, R7, c[0x0][0x0], R8 ; /* 0x0000000007007a24 */
/* 0x001fca00078e0208 */
/*0070*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0080*/ @P0 BRA 0x140 ; /* 0x000000b000000947 */
/* 0x000fea0003800000 */
/*0090*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fe400078e00ff */
/*00a0*/ MOV R5, 0x4 ; /* 0x0000000400057802 */
/* 0x000fca0000000f00 */
/*00b0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e0205 */
/*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fe400078e0205 */
/*00d0*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea8000c1e1900 */
/*00e0*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff097624 */
/* 0x000fc800078e00ff */
/*0100*/ IMAD R0, R9, c[0x0][0xc], R0 ; /* 0x0000030009007a24 */
/* 0x000fca00078e0200 */
/*0110*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*0120*/ FFMA R6, R5, R2, R6 ; /* 0x0000000205067223 */
/* 0x004fd80000000006 */
/*0130*/ @!P0 BRA 0xa0 ; /* 0xffffff6000008947 */
/* 0x000fea000383ffff */
/*0140*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0150*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*0160*/ STS [R8.X4], R6 ; /* 0x0000000608007388 */
/* 0x0001e20000004800 */
/*0170*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fc60008011604 */
/*0180*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0190*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f05270 */
/*01a0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf25270 */
/*01b0*/ @!P1 BRA 0x2a0 ; /* 0x000000e000009947 */
/* 0x000fea0003800000 */
/*01c0*/ SHF.L.U32 R0, R8, 0x2, RZ ; /* 0x0000000208007819 */
/* 0x001fe200000006ff */
/*01d0*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */
/* 0x000fca000f8e00ff */
/*01e0*/ ISETP.GE.AND P1, PT, R8, R3, PT ; /* 0x000000030800720c */
/* 0x000fda0003f26270 */
/*01f0*/ @!P1 LEA R2, R3.reuse, R0, 0x2 ; /* 0x0000000003029211 */
/* 0x040fe200078e10ff */
/*0200*/ @!P1 LDS R4, [R8.X4] ; /* 0x0000000008049984 */
/* 0x000fe80000004800 */
/*0210*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*0220*/ @!P1 FADD R4, R4, R5 ; /* 0x0000000504049221 */
/* 0x001fe20000000000 */
/*0230*/ IADD3 R5, R3.reuse, 0x1, RZ ; /* 0x0000000103057810 */
/* 0x040fe40007ffe0ff */
/*0240*/ LEA.HI R3, R3, R3, RZ, 0x1 ; /* 0x0000000303037211 */
/* 0x000fc400078f08ff */
/*0250*/ @!P1 STS [R8.X4], R4 ; /* 0x0000000408009388 */
/* 0x0001e80000004800 */
/*0260*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0270*/ ISETP.GT.U32.AND P1, PT, R5, 0x2, PT ; /* 0x000000020500780c */
/* 0x000fe40003f24070 */
/*0280*/ SHF.R.S32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fd60000011403 */
/*0290*/ @P1 BRA 0x1e0 ; /* 0xffffff4000001947 */
/* 0x001fea000383ffff */
/*02a0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*02b0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*02c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc800078e00ff */
/*02d0*/ IMAD.WIDE.U32 R2, R7, R2, c[0x0][0x160] ; /* 0x0000580007027625 */
/* 0x000fca00078e0002 */
/*02e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*02f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0300*/ BRA 0x300; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6gpuDotPfS_S_i
.globl _Z6gpuDotPfS_S_i
.p2align 8
.type _Z6gpuDotPfS_S_i,@function
_Z6gpuDotPfS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s4, s0, 32
s_mov_b32 s2, s15
s_addc_u32 s5, s1, 0
v_mov_b32_e32 v3, 0
s_mov_b32 s9, exec_lo
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_4
s_load_b32 s11, s[4:5], 0x0
s_load_b128 s[4:7], s[0:1], 0x8
v_mov_b32_e32 v3, 0
s_mov_b32 s10, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s11, s11, s3
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 2, v[1:2]
v_add_nc_u32_e32 v1, s11, v1
v_add_co_u32 v6, vcc_lo, s4, v4
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s8, v1
global_load_b32 v2, v[6:7], off
global_load_b32 v4, v[4:5], off
s_or_b32 s10, vcc_lo, s10
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v3, v2, v4
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s10
.LBB0_4:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s9
v_lshlrev_b32_e32 v1, 2, v0
s_cmp_lt_u32 s3, 2
ds_store_b32 v1, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_9
.LBB0_5:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x0
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.p2align 6
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s5
s_cmp_gt_u32 s3, 3
s_mov_b32 s3, s4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_5
.LBB0_9:
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_8
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v2, v2, v3
ds_store_b32 v1, v2
s_branch .LBB0_8
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6gpuDotPfS_S_i
.amdhsa_group_segment_fixed_size 512
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6gpuDotPfS_S_i, .Lfunc_end0-_Z6gpuDotPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 512
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6gpuDotPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6gpuDotPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d4640_00000000-6_gpuDot.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i
.type _Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i, @function
_Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6gpuDotPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i, .-_Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i
.globl _Z6gpuDotPfS_S_i
.type _Z6gpuDotPfS_S_i, @function
_Z6gpuDotPfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6gpuDotPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z6gpuDotPfS_S_i, .-_Z6gpuDotPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6gpuDotPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6gpuDotPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpuDot.hip"
.globl _Z21__device_stub__gpuDotPfS_S_i # -- Begin function _Z21__device_stub__gpuDotPfS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__gpuDotPfS_S_i,@function
_Z21__device_stub__gpuDotPfS_S_i: # @_Z21__device_stub__gpuDotPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6gpuDotPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__gpuDotPfS_S_i, .Lfunc_end0-_Z21__device_stub__gpuDotPfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6gpuDotPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6gpuDotPfS_S_i,@object # @_Z6gpuDotPfS_S_i
.section .rodata,"a",@progbits
.globl _Z6gpuDotPfS_S_i
.p2align 3, 0x0
_Z6gpuDotPfS_S_i:
.quad _Z21__device_stub__gpuDotPfS_S_i
.size _Z6gpuDotPfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6gpuDotPfS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__gpuDotPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6gpuDotPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
double _t_6_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_6_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_6_ += q_4[k][j][i+1];
_t_6_ -= q_4[k][j][i-1];
double flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_6_;
double _t_7_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_7_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_7_ += q_4[k][j][i+2];
_t_7_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_7_;
double _t_8_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_8_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_8_ += q_4[k][j][i+3];
_t_8_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_8_;
double _t_9_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_9_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_9_ += q_4[k][j][i+4];
_t_9_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_9_;
double _t_11_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_11_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_11_;
double _t_12_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_12_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_12_;
double _t_13_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_13_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_13_;
double _t_14_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_14_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_14_;
double _t_16_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_16_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_16_;
double _t_17_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_17_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_17_;
double _t_18_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_18_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_18_;
double _t_19_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_19_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_19_;
double _t_21_ = q_4[k][j][i+1] * q_1[k][j][i+1];
double _v_24_ = cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_21_ += _v_24_;
_t_21_ -= cons_4[k][j][i-1] * q_1[k][j][i-1];
double _v_27_ = q_4[k][j][i-1] * q_1[k][j][i-1];
_t_21_ -= _v_27_;
double flux_4kc0jc0ic0 = dxinv0 * 0.8 * _t_21_;
double _t_22_ = q_4[k][j][i+2] * q_1[k][j][i+2];
double _v_28_ = cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_22_ += _v_28_;
_t_22_ -= cons_4[k][j][i-2] * q_1[k][j][i-2];
double _v_31_ = q_4[k][j][i-2] * q_1[k][j][i-2];
_t_22_ -= _v_31_;
flux_4kc0jc0ic0 -= dxinv0 * 0.2 * _t_22_;
double _t_23_ = q_4[k][j][i+3] * q_1[k][j][i+3];
double _v_32_ = cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_23_ += _v_32_;
_t_23_ -= cons_4[k][j][i-3] * q_1[k][j][i-3];
double _v_35_ = q_4[k][j][i-3] * q_1[k][j][i-3];
_t_23_ -= _v_35_;
flux_4kc0jc0ic0 += dxinv0 * 0.038 * _t_23_;
double _t_24_ = q_4[k][j][i+4] * q_1[k][j][i+4];
double _v_36_ = cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_24_ += _v_36_;
_t_24_ -= cons_4[k][j][i-4] * q_1[k][j][i-4];
double _v_39_ = q_4[k][j][i-4] * q_1[k][j][i-4];
_t_24_ -= _v_39_;
flux_4kc0jc0ic0 -= dxinv0 * 0.0035 * _t_24_;
double _t_27_ = cons_2[k][j+1][i];
_t_27_ -= cons_2[k][j-1][i];
double _t_25_ = dxinv1 * 0.8 * _t_27_;
double _t_28_ = cons_2[k][j+2][i];
_t_28_ -= cons_2[k][j-2][i];
_t_25_ -= dxinv1 * 0.2 * _t_28_;
double _t_29_ = cons_2[k][j+3][i];
_t_29_ -= cons_2[k][j-3][i];
_t_25_ += dxinv1 * 0.038 * _t_29_;
double _t_30_ = cons_2[k][j+4][i];
_t_30_ -= cons_2[k][j-4][i];
_t_25_ -= dxinv1 * 0.0035 * _t_30_;
flux_0kc0jc0ic0 -= _t_25_;
double _t_33_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_33_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_31_ = dxinv1 * 0.8 * _t_33_;
double _t_34_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_34_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_31_ -= dxinv1 * 0.2 * _t_34_;
double _t_35_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_35_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += dxinv1 * 0.038 * _t_35_;
double _t_36_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_36_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_31_ -= dxinv1 * 0.0035 * _t_36_;
flux_1kc0jc0ic0 -= _t_31_;
double _t_39_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_39_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_39_ += q_4[k][j+1][i];
_t_39_ -= q_4[k][j-1][i];
double _t_37_ = dxinv1 * 0.8 * _t_39_;
double _t_40_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_40_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_40_ += q_4[k][j+2][i];
_t_40_ -= q_4[k][j-2][i];
_t_37_ -= dxinv1 * 0.2 * _t_40_;
double _t_41_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_41_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_41_ += q_4[k][j+3][i];
_t_41_ -= q_4[k][j-3][i];
_t_37_ += dxinv1 * 0.038 * _t_41_;
double _t_42_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_42_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_42_ += q_4[k][j+4][i];
_t_42_ -= q_4[k][j-4][i];
_t_37_ -= dxinv1 * 0.0035 * _t_42_;
flux_2kc0jc0ic0 -= _t_37_;
double _t_45_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_45_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_43_ = dxinv1 * 0.8 * _t_45_;
double _t_46_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_46_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_43_ -= dxinv1 * 0.2 * _t_46_;
double _t_47_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_47_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_43_ += dxinv1 * 0.038 * _t_47_;
double _t_48_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_48_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_43_ -= dxinv1 * 0.0035 * _t_48_;
flux_3kc0jc0ic0 -= _t_43_;
double _t_51_ = q_4[k][j+1][i] * q_2[k][j+1][i];
double _v_64_ = cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_51_ += _v_64_;
_t_51_ -= cons_4[k][j-1][i] * q_2[k][j-1][i];
double _v_67_ = q_4[k][j-1][i] * q_2[k][j-1][i];
_t_51_ -= _v_67_;
double _t_49_ = dxinv1 * 0.8 * _t_51_;
double _t_52_ = q_4[k][j+2][i] * q_2[k][j+2][i];
double _v_68_ = cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_52_ += _v_68_;
_t_52_ -= cons_4[k][j-2][i] * q_2[k][j-2][i];
double _v_71_ = q_4[k][j-2][i] * q_2[k][j-2][i];
_t_52_ -= _v_71_;
_t_49_ -= dxinv1 * 0.2 * _t_52_;
double _t_53_ = q_4[k][j+3][i] * q_2[k][j+3][i];
double _v_72_ = cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_53_ += _v_72_;
_t_53_ -= cons_4[k][j-3][i] * q_2[k][j-3][i];
double _v_75_ = q_4[k][j-3][i] * q_2[k][j-3][i];
_t_53_ -= _v_75_;
_t_49_ += dxinv1 * 0.038 * _t_53_;
double _t_54_ = q_4[k][j+4][i] * q_2[k][j+4][i];
double _v_76_ = cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_54_ += _v_76_;
_t_54_ -= cons_4[k][j-4][i] * q_2[k][j-4][i];
double _v_79_ = q_4[k][j-4][i] * q_2[k][j-4][i];
_t_54_ -= _v_79_;
_t_49_ -= dxinv1 * 0.0035 * _t_54_;
flux_4kc0jc0ic0 -= _t_49_;
double _t_57_ = cons_3[k+1][j][i];
_t_57_ -= cons_3[k-1][j][i];
double _t_55_ = dxinv2 * 0.8 * _t_57_;
double _t_58_ = cons_3[k+2][j][i];
_t_58_ -= cons_3[k-2][j][i];
_t_55_ -= dxinv2 * 0.2 * _t_58_;
double _t_59_ = cons_3[k+3][j][i];
_t_59_ -= cons_3[k-3][j][i];
_t_55_ += dxinv2 * 0.038 * _t_59_;
double _t_60_ = cons_3[k+4][j][i];
_t_60_ -= cons_3[k-4][j][i];
_t_55_ -= dxinv2 * 0.0035 * _t_60_;
flux_0kc0jc0ic0 -= _t_55_;
double _t_63_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_63_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
double _t_61_ = dxinv2 * 0.8 * _t_63_;
double _t_64_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_64_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_61_ -= dxinv2 * 0.2 * _t_64_;
double _t_65_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_65_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_61_ += dxinv2 * 0.038 * _t_65_;
double _t_66_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_66_ -= cons_1[k-4][j][i] * q_3[k-4][j][i];
_t_61_ -= dxinv2 * 0.0035 * _t_66_;
flux_1kc0jc0ic0 -= _t_61_;
double _t_69_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_69_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
double _t_67_ = dxinv2 * 0.8 * _t_69_;
double _t_70_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_70_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_67_ -= dxinv2 * 0.2 * _t_70_;
double _t_71_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_71_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_67_ += dxinv2 * 0.038 * _t_71_;
double _t_72_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_72_ -= cons_2[k-4][j][i] * q_3[k-4][j][i];
_t_67_ -= dxinv2 * 0.0035 * _t_72_;
flux_2kc0jc0ic0 -= _t_67_;
double _t_75_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_75_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_75_ += q_4[k+1][j][i];
_t_75_ -= q_4[k-1][j][i];
double _t_73_ = dxinv2 * 0.8 * _t_75_;
double _t_76_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_76_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_76_ += q_4[k+2][j][i];
_t_76_ -= q_4[k-2][j][i];
_t_73_ -= dxinv2 * 0.2 * _t_76_;
double _t_77_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_77_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_77_ += q_4[k+3][j][i];
_t_77_ -= q_4[k-3][j][i];
_t_73_ += dxinv2 * 0.038 * _t_77_;
double _t_78_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_78_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_78_ += q_4[k+4][j][i];
_t_78_ -= q_4[k-4][j][i];
_t_73_ -= dxinv2 * 0.0035 * _t_78_;
flux_3kc0jc0ic0 -= _t_73_;
double _t_81_ = q_4[k+1][j][i] * q_3[k+1][j][i];
double _v_104_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_81_ += _v_104_;
_t_81_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
double _v_107_ = q_4[k-1][j][i] * q_3[k-1][j][i];
_t_81_ -= _v_107_;
double _t_79_ = dxinv2 * 0.8 * _t_81_;
double _t_82_ = q_4[k+2][j][i] * q_3[k+2][j][i];
double _v_108_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_82_ += _v_108_;
_t_82_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
double _v_111_ = q_4[k-2][j][i] * q_3[k-2][j][i];
_t_82_ -= _v_111_;
_t_79_ -= dxinv2 * 0.2 * _t_82_;
double _t_83_ = q_4[k+3][j][i] * q_3[k+3][j][i];
double _v_112_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_83_ += _v_112_;
_t_83_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
double _v_115_ = q_4[k-3][j][i] * q_3[k-3][j][i];
_t_83_ -= _v_115_;
_t_79_ += dxinv2 * 0.038 * _t_83_;
double _t_84_ = q_4[k+4][j][i] * q_3[k+4][j][i];
double _v_116_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_84_ += _v_116_;
_t_84_ -= cons_4[k-4][j][i] * q_3[k-4][j][i];
double _v_119_ = q_4[k-4][j][i] * q_3[k-4][j][i];
_t_84_ -= _v_119_;
_t_79_ -= dxinv2 * 0.0035 * _t_84_;
flux_4kc0jc0ic0 -= _t_79_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_4[k][j][i] = flux_4kc0jc0ic0;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
} | .file "tmpxft_000c598c_00000000-6_max-reordered-b.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error : %s, %s\n"
.text
.globl _Z11check_errorPKc
.type _Z11check_errorPKc, @function
_Z11check_errorPKc:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaGetLastError@PLT
testl %eax, %eax
jne .L6
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movq %rbx, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z11check_errorPKc, .-_Z11check_errorPKc
.globl _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.type _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, @function
_Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii:
.LFB2083:
.cfi_startproc
endbr64
subq $376, %rsp
.cfi_def_cfa_offset 384
movsd %xmm0, 24(%rsp)
movsd %xmm1, 16(%rsp)
movsd %xmm2, 8(%rsp)
movq %fs:40, %rax
movq %rax, 360(%rsp)
xorl %eax, %eax
movq %rdi, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 208(%rsp)
movq %rsi, 48(%rsp)
leaq 48(%rsp), %rax
movq %rax, 216(%rsp)
movq %rdx, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 224(%rsp)
movq %rcx, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, 232(%rsp)
movq %r8, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 240(%rsp)
movq %r9, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 248(%rsp)
movq 384(%rsp), %rax
movq %rax, 88(%rsp)
leaq 88(%rsp), %rax
movq %rax, 256(%rsp)
movq 392(%rsp), %rax
movq %rax, 96(%rsp)
leaq 96(%rsp), %rax
movq %rax, 264(%rsp)
movq 400(%rsp), %rax
movq %rax, 104(%rsp)
leaq 104(%rsp), %rax
movq %rax, 272(%rsp)
movq 408(%rsp), %rax
movq %rax, 112(%rsp)
leaq 112(%rsp), %rax
movq %rax, 280(%rsp)
movq 416(%rsp), %rax
movq %rax, 120(%rsp)
leaq 120(%rsp), %rax
movq %rax, 288(%rsp)
movq 424(%rsp), %rax
movq %rax, 128(%rsp)
leaq 128(%rsp), %rax
movq %rax, 296(%rsp)
movq 432(%rsp), %rax
movq %rax, 136(%rsp)
leaq 136(%rsp), %rax
movq %rax, 304(%rsp)
leaq 24(%rsp), %rax
movq %rax, 312(%rsp)
leaq 16(%rsp), %rax
movq %rax, 320(%rsp)
leaq 8(%rsp), %rax
movq %rax, 328(%rsp)
leaq 440(%rsp), %rax
movq %rax, 336(%rsp)
leaq 448(%rsp), %rax
movq %rax, 344(%rsp)
leaq 456(%rsp), %rax
movq %rax, 352(%rsp)
movl $1, 160(%rsp)
movl $1, 164(%rsp)
movl $1, 168(%rsp)
movl $1, 172(%rsp)
movl $1, 176(%rsp)
movl $1, 180(%rsp)
leaq 152(%rsp), %rcx
leaq 144(%rsp), %rdx
leaq 172(%rsp), %rsi
leaq 160(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 360(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $376, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 152(%rsp)
.cfi_def_cfa_offset 392
pushq 152(%rsp)
.cfi_def_cfa_offset 400
leaq 224(%rsp), %r9
movq 188(%rsp), %rcx
movl 196(%rsp), %r8d
movq 176(%rsp), %rsi
movl 184(%rsp), %edx
leaq _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 384
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, .-_Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.globl _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.type _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, @function
_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 88(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 88(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 88(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
pushq 88(%rsp)
.cfi_def_cfa_offset 48
pushq 88(%rsp)
.cfi_def_cfa_offset 56
pushq 88(%rsp)
.cfi_def_cfa_offset 64
pushq 88(%rsp)
.cfi_def_cfa_offset 72
pushq 88(%rsp)
.cfi_def_cfa_offset 80
pushq 88(%rsp)
.cfi_def_cfa_offset 88
pushq 88(%rsp)
.cfi_def_cfa_offset 96
call _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
addq $88, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, .-_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Failed to allocate device memory for flux_0\n"
.align 8
.LC2:
.string "Failed to allocate device memory for flux_1\n"
.align 8
.LC3:
.string "Failed to allocate device memory for flux_2\n"
.align 8
.LC4:
.string "Failed to allocate device memory for flux_3\n"
.align 8
.LC5:
.string "Failed to allocate device memory for flux_4\n"
.align 8
.LC6:
.string "Failed to allocate device memory for cons_1\n"
.align 8
.LC7:
.string "Failed to allocate device memory for cons_2\n"
.align 8
.LC8:
.string "Failed to allocate device memory for cons_3\n"
.align 8
.LC9:
.string "Failed to allocate device memory for cons_4\n"
.align 8
.LC10:
.string "Failed to allocate device memory for q_1\n"
.align 8
.LC11:
.string "Failed to allocate device memory for q_2\n"
.align 8
.LC12:
.string "Failed to allocate device memory for q_3\n"
.align 8
.LC13:
.string "Failed to allocate device memory for q_4\n"
.text
.globl host_code
.type host_code, @function
host_code:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $264, %rsp
.cfi_def_cfa_offset 320
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 320(%rsp), %r15
movq 328(%rsp), %r14
movq 336(%rsp), %rax
movq %rax, 48(%rsp)
movq 344(%rsp), %rbx
movq %rbx, 56(%rsp)
movq 352(%rsp), %r10
movq %r10, 64(%rsp)
movq 360(%rsp), %r11
movq %r11, 72(%rsp)
movq 368(%rsp), %rbp
movq %rbp, 80(%rsp)
movsd %xmm0, 88(%rsp)
movsd %xmm1, 96(%rsp)
movsd %xmm2, 104(%rsp)
movl 376(%rsp), %ebp
movl 384(%rsp), %r12d
movl 392(%rsp), %r13d
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
movslq %r12d, %rbx
movslq %r13d, %rax
imulq %rax, %rbx
movslq %ebp, %rax
imulq %rax, %rbx
salq $3, %rbx
leaq 120(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC1(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq (%rsp), %rsi
movq 120(%rsp), %rdi
call cudaMemcpy@PLT
leaq 128(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC2(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
movq 128(%rsp), %rdi
call cudaMemcpy@PLT
leaq 136(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC3(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 16(%rsp), %rsi
movq 136(%rsp), %rdi
call cudaMemcpy@PLT
leaq 144(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC4(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 144(%rsp), %rdi
call cudaMemcpy@PLT
leaq 152(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC5(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 32(%rsp), %rsi
movq 152(%rsp), %rdi
call cudaMemcpy@PLT
leaq 160(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC6(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 40(%rsp), %rsi
movq 160(%rsp), %rdi
call cudaMemcpy@PLT
leaq 168(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC7(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq 168(%rsp), %rdi
call cudaMemcpy@PLT
leaq 176(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC8(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 176(%rsp), %rdi
call cudaMemcpy@PLT
leaq 184(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC9(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 48(%rsp), %rsi
movq 184(%rsp), %rdi
call cudaMemcpy@PLT
leaq 192(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC10(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq 192(%rsp), %rdi
call cudaMemcpy@PLT
leaq 200(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC11(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 64(%rsp), %rsi
movq 200(%rsp), %rdi
call cudaMemcpy@PLT
leaq 208(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC12(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq 208(%rsp), %rdi
call cudaMemcpy@PLT
leaq 216(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC13(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 80(%rsp), %rsi
movq 216(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, %eax
shrl $2, %eax
movl %eax, %esi
addl $1, %esi
testb $3, %bpl
cmove %eax, %esi
movl %r12d, %edx
shrl $2, %edx
movl %edx, %eax
addl $1, %eax
testb $3, %r12b
cmove %edx, %eax
movl %r13d, %ecx
shrl $4, %ecx
movl %ecx, %edx
addl $1, %edx
testb $15, %r13b
cmove %ecx, %edx
movl %edx, 236(%rsp)
movl %eax, 240(%rsp)
movl $16, 224(%rsp)
movl $4, 228(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 224(%rsp), %rdx
movl $4, %ecx
movq 236(%rsp), %rdi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L22:
movl $2, %ecx
movq %rbx, %rdx
movq 120(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 128(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 144(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 152(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 136(%rsp), %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movsd 88(%rsp), %xmm0
xorpd .LC14(%rip), %xmm0
pushq %r13
.cfi_def_cfa_offset 328
pushq %r12
.cfi_def_cfa_offset 336
pushq %rbp
.cfi_def_cfa_offset 344
pushq 240(%rsp)
.cfi_def_cfa_offset 352
pushq 240(%rsp)
.cfi_def_cfa_offset 360
pushq 240(%rsp)
.cfi_def_cfa_offset 368
pushq 240(%rsp)
.cfi_def_cfa_offset 376
pushq 240(%rsp)
.cfi_def_cfa_offset 384
pushq 240(%rsp)
.cfi_def_cfa_offset 392
pushq 240(%rsp)
.cfi_def_cfa_offset 400
movsd 184(%rsp), %xmm2
movsd 176(%rsp), %xmm1
movq 240(%rsp), %r9
movq 232(%rsp), %r8
movq 224(%rsp), %rcx
movq 216(%rsp), %rdx
movq 208(%rsp), %rsi
movq 200(%rsp), %rdi
call _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
addq $80, %rsp
.cfi_def_cfa_offset 320
jmp .L22
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size host_code, .-host_code
.section .rodata.str1.8
.align 8
.LC15:
.string "_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC14:
.long 0
.long -2147483648
.long 0
.long 0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
double _t_6_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_6_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_6_ += q_4[k][j][i+1];
_t_6_ -= q_4[k][j][i-1];
double flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_6_;
double _t_7_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_7_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_7_ += q_4[k][j][i+2];
_t_7_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_7_;
double _t_8_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_8_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_8_ += q_4[k][j][i+3];
_t_8_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_8_;
double _t_9_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_9_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_9_ += q_4[k][j][i+4];
_t_9_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_9_;
double _t_11_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_11_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_11_;
double _t_12_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_12_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_12_;
double _t_13_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_13_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_13_;
double _t_14_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_14_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_14_;
double _t_16_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_16_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_16_;
double _t_17_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_17_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_17_;
double _t_18_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_18_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_18_;
double _t_19_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_19_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_19_;
double _t_21_ = q_4[k][j][i+1] * q_1[k][j][i+1];
double _v_24_ = cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_21_ += _v_24_;
_t_21_ -= cons_4[k][j][i-1] * q_1[k][j][i-1];
double _v_27_ = q_4[k][j][i-1] * q_1[k][j][i-1];
_t_21_ -= _v_27_;
double flux_4kc0jc0ic0 = dxinv0 * 0.8 * _t_21_;
double _t_22_ = q_4[k][j][i+2] * q_1[k][j][i+2];
double _v_28_ = cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_22_ += _v_28_;
_t_22_ -= cons_4[k][j][i-2] * q_1[k][j][i-2];
double _v_31_ = q_4[k][j][i-2] * q_1[k][j][i-2];
_t_22_ -= _v_31_;
flux_4kc0jc0ic0 -= dxinv0 * 0.2 * _t_22_;
double _t_23_ = q_4[k][j][i+3] * q_1[k][j][i+3];
double _v_32_ = cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_23_ += _v_32_;
_t_23_ -= cons_4[k][j][i-3] * q_1[k][j][i-3];
double _v_35_ = q_4[k][j][i-3] * q_1[k][j][i-3];
_t_23_ -= _v_35_;
flux_4kc0jc0ic0 += dxinv0 * 0.038 * _t_23_;
double _t_24_ = q_4[k][j][i+4] * q_1[k][j][i+4];
double _v_36_ = cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_24_ += _v_36_;
_t_24_ -= cons_4[k][j][i-4] * q_1[k][j][i-4];
double _v_39_ = q_4[k][j][i-4] * q_1[k][j][i-4];
_t_24_ -= _v_39_;
flux_4kc0jc0ic0 -= dxinv0 * 0.0035 * _t_24_;
double _t_27_ = cons_2[k][j+1][i];
_t_27_ -= cons_2[k][j-1][i];
double _t_25_ = dxinv1 * 0.8 * _t_27_;
double _t_28_ = cons_2[k][j+2][i];
_t_28_ -= cons_2[k][j-2][i];
_t_25_ -= dxinv1 * 0.2 * _t_28_;
double _t_29_ = cons_2[k][j+3][i];
_t_29_ -= cons_2[k][j-3][i];
_t_25_ += dxinv1 * 0.038 * _t_29_;
double _t_30_ = cons_2[k][j+4][i];
_t_30_ -= cons_2[k][j-4][i];
_t_25_ -= dxinv1 * 0.0035 * _t_30_;
flux_0kc0jc0ic0 -= _t_25_;
double _t_33_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_33_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_31_ = dxinv1 * 0.8 * _t_33_;
double _t_34_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_34_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_31_ -= dxinv1 * 0.2 * _t_34_;
double _t_35_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_35_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += dxinv1 * 0.038 * _t_35_;
double _t_36_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_36_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_31_ -= dxinv1 * 0.0035 * _t_36_;
flux_1kc0jc0ic0 -= _t_31_;
double _t_39_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_39_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_39_ += q_4[k][j+1][i];
_t_39_ -= q_4[k][j-1][i];
double _t_37_ = dxinv1 * 0.8 * _t_39_;
double _t_40_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_40_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_40_ += q_4[k][j+2][i];
_t_40_ -= q_4[k][j-2][i];
_t_37_ -= dxinv1 * 0.2 * _t_40_;
double _t_41_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_41_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_41_ += q_4[k][j+3][i];
_t_41_ -= q_4[k][j-3][i];
_t_37_ += dxinv1 * 0.038 * _t_41_;
double _t_42_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_42_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_42_ += q_4[k][j+4][i];
_t_42_ -= q_4[k][j-4][i];
_t_37_ -= dxinv1 * 0.0035 * _t_42_;
flux_2kc0jc0ic0 -= _t_37_;
double _t_45_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_45_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_43_ = dxinv1 * 0.8 * _t_45_;
double _t_46_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_46_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_43_ -= dxinv1 * 0.2 * _t_46_;
double _t_47_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_47_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_43_ += dxinv1 * 0.038 * _t_47_;
double _t_48_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_48_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_43_ -= dxinv1 * 0.0035 * _t_48_;
flux_3kc0jc0ic0 -= _t_43_;
double _t_51_ = q_4[k][j+1][i] * q_2[k][j+1][i];
double _v_64_ = cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_51_ += _v_64_;
_t_51_ -= cons_4[k][j-1][i] * q_2[k][j-1][i];
double _v_67_ = q_4[k][j-1][i] * q_2[k][j-1][i];
_t_51_ -= _v_67_;
double _t_49_ = dxinv1 * 0.8 * _t_51_;
double _t_52_ = q_4[k][j+2][i] * q_2[k][j+2][i];
double _v_68_ = cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_52_ += _v_68_;
_t_52_ -= cons_4[k][j-2][i] * q_2[k][j-2][i];
double _v_71_ = q_4[k][j-2][i] * q_2[k][j-2][i];
_t_52_ -= _v_71_;
_t_49_ -= dxinv1 * 0.2 * _t_52_;
double _t_53_ = q_4[k][j+3][i] * q_2[k][j+3][i];
double _v_72_ = cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_53_ += _v_72_;
_t_53_ -= cons_4[k][j-3][i] * q_2[k][j-3][i];
double _v_75_ = q_4[k][j-3][i] * q_2[k][j-3][i];
_t_53_ -= _v_75_;
_t_49_ += dxinv1 * 0.038 * _t_53_;
double _t_54_ = q_4[k][j+4][i] * q_2[k][j+4][i];
double _v_76_ = cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_54_ += _v_76_;
_t_54_ -= cons_4[k][j-4][i] * q_2[k][j-4][i];
double _v_79_ = q_4[k][j-4][i] * q_2[k][j-4][i];
_t_54_ -= _v_79_;
_t_49_ -= dxinv1 * 0.0035 * _t_54_;
flux_4kc0jc0ic0 -= _t_49_;
double _t_57_ = cons_3[k+1][j][i];
_t_57_ -= cons_3[k-1][j][i];
double _t_55_ = dxinv2 * 0.8 * _t_57_;
double _t_58_ = cons_3[k+2][j][i];
_t_58_ -= cons_3[k-2][j][i];
_t_55_ -= dxinv2 * 0.2 * _t_58_;
double _t_59_ = cons_3[k+3][j][i];
_t_59_ -= cons_3[k-3][j][i];
_t_55_ += dxinv2 * 0.038 * _t_59_;
double _t_60_ = cons_3[k+4][j][i];
_t_60_ -= cons_3[k-4][j][i];
_t_55_ -= dxinv2 * 0.0035 * _t_60_;
flux_0kc0jc0ic0 -= _t_55_;
double _t_63_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_63_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
double _t_61_ = dxinv2 * 0.8 * _t_63_;
double _t_64_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_64_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_61_ -= dxinv2 * 0.2 * _t_64_;
double _t_65_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_65_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_61_ += dxinv2 * 0.038 * _t_65_;
double _t_66_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_66_ -= cons_1[k-4][j][i] * q_3[k-4][j][i];
_t_61_ -= dxinv2 * 0.0035 * _t_66_;
flux_1kc0jc0ic0 -= _t_61_;
double _t_69_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_69_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
double _t_67_ = dxinv2 * 0.8 * _t_69_;
double _t_70_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_70_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_67_ -= dxinv2 * 0.2 * _t_70_;
double _t_71_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_71_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_67_ += dxinv2 * 0.038 * _t_71_;
double _t_72_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_72_ -= cons_2[k-4][j][i] * q_3[k-4][j][i];
_t_67_ -= dxinv2 * 0.0035 * _t_72_;
flux_2kc0jc0ic0 -= _t_67_;
double _t_75_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_75_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_75_ += q_4[k+1][j][i];
_t_75_ -= q_4[k-1][j][i];
double _t_73_ = dxinv2 * 0.8 * _t_75_;
double _t_76_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_76_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_76_ += q_4[k+2][j][i];
_t_76_ -= q_4[k-2][j][i];
_t_73_ -= dxinv2 * 0.2 * _t_76_;
double _t_77_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_77_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_77_ += q_4[k+3][j][i];
_t_77_ -= q_4[k-3][j][i];
_t_73_ += dxinv2 * 0.038 * _t_77_;
double _t_78_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_78_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_78_ += q_4[k+4][j][i];
_t_78_ -= q_4[k-4][j][i];
_t_73_ -= dxinv2 * 0.0035 * _t_78_;
flux_3kc0jc0ic0 -= _t_73_;
double _t_81_ = q_4[k+1][j][i] * q_3[k+1][j][i];
double _v_104_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_81_ += _v_104_;
_t_81_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
double _v_107_ = q_4[k-1][j][i] * q_3[k-1][j][i];
_t_81_ -= _v_107_;
double _t_79_ = dxinv2 * 0.8 * _t_81_;
double _t_82_ = q_4[k+2][j][i] * q_3[k+2][j][i];
double _v_108_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_82_ += _v_108_;
_t_82_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
double _v_111_ = q_4[k-2][j][i] * q_3[k-2][j][i];
_t_82_ -= _v_111_;
_t_79_ -= dxinv2 * 0.2 * _t_82_;
double _t_83_ = q_4[k+3][j][i] * q_3[k+3][j][i];
double _v_112_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_83_ += _v_112_;
_t_83_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
double _v_115_ = q_4[k-3][j][i] * q_3[k-3][j][i];
_t_83_ -= _v_115_;
_t_79_ += dxinv2 * 0.038 * _t_83_;
double _t_84_ = q_4[k+4][j][i] * q_3[k+4][j][i];
double _v_116_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_84_ += _v_116_;
_t_84_ -= cons_4[k-4][j][i] * q_3[k-4][j][i];
double _v_119_ = q_4[k-4][j][i] * q_3[k-4][j][i];
_t_84_ -= _v_119_;
_t_79_ -= dxinv2 * 0.0035 * _t_84_;
flux_4kc0jc0ic0 -= _t_79_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_4[k][j][i] = flux_4kc0jc0ic0;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
} | #include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
double _t_6_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_6_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_6_ += q_4[k][j][i+1];
_t_6_ -= q_4[k][j][i-1];
double flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_6_;
double _t_7_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_7_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_7_ += q_4[k][j][i+2];
_t_7_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_7_;
double _t_8_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_8_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_8_ += q_4[k][j][i+3];
_t_8_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_8_;
double _t_9_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_9_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_9_ += q_4[k][j][i+4];
_t_9_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_9_;
double _t_11_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_11_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_11_;
double _t_12_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_12_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_12_;
double _t_13_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_13_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_13_;
double _t_14_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_14_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_14_;
double _t_16_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_16_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_16_;
double _t_17_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_17_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_17_;
double _t_18_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_18_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_18_;
double _t_19_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_19_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_19_;
double _t_21_ = q_4[k][j][i+1] * q_1[k][j][i+1];
double _v_24_ = cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_21_ += _v_24_;
_t_21_ -= cons_4[k][j][i-1] * q_1[k][j][i-1];
double _v_27_ = q_4[k][j][i-1] * q_1[k][j][i-1];
_t_21_ -= _v_27_;
double flux_4kc0jc0ic0 = dxinv0 * 0.8 * _t_21_;
double _t_22_ = q_4[k][j][i+2] * q_1[k][j][i+2];
double _v_28_ = cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_22_ += _v_28_;
_t_22_ -= cons_4[k][j][i-2] * q_1[k][j][i-2];
double _v_31_ = q_4[k][j][i-2] * q_1[k][j][i-2];
_t_22_ -= _v_31_;
flux_4kc0jc0ic0 -= dxinv0 * 0.2 * _t_22_;
double _t_23_ = q_4[k][j][i+3] * q_1[k][j][i+3];
double _v_32_ = cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_23_ += _v_32_;
_t_23_ -= cons_4[k][j][i-3] * q_1[k][j][i-3];
double _v_35_ = q_4[k][j][i-3] * q_1[k][j][i-3];
_t_23_ -= _v_35_;
flux_4kc0jc0ic0 += dxinv0 * 0.038 * _t_23_;
double _t_24_ = q_4[k][j][i+4] * q_1[k][j][i+4];
double _v_36_ = cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_24_ += _v_36_;
_t_24_ -= cons_4[k][j][i-4] * q_1[k][j][i-4];
double _v_39_ = q_4[k][j][i-4] * q_1[k][j][i-4];
_t_24_ -= _v_39_;
flux_4kc0jc0ic0 -= dxinv0 * 0.0035 * _t_24_;
double _t_27_ = cons_2[k][j+1][i];
_t_27_ -= cons_2[k][j-1][i];
double _t_25_ = dxinv1 * 0.8 * _t_27_;
double _t_28_ = cons_2[k][j+2][i];
_t_28_ -= cons_2[k][j-2][i];
_t_25_ -= dxinv1 * 0.2 * _t_28_;
double _t_29_ = cons_2[k][j+3][i];
_t_29_ -= cons_2[k][j-3][i];
_t_25_ += dxinv1 * 0.038 * _t_29_;
double _t_30_ = cons_2[k][j+4][i];
_t_30_ -= cons_2[k][j-4][i];
_t_25_ -= dxinv1 * 0.0035 * _t_30_;
flux_0kc0jc0ic0 -= _t_25_;
double _t_33_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_33_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_31_ = dxinv1 * 0.8 * _t_33_;
double _t_34_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_34_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_31_ -= dxinv1 * 0.2 * _t_34_;
double _t_35_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_35_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += dxinv1 * 0.038 * _t_35_;
double _t_36_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_36_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_31_ -= dxinv1 * 0.0035 * _t_36_;
flux_1kc0jc0ic0 -= _t_31_;
double _t_39_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_39_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_39_ += q_4[k][j+1][i];
_t_39_ -= q_4[k][j-1][i];
double _t_37_ = dxinv1 * 0.8 * _t_39_;
double _t_40_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_40_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_40_ += q_4[k][j+2][i];
_t_40_ -= q_4[k][j-2][i];
_t_37_ -= dxinv1 * 0.2 * _t_40_;
double _t_41_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_41_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_41_ += q_4[k][j+3][i];
_t_41_ -= q_4[k][j-3][i];
_t_37_ += dxinv1 * 0.038 * _t_41_;
double _t_42_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_42_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_42_ += q_4[k][j+4][i];
_t_42_ -= q_4[k][j-4][i];
_t_37_ -= dxinv1 * 0.0035 * _t_42_;
flux_2kc0jc0ic0 -= _t_37_;
double _t_45_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_45_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_43_ = dxinv1 * 0.8 * _t_45_;
double _t_46_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_46_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_43_ -= dxinv1 * 0.2 * _t_46_;
double _t_47_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_47_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_43_ += dxinv1 * 0.038 * _t_47_;
double _t_48_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_48_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_43_ -= dxinv1 * 0.0035 * _t_48_;
flux_3kc0jc0ic0 -= _t_43_;
double _t_51_ = q_4[k][j+1][i] * q_2[k][j+1][i];
double _v_64_ = cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_51_ += _v_64_;
_t_51_ -= cons_4[k][j-1][i] * q_2[k][j-1][i];
double _v_67_ = q_4[k][j-1][i] * q_2[k][j-1][i];
_t_51_ -= _v_67_;
double _t_49_ = dxinv1 * 0.8 * _t_51_;
double _t_52_ = q_4[k][j+2][i] * q_2[k][j+2][i];
double _v_68_ = cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_52_ += _v_68_;
_t_52_ -= cons_4[k][j-2][i] * q_2[k][j-2][i];
double _v_71_ = q_4[k][j-2][i] * q_2[k][j-2][i];
_t_52_ -= _v_71_;
_t_49_ -= dxinv1 * 0.2 * _t_52_;
double _t_53_ = q_4[k][j+3][i] * q_2[k][j+3][i];
double _v_72_ = cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_53_ += _v_72_;
_t_53_ -= cons_4[k][j-3][i] * q_2[k][j-3][i];
double _v_75_ = q_4[k][j-3][i] * q_2[k][j-3][i];
_t_53_ -= _v_75_;
_t_49_ += dxinv1 * 0.038 * _t_53_;
double _t_54_ = q_4[k][j+4][i] * q_2[k][j+4][i];
double _v_76_ = cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_54_ += _v_76_;
_t_54_ -= cons_4[k][j-4][i] * q_2[k][j-4][i];
double _v_79_ = q_4[k][j-4][i] * q_2[k][j-4][i];
_t_54_ -= _v_79_;
_t_49_ -= dxinv1 * 0.0035 * _t_54_;
flux_4kc0jc0ic0 -= _t_49_;
double _t_57_ = cons_3[k+1][j][i];
_t_57_ -= cons_3[k-1][j][i];
double _t_55_ = dxinv2 * 0.8 * _t_57_;
double _t_58_ = cons_3[k+2][j][i];
_t_58_ -= cons_3[k-2][j][i];
_t_55_ -= dxinv2 * 0.2 * _t_58_;
double _t_59_ = cons_3[k+3][j][i];
_t_59_ -= cons_3[k-3][j][i];
_t_55_ += dxinv2 * 0.038 * _t_59_;
double _t_60_ = cons_3[k+4][j][i];
_t_60_ -= cons_3[k-4][j][i];
_t_55_ -= dxinv2 * 0.0035 * _t_60_;
flux_0kc0jc0ic0 -= _t_55_;
double _t_63_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_63_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
double _t_61_ = dxinv2 * 0.8 * _t_63_;
double _t_64_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_64_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_61_ -= dxinv2 * 0.2 * _t_64_;
double _t_65_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_65_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_61_ += dxinv2 * 0.038 * _t_65_;
double _t_66_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_66_ -= cons_1[k-4][j][i] * q_3[k-4][j][i];
_t_61_ -= dxinv2 * 0.0035 * _t_66_;
flux_1kc0jc0ic0 -= _t_61_;
double _t_69_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_69_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
double _t_67_ = dxinv2 * 0.8 * _t_69_;
double _t_70_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_70_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_67_ -= dxinv2 * 0.2 * _t_70_;
double _t_71_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_71_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_67_ += dxinv2 * 0.038 * _t_71_;
double _t_72_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_72_ -= cons_2[k-4][j][i] * q_3[k-4][j][i];
_t_67_ -= dxinv2 * 0.0035 * _t_72_;
flux_2kc0jc0ic0 -= _t_67_;
double _t_75_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_75_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_75_ += q_4[k+1][j][i];
_t_75_ -= q_4[k-1][j][i];
double _t_73_ = dxinv2 * 0.8 * _t_75_;
double _t_76_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_76_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_76_ += q_4[k+2][j][i];
_t_76_ -= q_4[k-2][j][i];
_t_73_ -= dxinv2 * 0.2 * _t_76_;
double _t_77_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_77_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_77_ += q_4[k+3][j][i];
_t_77_ -= q_4[k-3][j][i];
_t_73_ += dxinv2 * 0.038 * _t_77_;
double _t_78_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_78_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_78_ += q_4[k+4][j][i];
_t_78_ -= q_4[k-4][j][i];
_t_73_ -= dxinv2 * 0.0035 * _t_78_;
flux_3kc0jc0ic0 -= _t_73_;
double _t_81_ = q_4[k+1][j][i] * q_3[k+1][j][i];
double _v_104_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_81_ += _v_104_;
_t_81_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
double _v_107_ = q_4[k-1][j][i] * q_3[k-1][j][i];
_t_81_ -= _v_107_;
double _t_79_ = dxinv2 * 0.8 * _t_81_;
double _t_82_ = q_4[k+2][j][i] * q_3[k+2][j][i];
double _v_108_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_82_ += _v_108_;
_t_82_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
double _v_111_ = q_4[k-2][j][i] * q_3[k-2][j][i];
_t_82_ -= _v_111_;
_t_79_ -= dxinv2 * 0.2 * _t_82_;
double _t_83_ = q_4[k+3][j][i] * q_3[k+3][j][i];
double _v_112_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_83_ += _v_112_;
_t_83_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
double _v_115_ = q_4[k-3][j][i] * q_3[k-3][j][i];
_t_83_ -= _v_115_;
_t_79_ += dxinv2 * 0.038 * _t_83_;
double _t_84_ = q_4[k+4][j][i] * q_3[k+4][j][i];
double _v_116_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_84_ += _v_116_;
_t_84_ -= cons_4[k-4][j][i] * q_3[k-4][j][i];
double _v_119_ = q_4[k-4][j][i] * q_3[k-4][j][i];
_t_84_ -= _v_119_;
_t_79_ -= dxinv2 * 0.0035 * _t_84_;
flux_4kc0jc0ic0 -= _t_79_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_4[k][j][i] = flux_4kc0jc0ic0;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
hipMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_1;
hipMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_2;
hipMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_3;
hipMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_4;
hipMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_1;
hipMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_2;
hipMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_3;
hipMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_4;
hipMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_1;
hipMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_2;
hipMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_3;
hipMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_4;
hipMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
// Computes the hyperbolic flux terms of a CNS-style stencil on a 308^3 grid
// (array extents are hard-coded below): for each interior point it forms an
// 8-point central difference of the convective fluxes along x (dxinv0),
// y (dxinv1) and z (dxinv2) and writes the result into flux_0..flux_4.
// Stencil weights 0.8 / 0.2 / 0.038 / 0.0035 resemble the classic 8th-order
// coefficients 4/5, 1/5, 4/105 (~0.0380952), 1/280 (~0.0035714) --
// NOTE(review): the last two are truncated; confirm the precision loss is
// intentional for this benchmark variant.
// Launch layout: 3D grid, one thread per grid point; no shared memory.
__global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Reinterpret the flat device buffers as [308][308][308] volumes.
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
// Interior-only guard with a 4-cell halo on every face (short-circuit &&
// replaces the original bitwise & on booleans; results are identical since
// every operand is a side-effect-free comparison).
// NOTE(review): all three axes are bounded by N; L and M are accepted but
// unused here -- confirm the domain is cubic.
if (i>=4 && j>=4 && k>=4 && i<=N-5 && j<=N-5 && k<=N-5) {
// --- x-direction contributions (scaled by dxinv0) ---
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
double _t_6_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_6_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_6_ += q_4[k][j][i+1];
_t_6_ -= q_4[k][j][i-1];
double flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_6_;
double _t_7_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_7_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_7_ += q_4[k][j][i+2];
_t_7_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_7_;
double _t_8_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_8_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_8_ += q_4[k][j][i+3];
_t_8_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_8_;
double _t_9_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_9_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_9_ += q_4[k][j][i+4];
_t_9_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_9_;
double _t_11_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_11_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_11_;
double _t_12_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_12_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_12_;
double _t_13_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_13_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_13_;
double _t_14_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_14_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_14_;
double _t_16_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_16_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_16_;
double _t_17_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_17_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_17_;
double _t_18_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_18_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_18_;
double _t_19_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_19_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_19_;
double _t_21_ = q_4[k][j][i+1] * q_1[k][j][i+1];
double _v_24_ = cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_21_ += _v_24_;
_t_21_ -= cons_4[k][j][i-1] * q_1[k][j][i-1];
double _v_27_ = q_4[k][j][i-1] * q_1[k][j][i-1];
_t_21_ -= _v_27_;
double flux_4kc0jc0ic0 = dxinv0 * 0.8 * _t_21_;
double _t_22_ = q_4[k][j][i+2] * q_1[k][j][i+2];
double _v_28_ = cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_22_ += _v_28_;
_t_22_ -= cons_4[k][j][i-2] * q_1[k][j][i-2];
double _v_31_ = q_4[k][j][i-2] * q_1[k][j][i-2];
_t_22_ -= _v_31_;
flux_4kc0jc0ic0 -= dxinv0 * 0.2 * _t_22_;
double _t_23_ = q_4[k][j][i+3] * q_1[k][j][i+3];
double _v_32_ = cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_23_ += _v_32_;
_t_23_ -= cons_4[k][j][i-3] * q_1[k][j][i-3];
double _v_35_ = q_4[k][j][i-3] * q_1[k][j][i-3];
_t_23_ -= _v_35_;
flux_4kc0jc0ic0 += dxinv0 * 0.038 * _t_23_;
double _t_24_ = q_4[k][j][i+4] * q_1[k][j][i+4];
double _v_36_ = cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_24_ += _v_36_;
_t_24_ -= cons_4[k][j][i-4] * q_1[k][j][i-4];
double _v_39_ = q_4[k][j][i-4] * q_1[k][j][i-4];
_t_24_ -= _v_39_;
flux_4kc0jc0ic0 -= dxinv0 * 0.0035 * _t_24_;
// --- y-direction contributions (scaled by dxinv1, subtracted) ---
double _t_27_ = cons_2[k][j+1][i];
_t_27_ -= cons_2[k][j-1][i];
double _t_25_ = dxinv1 * 0.8 * _t_27_;
double _t_28_ = cons_2[k][j+2][i];
_t_28_ -= cons_2[k][j-2][i];
_t_25_ -= dxinv1 * 0.2 * _t_28_;
double _t_29_ = cons_2[k][j+3][i];
_t_29_ -= cons_2[k][j-3][i];
_t_25_ += dxinv1 * 0.038 * _t_29_;
double _t_30_ = cons_2[k][j+4][i];
_t_30_ -= cons_2[k][j-4][i];
_t_25_ -= dxinv1 * 0.0035 * _t_30_;
flux_0kc0jc0ic0 -= _t_25_;
double _t_33_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_33_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_31_ = dxinv1 * 0.8 * _t_33_;
double _t_34_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_34_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_31_ -= dxinv1 * 0.2 * _t_34_;
double _t_35_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_35_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += dxinv1 * 0.038 * _t_35_;
double _t_36_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_36_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_31_ -= dxinv1 * 0.0035 * _t_36_;
flux_1kc0jc0ic0 -= _t_31_;
double _t_39_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_39_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_39_ += q_4[k][j+1][i];
_t_39_ -= q_4[k][j-1][i];
double _t_37_ = dxinv1 * 0.8 * _t_39_;
double _t_40_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_40_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_40_ += q_4[k][j+2][i];
_t_40_ -= q_4[k][j-2][i];
_t_37_ -= dxinv1 * 0.2 * _t_40_;
double _t_41_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_41_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_41_ += q_4[k][j+3][i];
_t_41_ -= q_4[k][j-3][i];
_t_37_ += dxinv1 * 0.038 * _t_41_;
double _t_42_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_42_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_42_ += q_4[k][j+4][i];
_t_42_ -= q_4[k][j-4][i];
_t_37_ -= dxinv1 * 0.0035 * _t_42_;
flux_2kc0jc0ic0 -= _t_37_;
double _t_45_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_45_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_43_ = dxinv1 * 0.8 * _t_45_;
double _t_46_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_46_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_43_ -= dxinv1 * 0.2 * _t_46_;
double _t_47_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_47_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_43_ += dxinv1 * 0.038 * _t_47_;
double _t_48_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_48_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_43_ -= dxinv1 * 0.0035 * _t_48_;
flux_3kc0jc0ic0 -= _t_43_;
double _t_51_ = q_4[k][j+1][i] * q_2[k][j+1][i];
double _v_64_ = cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_51_ += _v_64_;
_t_51_ -= cons_4[k][j-1][i] * q_2[k][j-1][i];
double _v_67_ = q_4[k][j-1][i] * q_2[k][j-1][i];
_t_51_ -= _v_67_;
double _t_49_ = dxinv1 * 0.8 * _t_51_;
double _t_52_ = q_4[k][j+2][i] * q_2[k][j+2][i];
double _v_68_ = cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_52_ += _v_68_;
_t_52_ -= cons_4[k][j-2][i] * q_2[k][j-2][i];
double _v_71_ = q_4[k][j-2][i] * q_2[k][j-2][i];
_t_52_ -= _v_71_;
_t_49_ -= dxinv1 * 0.2 * _t_52_;
double _t_53_ = q_4[k][j+3][i] * q_2[k][j+3][i];
double _v_72_ = cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_53_ += _v_72_;
_t_53_ -= cons_4[k][j-3][i] * q_2[k][j-3][i];
double _v_75_ = q_4[k][j-3][i] * q_2[k][j-3][i];
_t_53_ -= _v_75_;
_t_49_ += dxinv1 * 0.038 * _t_53_;
double _t_54_ = q_4[k][j+4][i] * q_2[k][j+4][i];
double _v_76_ = cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_54_ += _v_76_;
_t_54_ -= cons_4[k][j-4][i] * q_2[k][j-4][i];
double _v_79_ = q_4[k][j-4][i] * q_2[k][j-4][i];
_t_54_ -= _v_79_;
_t_49_ -= dxinv1 * 0.0035 * _t_54_;
flux_4kc0jc0ic0 -= _t_49_;
// --- z-direction contributions (scaled by dxinv2, subtracted) ---
double _t_57_ = cons_3[k+1][j][i];
_t_57_ -= cons_3[k-1][j][i];
double _t_55_ = dxinv2 * 0.8 * _t_57_;
double _t_58_ = cons_3[k+2][j][i];
_t_58_ -= cons_3[k-2][j][i];
_t_55_ -= dxinv2 * 0.2 * _t_58_;
double _t_59_ = cons_3[k+3][j][i];
_t_59_ -= cons_3[k-3][j][i];
_t_55_ += dxinv2 * 0.038 * _t_59_;
double _t_60_ = cons_3[k+4][j][i];
_t_60_ -= cons_3[k-4][j][i];
_t_55_ -= dxinv2 * 0.0035 * _t_60_;
flux_0kc0jc0ic0 -= _t_55_;
double _t_63_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_63_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
double _t_61_ = dxinv2 * 0.8 * _t_63_;
double _t_64_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_64_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_61_ -= dxinv2 * 0.2 * _t_64_;
double _t_65_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_65_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_61_ += dxinv2 * 0.038 * _t_65_;
double _t_66_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_66_ -= cons_1[k-4][j][i] * q_3[k-4][j][i];
_t_61_ -= dxinv2 * 0.0035 * _t_66_;
flux_1kc0jc0ic0 -= _t_61_;
double _t_69_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_69_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
double _t_67_ = dxinv2 * 0.8 * _t_69_;
double _t_70_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_70_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_67_ -= dxinv2 * 0.2 * _t_70_;
double _t_71_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_71_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_67_ += dxinv2 * 0.038 * _t_71_;
double _t_72_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_72_ -= cons_2[k-4][j][i] * q_3[k-4][j][i];
_t_67_ -= dxinv2 * 0.0035 * _t_72_;
flux_2kc0jc0ic0 -= _t_67_;
double _t_75_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_75_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_75_ += q_4[k+1][j][i];
_t_75_ -= q_4[k-1][j][i];
double _t_73_ = dxinv2 * 0.8 * _t_75_;
double _t_76_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_76_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_76_ += q_4[k+2][j][i];
_t_76_ -= q_4[k-2][j][i];
_t_73_ -= dxinv2 * 0.2 * _t_76_;
double _t_77_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_77_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_77_ += q_4[k+3][j][i];
_t_77_ -= q_4[k-3][j][i];
_t_73_ += dxinv2 * 0.038 * _t_77_;
double _t_78_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_78_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_78_ += q_4[k+4][j][i];
_t_78_ -= q_4[k-4][j][i];
_t_73_ -= dxinv2 * 0.0035 * _t_78_;
flux_3kc0jc0ic0 -= _t_73_;
double _t_81_ = q_4[k+1][j][i] * q_3[k+1][j][i];
double _v_104_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_81_ += _v_104_;
_t_81_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
double _v_107_ = q_4[k-1][j][i] * q_3[k-1][j][i];
_t_81_ -= _v_107_;
double _t_79_ = dxinv2 * 0.8 * _t_81_;
double _t_82_ = q_4[k+2][j][i] * q_3[k+2][j][i];
double _v_108_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_82_ += _v_108_;
_t_82_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
double _v_111_ = q_4[k-2][j][i] * q_3[k-2][j][i];
_t_82_ -= _v_111_;
_t_79_ -= dxinv2 * 0.2 * _t_82_;
double _t_83_ = q_4[k+3][j][i] * q_3[k+3][j][i];
double _v_112_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_83_ += _v_112_;
_t_83_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
double _v_115_ = q_4[k-3][j][i] * q_3[k-3][j][i];
_t_83_ -= _v_115_;
_t_79_ += dxinv2 * 0.038 * _t_83_;
double _t_84_ = q_4[k+4][j][i] * q_3[k+4][j][i];
double _v_116_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_84_ += _v_116_;
_t_84_ -= cons_4[k-4][j][i] * q_3[k-4][j][i];
double _v_119_ = q_4[k-4][j][i] * q_3[k-4][j][i];
_t_84_ -= _v_119_;
_t_79_ -= dxinv2 * 0.0035 * _t_84_;
flux_4kc0jc0ic0 -= _t_79_;
// Write back the accumulated flux values (one coalesced store per field).
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_4[k][j][i] = flux_4kc0jc0ic0;
}
}
// Host driver: allocates device buffers for all 13 L*M*N fields, copies them
// to the device, launches hypterm once, and copies the five flux fields back.
// NOTE(review): device allocations are never hipFree'd (leaked on every call),
// hipMemcpy return codes are not checked (only the allocations are), and the
// kernel receives -dxinv0 (sign-flipped) while dxinv1/dxinv2 are passed
// unchanged -- confirm all three are intentional for this benchmark variant.
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
hipMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_1;
hipMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_2;
hipMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_3;
hipMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_4;
hipMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_1;
hipMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_2;
hipMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_3;
hipMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_4;
hipMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_1;
hipMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_2;
hipMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_3;
hipMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_4;
hipMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
// 16x4x4 threads per block; `ceil` is presumably a 2-argument round-up
// division macro defined elsewhere in this file -- TODO confirm.
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
// Copy results back; the blocking hipMemcpy also synchronizes with the
// kernel launch above. (Copy order 0,1,3,4,2 is harmless but unusual.)
hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
}
.file "max-reordered-b.hip"
# check_error(const char*) -- clang-generated x86-64 host code.
# Returns immediately when hipGetLastError() == hipSuccess; otherwise prints
# "CUDA error : %s, %s\n" with the caller's message and the HIP error string,
# then exit(-1). %rbx preserves the message pointer across the HIP calls.
.globl _Z11check_errorPKc # -- Begin function _Z11check_errorPKc
.p2align 4, 0x90
.type _Z11check_errorPKc,@function
_Z11check_errorPKc: # @_Z11check_errorPKc
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
jne .LBB0_2
# %bb.1:
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB0_2:
.cfi_def_cfa_offset 16
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rbx, %rsi
movq %rax, %rdx
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
.Lfunc_end0:
.size _Z11check_errorPKc, .Lfunc_end0-_Z11check_errorPKc
.cfi_endproc
# -- End function
# Device stub for hypterm: packs the 19 kernel arguments (13 pointers in
# registers/stack slots, 3 doubles in xmm0-2, 3 ints on the caller's stack)
# into the kernel-argument pointer array expected by hipLaunchKernel, pops
# the launch configuration pushed by the <<<>>> expansion, and launches.
.globl _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii # -- Begin function _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.p2align 4, 0x90
.type _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii,@function
_Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii: # @_Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.cfi_startproc
# %bb.0:
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 288
movq %rdi, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rcx, 96(%rsp)
movq %r8, 88(%rsp)
movq %r9, 80(%rsp)
movsd %xmm0, 72(%rsp)
movsd %xmm1, 64(%rsp)
movsd %xmm2, 56(%rsp)
# Build the array of pointers-to-arguments at 128(%rsp); stack-passed
# arguments (pointers 7-13 and the int L/M/N) are referenced in place.
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rax
movq %rax, 160(%rsp)
leaq 80(%rsp), %rax
movq %rax, 168(%rsp)
leaq 288(%rsp), %rax
movq %rax, 176(%rsp)
leaq 296(%rsp), %rax
movq %rax, 184(%rsp)
leaq 304(%rsp), %rax
movq %rax, 192(%rsp)
leaq 312(%rsp), %rax
movq %rax, 200(%rsp)
leaq 320(%rsp), %rax
movq %rax, 208(%rsp)
leaq 328(%rsp), %rax
movq %rax, 216(%rsp)
leaq 336(%rsp), %rax
movq %rax, 224(%rsp)
leaq 72(%rsp), %rax
movq %rax, 232(%rsp)
leaq 64(%rsp), %rax
movq %rax, 240(%rsp)
leaq 56(%rsp), %rax
movq %rax, 248(%rsp)
leaq 344(%rsp), %rax
movq %rax, 256(%rsp)
leaq 352(%rsp), %rax
movq %rax, 264(%rsp)
leaq 360(%rsp), %rax
movq %rax, 272(%rsp)
# Retrieve grid/block dims, shared-mem size and stream, then launch.
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $296, %rsp # imm = 0x128
.cfi_adjust_cfa_offset -296
retq
.Lfunc_end1:
.size _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, .Lfunc_end1-_Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.cfi_endproc
# -- End function
# host_code -- clang-generated x86-64 host code for the HIP driver above.
# .LCPI2_0 is the sign-bit mask used to compute -dxinv0 before the launch.
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function host_code
.LCPI2_0:
.quad 0x8000000000000000 # double -0
.quad 0x8000000000000000 # double -0
.text
.globl host_code
.p2align 4, 0x90
.type host_code,@function
host_code: # @host_code
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 592
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# Spill double args and the first six pointer args; r14 = byte count
# sizeof(double)*L*M*N (r13=L, r12=M, r15=N sign-extended from the stack).
movsd %xmm2, 168(%rsp) # 8-byte Spill
movsd %xmm1, 160(%rsp) # 8-byte Spill
movaps %xmm0, 512(%rsp) # 16-byte Spill
movq %r9, %rbx
movq %r8, 64(%rsp) # 8-byte Spill
movq %rcx, 72(%rsp) # 8-byte Spill
movq %rdx, 80(%rsp) # 8-byte Spill
movq %rsi, 88(%rsp) # 8-byte Spill
movq %rdi, %rbp
movslq 664(%rsp), %r15
movslq 656(%rsp), %r12
movslq 648(%rsp), %r13
movq %r13, %r14
imulq %r12, %r14
imulq %r15, %r14
shlq $3, %r14
# 13x: hipMalloc, inlined check_error (jump to .LBB2_* on failure),
# then hipMemcpy host-to-device (4th arg %ecx = 1 = hipMemcpyHostToDevice).
leaq 40(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_1
# %bb.3: # %_Z11check_errorPKc.exit
movq 40(%rsp), %rdi
movq %rbp, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 32(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_4
# %bb.5: # %_Z11check_errorPKc.exit132
movq 32(%rsp), %rdi
movq 88(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_6
# %bb.7: # %_Z11check_errorPKc.exit134
movq 24(%rsp), %rdi
movq 80(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_8
# %bb.9: # %_Z11check_errorPKc.exit136
movq 16(%rsp), %rdi
movq 72(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_10
# %bb.11: # %_Z11check_errorPKc.exit138
movq 8(%rsp), %rdi
movq 64(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 152(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_12
# %bb.13: # %_Z11check_errorPKc.exit140
movq 152(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 144(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_14
# %bb.15: # %_Z11check_errorPKc.exit142
movq 592(%rsp), %rsi
movq 144(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 136(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_16
# %bb.17: # %_Z11check_errorPKc.exit144
movq 600(%rsp), %rsi
movq 136(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 128(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_18
# %bb.19: # %_Z11check_errorPKc.exit146
movq 608(%rsp), %rsi
movq 128(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 120(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_20
# %bb.21: # %_Z11check_errorPKc.exit148
movq 616(%rsp), %rsi
movq 120(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 112(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_22
# %bb.23: # %_Z11check_errorPKc.exit150
movq 624(%rsp), %rsi
movq 112(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 104(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_24
# %bb.25: # %_Z11check_errorPKc.exit152
movq 632(%rsp), %rsi
movq 104(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 96(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_26
# %bb.27: # %_Z11check_errorPKc.exit154
movq 640(%rsp), %rsi
movq 96(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
# Grid dims: round-up division of N/16, M/4, L/4 done branchlessly
# (shift, +1, cmov on the remainder bits); block dims packed into
# rdx/ecx as (16,4,4); then push the call configuration.
movl %r15d, %eax
shrl $4, %eax
leal 1(%rax), %ecx
testb $15, %r15b
cmovel %eax, %ecx
movl %r12d, %eax
shrl $2, %eax
leal 1(%rax), %edi
testb $3, %r12b
cmovel %eax, %edi
movl %r13d, %eax
shrl $2, %eax
leal 1(%rax), %esi
testb $3, %r13b
cmovel %eax, %esi
shlq $32, %rdi
orq %rcx, %rdi
movabsq $17179869200, %rdx # imm = 0x400000010
movl $4, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_29
# %bb.28:
# Inlined device stub: stage the 19 kernel arguments; note the
# xorps with .LCPI2_0 which flips the sign of dxinv0 (-dxinv0 in source).
movq 40(%rsp), %rax
movq %rax, 344(%rsp)
movq 32(%rsp), %rax
movq %rax, 336(%rsp)
movq 24(%rsp), %rax
movq %rax, 328(%rsp)
movq 16(%rsp), %rax
movq %rax, 320(%rsp)
movq 8(%rsp), %rax
movq %rax, 312(%rsp)
movq 152(%rsp), %rax
movq %rax, 304(%rsp)
movq 144(%rsp), %rax
movq %rax, 296(%rsp)
movq 136(%rsp), %rax
movq %rax, 288(%rsp)
movq 128(%rsp), %rax
movq %rax, 280(%rsp)
movq 120(%rsp), %rax
movq %rax, 272(%rsp)
movq 112(%rsp), %rax
movq %rax, 264(%rsp)
movq 104(%rsp), %rax
movq %rax, 256(%rsp)
movq 96(%rsp), %rax
movq %rax, 248(%rsp)
movl %r13d, 60(%rsp)
movl %r12d, 56(%rsp)
movaps 512(%rsp), %xmm0 # 16-byte Reload
xorps .LCPI2_0(%rip), %xmm0
movl %r15d, 52(%rsp)
leaq 344(%rsp), %rax
movq %rax, 352(%rsp)
leaq 336(%rsp), %rax
movq %rax, 360(%rsp)
leaq 328(%rsp), %rax
movq %rax, 368(%rsp)
leaq 320(%rsp), %rax
movq %rax, 376(%rsp)
leaq 312(%rsp), %rax
movq %rax, 384(%rsp)
leaq 304(%rsp), %rax
movq %rax, 392(%rsp)
leaq 296(%rsp), %rax
movq %rax, 400(%rsp)
leaq 288(%rsp), %rax
movq %rax, 408(%rsp)
leaq 280(%rsp), %rax
movq %rax, 416(%rsp)
leaq 272(%rsp), %rax
movq %rax, 424(%rsp)
leaq 264(%rsp), %rax
movq %rax, 432(%rsp)
leaq 256(%rsp), %rax
movq %rax, 440(%rsp)
leaq 248(%rsp), %rax
movq %rax, 448(%rsp)
leaq 240(%rsp), %rax
movq %rax, 456(%rsp)
leaq 232(%rsp), %rax
movq %rax, 464(%rsp)
leaq 224(%rsp), %rax
movq %rax, 472(%rsp)
leaq 60(%rsp), %rax
movq %rax, 480(%rsp)
leaq 56(%rsp), %rax
movq %rax, 488(%rsp)
leaq 52(%rsp), %rax
movq %rax, 496(%rsp)
movlps %xmm0, 240(%rsp)
movsd 160(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd %xmm0, 232(%rsp)
movsd 168(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd %xmm0, 224(%rsp)
leaq 208(%rsp), %rdi
leaq 192(%rsp), %rsi
leaq 184(%rsp), %rdx
leaq 176(%rsp), %rcx
callq __hipPopCallConfiguration
movq 208(%rsp), %rsi
movl 216(%rsp), %edx
movq 192(%rsp), %rcx
movl 200(%rsp), %r8d
leaq 352(%rsp), %r9
movl $_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, %edi
pushq 176(%rsp)
.cfi_adjust_cfa_offset 8
pushq 192(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_29:
# Five device-to-host copies (4th arg %ecx = 2 = hipMemcpyDeviceToHost),
# in source order flux 0,1,3,4,2; then epilogue. No hipFree is emitted.
movq 40(%rsp), %rsi
movq %rbp, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 32(%rsp), %rsi
movq 88(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rsi
movq 72(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movq 64(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rsi
movq 80(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
addq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
# Out-of-line error tails: one per allocation; each loads the matching
# "Failed to allocate..." string and falls through to the shared
# printf+exit(-1) at .LBB2_2.
.LBB2_1:
.cfi_def_cfa_offset 592
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.1, %esi
jmp .LBB2_2
.LBB2_4:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
jmp .LBB2_2
.LBB2_6:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.3, %esi
jmp .LBB2_2
.LBB2_8:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.4, %esi
jmp .LBB2_2
.LBB2_10:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.5, %esi
jmp .LBB2_2
.LBB2_12:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.6, %esi
jmp .LBB2_2
.LBB2_14:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.7, %esi
jmp .LBB2_2
.LBB2_16:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.8, %esi
jmp .LBB2_2
.LBB2_18:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.9, %esi
jmp .LBB2_2
.LBB2_20:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.10, %esi
jmp .LBB2_2
.LBB2_22:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.11, %esi
jmp .LBB2_2
.LBB2_24:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.12, %esi
jmp .LBB2_2
.LBB2_26:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.13, %esi
.LBB2_2:
movq %rax, %rdx
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
.Lfunc_end2:
.size host_code, .Lfunc_end2-host_code
.cfi_endproc
# -- End function
# Module constructor (run via .init_array): registers the fat binary once
# (guarded by __hip_gpubin_handle), registers the hypterm kernel symbol,
# and installs __hip_module_dtor via atexit (tail call).
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (installed by the ctor via atexit): unregisters the
# fat binary if it was registered and clears the handle.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
# Read-only data: error-format and per-allocation message strings, the
# kernel handle (a pointer to the device stub, as required by
# hipLaunchKernel), the fatbin wrapper record, and module bookkeeping.
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error : %s, %s\n"
.size .L.str, 21
.type _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii,@object # @_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.section .rodata,"a",@progbits
.globl _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.p2align 3, 0x0
_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii:
.quad _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.size _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Failed to allocate device memory for flux_0\n"
.size .L.str.1, 45
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Failed to allocate device memory for flux_1\n"
.size .L.str.2, 45
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Failed to allocate device memory for flux_2\n"
.size .L.str.3, 45
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Failed to allocate device memory for flux_3\n"
.size .L.str.4, 45
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Failed to allocate device memory for flux_4\n"
.size .L.str.5, 45
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Failed to allocate device memory for cons_1\n"
.size .L.str.6, 45
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Failed to allocate device memory for cons_2\n"
.size .L.str.7, 45
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Failed to allocate device memory for cons_3\n"
.size .L.str.8, 45
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Failed to allocate device memory for cons_4\n"
.size .L.str.9, 45
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Failed to allocate device memory for q_1\n"
.size .L.str.10, 42
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Failed to allocate device memory for q_2\n"
.size .L.str.11, 42
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "Failed to allocate device memory for q_3\n"
.size .L.str.12, 42
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "Failed to allocate device memory for q_4\n"
.size .L.str.13, 42
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii"
.size .L__unnamed_1, 43
# Fatbin wrapper: magic 0x48495046 ("FPIH"), version 1, pointer to the
# embedded __hip_fatbin (defined elsewhere in the link).
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000c598c_00000000-6_max-reordered-b.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error : %s, %s\n"
.text
.globl _Z11check_errorPKc
.type _Z11check_errorPKc, @function
_Z11check_errorPKc:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaGetLastError@PLT
testl %eax, %eax
jne .L6
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movq %rbx, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z11check_errorPKc, .-_Z11check_errorPKc
.globl _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.type _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, @function
_Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii:
.LFB2083:
.cfi_startproc
endbr64
subq $376, %rsp
.cfi_def_cfa_offset 384
movsd %xmm0, 24(%rsp)
movsd %xmm1, 16(%rsp)
movsd %xmm2, 8(%rsp)
movq %fs:40, %rax
movq %rax, 360(%rsp)
xorl %eax, %eax
movq %rdi, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 208(%rsp)
movq %rsi, 48(%rsp)
leaq 48(%rsp), %rax
movq %rax, 216(%rsp)
movq %rdx, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 224(%rsp)
movq %rcx, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, 232(%rsp)
movq %r8, 72(%rsp)
leaq 72(%rsp), %rax
movq %rax, 240(%rsp)
movq %r9, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 248(%rsp)
movq 384(%rsp), %rax
movq %rax, 88(%rsp)
leaq 88(%rsp), %rax
movq %rax, 256(%rsp)
movq 392(%rsp), %rax
movq %rax, 96(%rsp)
leaq 96(%rsp), %rax
movq %rax, 264(%rsp)
movq 400(%rsp), %rax
movq %rax, 104(%rsp)
leaq 104(%rsp), %rax
movq %rax, 272(%rsp)
movq 408(%rsp), %rax
movq %rax, 112(%rsp)
leaq 112(%rsp), %rax
movq %rax, 280(%rsp)
movq 416(%rsp), %rax
movq %rax, 120(%rsp)
leaq 120(%rsp), %rax
movq %rax, 288(%rsp)
movq 424(%rsp), %rax
movq %rax, 128(%rsp)
leaq 128(%rsp), %rax
movq %rax, 296(%rsp)
movq 432(%rsp), %rax
movq %rax, 136(%rsp)
leaq 136(%rsp), %rax
movq %rax, 304(%rsp)
leaq 24(%rsp), %rax
movq %rax, 312(%rsp)
leaq 16(%rsp), %rax
movq %rax, 320(%rsp)
leaq 8(%rsp), %rax
movq %rax, 328(%rsp)
leaq 440(%rsp), %rax
movq %rax, 336(%rsp)
leaq 448(%rsp), %rax
movq %rax, 344(%rsp)
leaq 456(%rsp), %rax
movq %rax, 352(%rsp)
movl $1, 160(%rsp)
movl $1, 164(%rsp)
movl $1, 168(%rsp)
movl $1, 172(%rsp)
movl $1, 176(%rsp)
movl $1, 180(%rsp)
leaq 152(%rsp), %rcx
leaq 144(%rsp), %rdx
leaq 172(%rsp), %rsi
leaq 160(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 360(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $376, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 152(%rsp)
.cfi_def_cfa_offset 392
pushq 152(%rsp)
.cfi_def_cfa_offset 400
leaq 224(%rsp), %r9
movq 188(%rsp), %rcx
movl 196(%rsp), %r8d
movq 176(%rsp), %rsi
movl 184(%rsp), %edx
leaq _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 384
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, .-_Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.globl _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.type _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, @function
_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 88(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 88(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 88(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
pushq 88(%rsp)
.cfi_def_cfa_offset 48
pushq 88(%rsp)
.cfi_def_cfa_offset 56
pushq 88(%rsp)
.cfi_def_cfa_offset 64
pushq 88(%rsp)
.cfi_def_cfa_offset 72
pushq 88(%rsp)
.cfi_def_cfa_offset 80
pushq 88(%rsp)
.cfi_def_cfa_offset 88
pushq 88(%rsp)
.cfi_def_cfa_offset 96
call _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
addq $88, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, .-_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Failed to allocate device memory for flux_0\n"
.align 8
.LC2:
.string "Failed to allocate device memory for flux_1\n"
.align 8
.LC3:
.string "Failed to allocate device memory for flux_2\n"
.align 8
.LC4:
.string "Failed to allocate device memory for flux_3\n"
.align 8
.LC5:
.string "Failed to allocate device memory for flux_4\n"
.align 8
.LC6:
.string "Failed to allocate device memory for cons_1\n"
.align 8
.LC7:
.string "Failed to allocate device memory for cons_2\n"
.align 8
.LC8:
.string "Failed to allocate device memory for cons_3\n"
.align 8
.LC9:
.string "Failed to allocate device memory for cons_4\n"
.align 8
.LC10:
.string "Failed to allocate device memory for q_1\n"
.align 8
.LC11:
.string "Failed to allocate device memory for q_2\n"
.align 8
.LC12:
.string "Failed to allocate device memory for q_3\n"
.align 8
.LC13:
.string "Failed to allocate device memory for q_4\n"
.text
.globl host_code
.type host_code, @function
host_code:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $264, %rsp
.cfi_def_cfa_offset 320
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
movq 320(%rsp), %r15
movq 328(%rsp), %r14
movq 336(%rsp), %rax
movq %rax, 48(%rsp)
movq 344(%rsp), %rbx
movq %rbx, 56(%rsp)
movq 352(%rsp), %r10
movq %r10, 64(%rsp)
movq 360(%rsp), %r11
movq %r11, 72(%rsp)
movq 368(%rsp), %rbp
movq %rbp, 80(%rsp)
movsd %xmm0, 88(%rsp)
movsd %xmm1, 96(%rsp)
movsd %xmm2, 104(%rsp)
movl 376(%rsp), %ebp
movl 384(%rsp), %r12d
movl 392(%rsp), %r13d
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
movslq %r12d, %rbx
movslq %r13d, %rax
imulq %rax, %rbx
movslq %ebp, %rax
imulq %rax, %rbx
salq $3, %rbx
leaq 120(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC1(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq (%rsp), %rsi
movq 120(%rsp), %rdi
call cudaMemcpy@PLT
leaq 128(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC2(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
movq 128(%rsp), %rdi
call cudaMemcpy@PLT
leaq 136(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC3(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 16(%rsp), %rsi
movq 136(%rsp), %rdi
call cudaMemcpy@PLT
leaq 144(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC4(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 144(%rsp), %rdi
call cudaMemcpy@PLT
leaq 152(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC5(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 32(%rsp), %rsi
movq 152(%rsp), %rdi
call cudaMemcpy@PLT
leaq 160(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC6(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 40(%rsp), %rsi
movq 160(%rsp), %rdi
call cudaMemcpy@PLT
leaq 168(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC7(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq 168(%rsp), %rdi
call cudaMemcpy@PLT
leaq 176(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC8(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 176(%rsp), %rdi
call cudaMemcpy@PLT
leaq 184(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC9(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 48(%rsp), %rsi
movq 184(%rsp), %rdi
call cudaMemcpy@PLT
leaq 192(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC10(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq 192(%rsp), %rdi
call cudaMemcpy@PLT
leaq 200(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC11(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 64(%rsp), %rsi
movq 200(%rsp), %rdi
call cudaMemcpy@PLT
leaq 208(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC12(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq 208(%rsp), %rdi
call cudaMemcpy@PLT
leaq 216(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq .LC13(%rip), %rdi
call _Z11check_errorPKc
movl $1, %ecx
movq %rbx, %rdx
movq 80(%rsp), %rsi
movq 216(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, %eax
shrl $2, %eax
movl %eax, %esi
addl $1, %esi
testb $3, %bpl
cmove %eax, %esi
movl %r12d, %edx
shrl $2, %edx
movl %edx, %eax
addl $1, %eax
testb $3, %r12b
cmove %edx, %eax
movl %r13d, %ecx
shrl $4, %ecx
movl %ecx, %edx
addl $1, %edx
testb $15, %r13b
cmove %ecx, %edx
movl %edx, 236(%rsp)
movl %eax, 240(%rsp)
movl $16, 224(%rsp)
movl $4, 228(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 224(%rsp), %rdx
movl $4, %ecx
movq 236(%rsp), %rdi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L22:
movl $2, %ecx
movq %rbx, %rdx
movq 120(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 128(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 144(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 152(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 136(%rsp), %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movsd 88(%rsp), %xmm0
xorpd .LC14(%rip), %xmm0
pushq %r13
.cfi_def_cfa_offset 328
pushq %r12
.cfi_def_cfa_offset 336
pushq %rbp
.cfi_def_cfa_offset 344
pushq 240(%rsp)
.cfi_def_cfa_offset 352
pushq 240(%rsp)
.cfi_def_cfa_offset 360
pushq 240(%rsp)
.cfi_def_cfa_offset 368
pushq 240(%rsp)
.cfi_def_cfa_offset 376
pushq 240(%rsp)
.cfi_def_cfa_offset 384
pushq 240(%rsp)
.cfi_def_cfa_offset 392
pushq 240(%rsp)
.cfi_def_cfa_offset 400
movsd 184(%rsp), %xmm2
movsd 176(%rsp), %xmm1
movq 240(%rsp), %r9
movq 232(%rsp), %r8
movq 224(%rsp), %rcx
movq 216(%rsp), %rdx
movq 208(%rsp), %rsi
movq 200(%rsp), %rdi
call _Z56__device_stub__Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiiiPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
addq $80, %rsp
.cfi_def_cfa_offset 320
jmp .L22
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size host_code, .-host_code
.section .rodata.str1.8
.align 8
.LC15:
.string "_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC14:
.long 0
.long -2147483648
.long 0
.long 0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "max-reordered-b.hip"
.globl _Z11check_errorPKc # -- Begin function _Z11check_errorPKc
.p2align 4, 0x90
.type _Z11check_errorPKc,@function
_Z11check_errorPKc: # @_Z11check_errorPKc
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
jne .LBB0_2
# %bb.1:
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB0_2:
.cfi_def_cfa_offset 16
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rbx, %rsi
movq %rax, %rdx
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
.Lfunc_end0:
.size _Z11check_errorPKc, .Lfunc_end0-_Z11check_errorPKc
.cfi_endproc
# -- End function
.globl _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii # -- Begin function _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.p2align 4, 0x90
.type _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii,@function
_Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii: # @_Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.cfi_startproc
# %bb.0:
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 288
movq %rdi, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rcx, 96(%rsp)
movq %r8, 88(%rsp)
movq %r9, 80(%rsp)
movsd %xmm0, 72(%rsp)
movsd %xmm1, 64(%rsp)
movsd %xmm2, 56(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rax
movq %rax, 160(%rsp)
leaq 80(%rsp), %rax
movq %rax, 168(%rsp)
leaq 288(%rsp), %rax
movq %rax, 176(%rsp)
leaq 296(%rsp), %rax
movq %rax, 184(%rsp)
leaq 304(%rsp), %rax
movq %rax, 192(%rsp)
leaq 312(%rsp), %rax
movq %rax, 200(%rsp)
leaq 320(%rsp), %rax
movq %rax, 208(%rsp)
leaq 328(%rsp), %rax
movq %rax, 216(%rsp)
leaq 336(%rsp), %rax
movq %rax, 224(%rsp)
leaq 72(%rsp), %rax
movq %rax, 232(%rsp)
leaq 64(%rsp), %rax
movq %rax, 240(%rsp)
leaq 56(%rsp), %rax
movq %rax, 248(%rsp)
leaq 344(%rsp), %rax
movq %rax, 256(%rsp)
leaq 352(%rsp), %rax
movq %rax, 264(%rsp)
leaq 360(%rsp), %rax
movq %rax, 272(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $296, %rsp # imm = 0x128
.cfi_adjust_cfa_offset -296
retq
.Lfunc_end1:
.size _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, .Lfunc_end1-_Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function host_code
.LCPI2_0:
.quad 0x8000000000000000 # double -0
.quad 0x8000000000000000 # double -0
.text
.globl host_code
.p2align 4, 0x90
.type host_code,@function
host_code: # @host_code
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 592
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movsd %xmm2, 168(%rsp) # 8-byte Spill
movsd %xmm1, 160(%rsp) # 8-byte Spill
movaps %xmm0, 512(%rsp) # 16-byte Spill
movq %r9, %rbx
movq %r8, 64(%rsp) # 8-byte Spill
movq %rcx, 72(%rsp) # 8-byte Spill
movq %rdx, 80(%rsp) # 8-byte Spill
movq %rsi, 88(%rsp) # 8-byte Spill
movq %rdi, %rbp
movslq 664(%rsp), %r15
movslq 656(%rsp), %r12
movslq 648(%rsp), %r13
movq %r13, %r14
imulq %r12, %r14
imulq %r15, %r14
shlq $3, %r14
leaq 40(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_1
# %bb.3: # %_Z11check_errorPKc.exit
movq 40(%rsp), %rdi
movq %rbp, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 32(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_4
# %bb.5: # %_Z11check_errorPKc.exit132
movq 32(%rsp), %rdi
movq 88(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_6
# %bb.7: # %_Z11check_errorPKc.exit134
movq 24(%rsp), %rdi
movq 80(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_8
# %bb.9: # %_Z11check_errorPKc.exit136
movq 16(%rsp), %rdi
movq 72(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_10
# %bb.11: # %_Z11check_errorPKc.exit138
movq 8(%rsp), %rdi
movq 64(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 152(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_12
# %bb.13: # %_Z11check_errorPKc.exit140
movq 152(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 144(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_14
# %bb.15: # %_Z11check_errorPKc.exit142
movq 592(%rsp), %rsi
movq 144(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 136(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_16
# %bb.17: # %_Z11check_errorPKc.exit144
movq 600(%rsp), %rsi
movq 136(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 128(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_18
# %bb.19: # %_Z11check_errorPKc.exit146
movq 608(%rsp), %rsi
movq 128(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 120(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_20
# %bb.21: # %_Z11check_errorPKc.exit148
movq 616(%rsp), %rsi
movq 120(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 112(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_22
# %bb.23: # %_Z11check_errorPKc.exit150
movq 624(%rsp), %rsi
movq 112(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 104(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_24
# %bb.25: # %_Z11check_errorPKc.exit152
movq 632(%rsp), %rsi
movq 104(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 96(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
jne .LBB2_26
# %bb.27: # %_Z11check_errorPKc.exit154
movq 640(%rsp), %rsi
movq 96(%rsp), %rdi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r15d, %eax
shrl $4, %eax
leal 1(%rax), %ecx
testb $15, %r15b
cmovel %eax, %ecx
movl %r12d, %eax
shrl $2, %eax
leal 1(%rax), %edi
testb $3, %r12b
cmovel %eax, %edi
movl %r13d, %eax
shrl $2, %eax
leal 1(%rax), %esi
testb $3, %r13b
cmovel %eax, %esi
shlq $32, %rdi
orq %rcx, %rdi
movabsq $17179869200, %rdx # imm = 0x400000010
movl $4, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_29
# %bb.28:
movq 40(%rsp), %rax
movq %rax, 344(%rsp)
movq 32(%rsp), %rax
movq %rax, 336(%rsp)
movq 24(%rsp), %rax
movq %rax, 328(%rsp)
movq 16(%rsp), %rax
movq %rax, 320(%rsp)
movq 8(%rsp), %rax
movq %rax, 312(%rsp)
movq 152(%rsp), %rax
movq %rax, 304(%rsp)
movq 144(%rsp), %rax
movq %rax, 296(%rsp)
movq 136(%rsp), %rax
movq %rax, 288(%rsp)
movq 128(%rsp), %rax
movq %rax, 280(%rsp)
movq 120(%rsp), %rax
movq %rax, 272(%rsp)
movq 112(%rsp), %rax
movq %rax, 264(%rsp)
movq 104(%rsp), %rax
movq %rax, 256(%rsp)
movq 96(%rsp), %rax
movq %rax, 248(%rsp)
movl %r13d, 60(%rsp)
movl %r12d, 56(%rsp)
movaps 512(%rsp), %xmm0 # 16-byte Reload
xorps .LCPI2_0(%rip), %xmm0
movl %r15d, 52(%rsp)
leaq 344(%rsp), %rax
movq %rax, 352(%rsp)
leaq 336(%rsp), %rax
movq %rax, 360(%rsp)
leaq 328(%rsp), %rax
movq %rax, 368(%rsp)
leaq 320(%rsp), %rax
movq %rax, 376(%rsp)
leaq 312(%rsp), %rax
movq %rax, 384(%rsp)
leaq 304(%rsp), %rax
movq %rax, 392(%rsp)
leaq 296(%rsp), %rax
movq %rax, 400(%rsp)
leaq 288(%rsp), %rax
movq %rax, 408(%rsp)
leaq 280(%rsp), %rax
movq %rax, 416(%rsp)
leaq 272(%rsp), %rax
movq %rax, 424(%rsp)
leaq 264(%rsp), %rax
movq %rax, 432(%rsp)
leaq 256(%rsp), %rax
movq %rax, 440(%rsp)
leaq 248(%rsp), %rax
movq %rax, 448(%rsp)
leaq 240(%rsp), %rax
movq %rax, 456(%rsp)
leaq 232(%rsp), %rax
movq %rax, 464(%rsp)
leaq 224(%rsp), %rax
movq %rax, 472(%rsp)
leaq 60(%rsp), %rax
movq %rax, 480(%rsp)
leaq 56(%rsp), %rax
movq %rax, 488(%rsp)
leaq 52(%rsp), %rax
movq %rax, 496(%rsp)
movlps %xmm0, 240(%rsp)
movsd 160(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd %xmm0, 232(%rsp)
movsd 168(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movsd %xmm0, 224(%rsp)
leaq 208(%rsp), %rdi
leaq 192(%rsp), %rsi
leaq 184(%rsp), %rdx
leaq 176(%rsp), %rcx
callq __hipPopCallConfiguration
movq 208(%rsp), %rsi
movl 216(%rsp), %edx
movq 192(%rsp), %rcx
movl 200(%rsp), %r8d
leaq 352(%rsp), %r9
movl $_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, %edi
pushq 176(%rsp)
.cfi_adjust_cfa_offset 8
pushq 192(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_29:
movq 40(%rsp), %rsi
movq %rbp, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 32(%rsp), %rsi
movq 88(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rsi
movq 72(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movq 64(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rsi
movq 80(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
addq $536, %rsp # imm = 0x218
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_1:
.cfi_def_cfa_offset 592
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.1, %esi
jmp .LBB2_2
.LBB2_4:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.2, %esi
jmp .LBB2_2
.LBB2_6:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.3, %esi
jmp .LBB2_2
.LBB2_8:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.4, %esi
jmp .LBB2_2
.LBB2_10:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.5, %esi
jmp .LBB2_2
.LBB2_12:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.6, %esi
jmp .LBB2_2
.LBB2_14:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.7, %esi
jmp .LBB2_2
.LBB2_16:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.8, %esi
jmp .LBB2_2
.LBB2_18:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.9, %esi
jmp .LBB2_2
.LBB2_20:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.10, %esi
jmp .LBB2_2
.LBB2_22:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.11, %esi
jmp .LBB2_2
.LBB2_24:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.12, %esi
jmp .LBB2_2
.LBB2_26:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movl $.L.str.13, %esi
.LBB2_2:
movq %rax, %rdx
xorl %eax, %eax
callq printf
movl $-1, %edi
callq exit
.Lfunc_end2:
.size host_code, .Lfunc_end2-host_code
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "CUDA error : %s, %s\n"
.size .L.str, 21
.type _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii,@object # @_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.section .rodata,"a",@progbits
.globl _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.p2align 3, 0x0
_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii:
.quad _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.size _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Failed to allocate device memory for flux_0\n"
.size .L.str.1, 45
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Failed to allocate device memory for flux_1\n"
.size .L.str.2, 45
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Failed to allocate device memory for flux_2\n"
.size .L.str.3, 45
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Failed to allocate device memory for flux_3\n"
.size .L.str.4, 45
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Failed to allocate device memory for flux_4\n"
.size .L.str.5, 45
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Failed to allocate device memory for cons_1\n"
.size .L.str.6, 45
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Failed to allocate device memory for cons_2\n"
.size .L.str.7, 45
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Failed to allocate device memory for cons_3\n"
.size .L.str.8, 45
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Failed to allocate device memory for cons_4\n"
.size .L.str.9, 45
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Failed to allocate device memory for q_1\n"
.size .L.str.10, 42
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Failed to allocate device memory for q_2\n"
.size .L.str.11, 42
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "Failed to allocate device memory for q_3\n"
.size .L.str.12, 42
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "Failed to allocate device memory for q_4\n"
.size .L.str.13, 42
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii"
.size .L__unnamed_1, 43
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7hyptermPdS_S_S_S_S_S_S_S_S_S_S_S_dddiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"
__global__ void foo(float* A) {
A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = 2.4f;
} | code for sm_80
Function : _Z3fooPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ MOV R5, 0x4019999a ; /* 0x4019999a00057802 */
/* 0x000fe20000000f00 */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0050*/ ISETP.NE.AND P0, PT, R0.reuse, RZ, PT ; /* 0x000000ff0000720c */
/* 0x041fe20003f05270 */
/*0060*/ IMAD.SHL.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007824 */
/* 0x000fca00078e00ff */
/*0070*/ SEL R0, R0, 0x1, P0 ; /* 0x0000000100007807 */
/* 0x000fca0000000000 */
/*0080*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0003 */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"
__global__ void foo(float* A) {
A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = 2.4f;
} | .file "tmpxft_00068855_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z22__device_stub__Z3fooPfPf
.type _Z22__device_stub__Z3fooPfPf, @function
_Z22__device_stub__Z3fooPfPf:
.LFB2051:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3fooPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z22__device_stub__Z3fooPfPf, .-_Z22__device_stub__Z3fooPfPf
.globl _Z3fooPf
.type _Z3fooPf, @function
_Z3fooPf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z22__device_stub__Z3fooPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z3fooPf, .-_Z3fooPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3fooPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3fooPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"
__global__ void foo(float* A) {
A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = 2.4f;
} | //pass
//--blockDim=64 --gridDim=1 --no-inline
#include "hip/hip_runtime.h"
__global__ void foo(float* A) {
A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = 2.4f;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //pass
//--blockDim=64 --gridDim=1 --no-inline
#include "hip/hip_runtime.h"
__global__ void foo(float* A) {
A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = 2.4f;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3fooPf
.globl _Z3fooPf
.p2align 8
.type _Z3fooPf,@function
_Z3fooPf:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 1, v0
v_cmp_ne_u32_e32 vcc_lo, 0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v0, 1, v1 :: v_dual_mov_b32 v1, 0x4019999a
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3fooPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3fooPf, .Lfunc_end0-_Z3fooPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3fooPf
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z3fooPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //pass
//--blockDim=64 --gridDim=1 --no-inline
#include "hip/hip_runtime.h"
__global__ void foo(float* A) {
A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = 2.4f;
} | .text
.file "kernel.hip"
.globl _Z18__device_stub__fooPf # -- Begin function _Z18__device_stub__fooPf
.p2align 4, 0x90
.type _Z18__device_stub__fooPf,@function
_Z18__device_stub__fooPf: # @_Z18__device_stub__fooPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z3fooPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z18__device_stub__fooPf, .Lfunc_end0-_Z18__device_stub__fooPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3fooPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3fooPf,@object # @_Z3fooPf
.section .rodata,"a",@progbits
.globl _Z3fooPf
.p2align 3, 0x0
_Z3fooPf:
.quad _Z18__device_stub__fooPf
.size _Z3fooPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3fooPf"
.size .L__unnamed_1, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__fooPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3fooPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3fooPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ MOV R5, 0x4019999a ; /* 0x4019999a00057802 */
/* 0x000fe20000000f00 */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0050*/ ISETP.NE.AND P0, PT, R0.reuse, RZ, PT ; /* 0x000000ff0000720c */
/* 0x041fe20003f05270 */
/*0060*/ IMAD.SHL.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007824 */
/* 0x000fca00078e00ff */
/*0070*/ SEL R0, R0, 0x1, P0 ; /* 0x0000000100007807 */
/* 0x000fca0000000000 */
/*0080*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0003 */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3fooPf
.globl _Z3fooPf
.p2align 8
.type _Z3fooPf,@function
_Z3fooPf:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 1, v0
v_cmp_ne_u32_e32 vcc_lo, 0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v0, 1, v1 :: v_dual_mov_b32 v1, 0x4019999a
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3fooPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3fooPf, .Lfunc_end0-_Z3fooPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3fooPf
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z3fooPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00068855_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z22__device_stub__Z3fooPfPf
.type _Z22__device_stub__Z3fooPfPf, @function
_Z22__device_stub__Z3fooPfPf:
.LFB2051:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3fooPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z22__device_stub__Z3fooPfPf, .-_Z22__device_stub__Z3fooPfPf
.globl _Z3fooPf
.type _Z3fooPf, @function
_Z3fooPf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z22__device_stub__Z3fooPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z3fooPf, .-_Z3fooPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3fooPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3fooPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z18__device_stub__fooPf # -- Begin function _Z18__device_stub__fooPf
.p2align 4, 0x90
.type _Z18__device_stub__fooPf,@function
_Z18__device_stub__fooPf: # @_Z18__device_stub__fooPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z3fooPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z18__device_stub__fooPf, .Lfunc_end0-_Z18__device_stub__fooPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3fooPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3fooPf,@object # @_Z3fooPf
.section .rodata,"a",@progbits
.globl _Z3fooPf
.p2align 3, 0x0
_Z3fooPf:
.quad _Z18__device_stub__fooPf
.size _Z3fooPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3fooPf"
.size .L__unnamed_1, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__fooPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3fooPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#define N 128*256
#define THREADS_PER_BLOCK 256
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel to add N integers using threads and blocks
__global__ void add(int *a, int *b, int *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
// Main program
int main(void){
int *a,*b,*c; // Host copies
int *a_dev,*b_dev,*c_dev; // Device copies
int size = N*sizeof(int); // Size of N integer
// Allocate host memory
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate device memory
cudaMalloc( (void**)&a_dev, size);
cudaMalloc( (void**)&b_dev, size);
cudaMalloc( (void**)&c_dev, size);
// Initialize
for (int i=0; i<N; i++){
a[i] = i;
b[i] = i;
}
// Copy inputs to device
cudaMemcpy( a_dev, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( b_dev, b, size, cudaMemcpyHostToDevice );
// Launch kernel on device
add <<< N_BLOCKS , THREADS_PER_BLOCK >>> (a_dev,b_dev,c_dev);
// Copy device result back to host
cudaMemcpy( c, c_dev, size, cudaMemcpyDeviceToHost );
// Print result
for (int i=0; i<N; i++)
printf("%d\n",c[i]);
// Free device memory
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(c_dev);
// Free host memory
free(a);
free(b);
free(c);
return 0;
} | code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#define N 128*256
#define THREADS_PER_BLOCK 256
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel to add N integers using threads and blocks
__global__ void add(int *a, int *b, int *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
// Main program
int main(void){
int *a,*b,*c; // Host copies
int *a_dev,*b_dev,*c_dev; // Device copies
int size = N*sizeof(int); // Size of N integer
// Allocate host memory
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate device memory
cudaMalloc( (void**)&a_dev, size);
cudaMalloc( (void**)&b_dev, size);
cudaMalloc( (void**)&c_dev, size);
// Initialize
for (int i=0; i<N; i++){
a[i] = i;
b[i] = i;
}
// Copy inputs to device
cudaMemcpy( a_dev, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( b_dev, b, size, cudaMemcpyHostToDevice );
// Launch kernel on device
add <<< N_BLOCKS , THREADS_PER_BLOCK >>> (a_dev,b_dev,c_dev);
// Copy device result back to host
cudaMemcpy( c, c_dev, size, cudaMemcpyDeviceToHost );
// Print result
for (int i=0; i<N; i++)
printf("%d\n",c[i]);
// Free device memory
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(c_dev);
// Free host memory
free(a);
free(b);
free(c);
return 0;
} | .file "tmpxft_001a1348_00000000-6_sum_thr_blo.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $131072, %edi
call malloc@PLT
movq %rax, %r12
movl $131072, %edi
call malloc@PLT
movq %rax, %rbp
movl $131072, %edi
call malloc@PLT
movq %rax, %r14
leaq 8(%rsp), %rdi
movl $131072, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $131072, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $131072, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl %eax, (%r12,%rax,4)
movl %eax, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $32768, %rax
jne .L12
movl $1, %ecx
movl $131072, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $131072, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $128, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $131072, %edx
movq 24(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movq %r14, %rbx
leaq 131072(%r14), %r15
leaq .LC0(%rip), %r13
.L14:
movl (%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r15, %rbx
jne .L14
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#define N 128*256
#define THREADS_PER_BLOCK 256
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel to add N integers using threads and blocks
__global__ void add(int *a, int *b, int *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
// Main program
int main(void){
int *a,*b,*c; // Host copies
int *a_dev,*b_dev,*c_dev; // Device copies
int size = N*sizeof(int); // Size of N integer
// Allocate host memory
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate device memory
cudaMalloc( (void**)&a_dev, size);
cudaMalloc( (void**)&b_dev, size);
cudaMalloc( (void**)&c_dev, size);
// Initialize
for (int i=0; i<N; i++){
a[i] = i;
b[i] = i;
}
// Copy inputs to device
cudaMemcpy( a_dev, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( b_dev, b, size, cudaMemcpyHostToDevice );
// Launch kernel on device
add <<< N_BLOCKS , THREADS_PER_BLOCK >>> (a_dev,b_dev,c_dev);
// Copy device result back to host
cudaMemcpy( c, c_dev, size, cudaMemcpyDeviceToHost );
// Print result
for (int i=0; i<N; i++)
printf("%d\n",c[i]);
// Free device memory
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(c_dev);
// Free host memory
free(a);
free(b);
free(c);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 128*256
#define THREADS_PER_BLOCK 256
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel to add N integers using threads and blocks
__global__ void add(int *a, int *b, int *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
// Main program
int main(void){
int *a,*b,*c; // Host copies
int *a_dev,*b_dev,*c_dev; // Device copies
int size = N*sizeof(int); // Size of N integer
// Allocate host memory
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate device memory
hipMalloc( (void**)&a_dev, size);
hipMalloc( (void**)&b_dev, size);
hipMalloc( (void**)&c_dev, size);
// Initialize
for (int i=0; i<N; i++){
a[i] = i;
b[i] = i;
}
// Copy inputs to device
hipMemcpy( a_dev, a, size, hipMemcpyHostToDevice );
hipMemcpy( b_dev, b, size, hipMemcpyHostToDevice );
// Launch kernel on device
add <<< N_BLOCKS , THREADS_PER_BLOCK >>> (a_dev,b_dev,c_dev);
// Copy device result back to host
hipMemcpy( c, c_dev, size, hipMemcpyDeviceToHost );
// Print result
for (int i=0; i<N; i++)
printf("%d\n",c[i]);
// Free device memory
hipFree(a_dev);
hipFree(b_dev);
hipFree(c_dev);
// Free host memory
free(a);
free(b);
free(c);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 128*256
#define THREADS_PER_BLOCK 256
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel to add N integers using threads and blocks
__global__ void add(int *a, int *b, int *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
// Main program
int main(void){
int *a,*b,*c; // Host copies
int *a_dev,*b_dev,*c_dev; // Device copies
int size = N*sizeof(int); // Size of N integer
// Allocate host memory
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate device memory
hipMalloc( (void**)&a_dev, size);
hipMalloc( (void**)&b_dev, size);
hipMalloc( (void**)&c_dev, size);
// Initialize
for (int i=0; i<N; i++){
a[i] = i;
b[i] = i;
}
// Copy inputs to device
hipMemcpy( a_dev, a, size, hipMemcpyHostToDevice );
hipMemcpy( b_dev, b, size, hipMemcpyHostToDevice );
// Launch kernel on device
add <<< N_BLOCKS , THREADS_PER_BLOCK >>> (a_dev,b_dev,c_dev);
// Copy device result back to host
hipMemcpy( c, c_dev, size, hipMemcpyDeviceToHost );
// Print result
for (int i=0; i<N; i++)
printf("%d\n",c[i]);
// Free device memory
hipFree(a_dev);
hipFree(b_dev);
hipFree(c_dev);
// Free host memory
free(a);
free(b);
free(c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 128*256
#define THREADS_PER_BLOCK 256
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel to add N integers using threads and blocks
__global__ void add(int *a, int *b, int *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
// Main program
int main(void){
int *a,*b,*c; // Host copies
int *a_dev,*b_dev,*c_dev; // Device copies
int size = N*sizeof(int); // Size of N integer
// Allocate host memory
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (size);
// Allocate device memory
hipMalloc( (void**)&a_dev, size);
hipMalloc( (void**)&b_dev, size);
hipMalloc( (void**)&c_dev, size);
// Initialize
for (int i=0; i<N; i++){
a[i] = i;
b[i] = i;
}
// Copy inputs to device
hipMemcpy( a_dev, a, size, hipMemcpyHostToDevice );
hipMemcpy( b_dev, b, size, hipMemcpyHostToDevice );
// Launch kernel on device
add <<< N_BLOCKS , THREADS_PER_BLOCK >>> (a_dev,b_dev,c_dev);
// Copy device result back to host
hipMemcpy( c, c_dev, size, hipMemcpyDeviceToHost );
// Print result
for (int i=0; i<N; i++)
printf("%d\n",c[i]);
// Free device memory
hipFree(a_dev);
hipFree(b_dev);
hipFree(c_dev);
// Free host memory
free(a);
free(b);
free(c);
return 0;
} | .text
.file "sum_thr_blo.hip"
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $131072, %edi # imm = 0x20000
callq malloc
movq %rax, %rbx
movl $131072, %edi # imm = 0x20000
callq malloc
movq %rax, %r14
movl $131072, %edi # imm = 0x20000
callq malloc
movq %rax, %r15
leaq 16(%rsp), %rdi
movl $131072, %esi # imm = 0x20000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $131072, %esi # imm = 0x20000
callq hipMalloc
movq %rsp, %rdi
movl $131072, %esi # imm = 0x20000
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
movl %eax, (%r14,%rax,4)
incq %rax
cmpq $32768, %rax # imm = 0x8000
jne .LBB1_1
# %bb.2:
movq 16(%rsp), %rdi
movl $131072, %edx # imm = 0x20000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $131072, %edx # imm = 0x20000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967424, %rdi # imm = 0x100000080
leaq 128(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
movl $131072, %edx # imm = 0x20000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl (%r15,%r12,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq $32768, %r12 # imm = 0x8000
jne .LBB1_5
# %bb.6:
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d\n"
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00b0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001a1348_00000000-6_sum_thr_blo.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $131072, %edi
call malloc@PLT
movq %rax, %r12
movl $131072, %edi
call malloc@PLT
movq %rax, %rbp
movl $131072, %edi
call malloc@PLT
movq %rax, %r14
leaq 8(%rsp), %rdi
movl $131072, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $131072, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $131072, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl %eax, (%r12,%rax,4)
movl %eax, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $32768, %rax
jne .L12
movl $1, %ecx
movl $131072, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $131072, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $128, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $131072, %edx
movq 24(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movq %r14, %rbx
leaq 131072(%r14), %r15
leaq .LC0(%rip), %r13
.L14:
movl (%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r15, %rbx
jne .L14
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sum_thr_blo.hip"
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $131072, %edi # imm = 0x20000
callq malloc
movq %rax, %rbx
movl $131072, %edi # imm = 0x20000
callq malloc
movq %rax, %r14
movl $131072, %edi # imm = 0x20000
callq malloc
movq %rax, %r15
leaq 16(%rsp), %rdi
movl $131072, %esi # imm = 0x20000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $131072, %esi # imm = 0x20000
callq hipMalloc
movq %rsp, %rdi
movl $131072, %esi # imm = 0x20000
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, (%rbx,%rax,4)
movl %eax, (%r14,%rax,4)
incq %rax
cmpq $32768, %rax # imm = 0x8000
jne .LBB1_1
# %bb.2:
movq 16(%rsp), %rdi
movl $131072, %edx # imm = 0x20000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $131072, %edx # imm = 0x20000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967424, %rdi # imm = 0x100000080
leaq 128(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq (%rsp), %rsi
movl $131072, %edx # imm = 0x20000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl (%r15,%r12,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
incq %r12
cmpq $32768, %r12 # imm = 0x8000
jne .LBB1_5
# %bb.6:
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d\n"
.size .L.str, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
Program name: gld_throughput.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Purpose: Program to demonstrate global memory efficiency
Description: A simple vector addition kernel is written which performs strided access to arrays.
Because of different offset values global memory load effeciency varies. The data
type is float (single precision) hence each thread request 4 bytes of data.
Depends on L1 cache size and different values of offset observe global memory
load efficiency. Use following profiling command
1. nvprof --devices 0 --metrics gld_transactions ./a.out
This metrics returns total number of global memory load transactions
2. nvprof --devices 0 --metrics gld_efficiency ./a.out
This metrics returns global memory load efficiency
To understand Memory coalesing refer following linksW
Link1: https://www.youtube.com/watch?v=mLxZyWOI340
Link2: https://devblogs.nvidia.com/how-access-global-memory-efficiently-cuda-c-kernels/
Link3: https://stackoverflow.com/questions/5041328/in-cuda-what-is-memory-coalescing-and-how-is-it-achieved
*/
#include<stdio.h>
#include<stdlib.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error %s %d", __FILE__, __LINE__); \
printf("\n Code %d Reason %s \n",error, cudaGetErrorString(error)); \
exit(1); \
} \
}
__global__ void testKernel(float *d_A, float *d_B, float *d_C, int N, int offset)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int k = gid + offset;
if(k < N)
{
d_C[gid] = d_A[k] + d_B[k];
}
}
void initData(float *h_A, float *h_B, float *hostRef, float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
h_A[i] = 1.0;
h_B[i] = 2.0;
hostRef[i] = 0.0;
gpuRef[i] = 0.0;
}
}
void hostFunc(float *h_A, float *h_B, float *hostRef, int N, int offset)
{
int idx;
for(int k = 0, idx = offset; idx < N; idx++,k++)
{
hostRef[k] = h_A[idx] + h_B[idx];
}
}
int compareResults(float *hostRef,float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
if(hostRef[i] != gpuRef[i])
{
return 1;
}
}
return 0;
}
int main()
{
int N;
N = 2 << 20; // # elements in arrays
size_t nBytes = N * sizeof(float); // Size reqiored to stoare arrays
int offset;// To determine the stride size in kernel
printf("\n Enter offset value: ");
scanf("%d",&offset);
/********************* Memory allocation at host side ****************************/
float *h_A = (float *)malloc(nBytes); // Host side input array h_A
float *h_B = (float *)malloc(nBytes); // Host side input array h_B
float *hostRef = (float *)malloc(nBytes); // Host side reference output array hostRef
float *gpuRef = (float *)malloc(nBytes); // Device side reference output array gpuRef
initData(h_A,h_B,hostRef, gpuRef, N); // Data initialization function
hostFunc(h_A, h_B, hostRef, N, offset);
/********************* Memory allocation at device side ****************************/
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((void **)&d_A, nBytes)); // Device side input array d_A
CHECK(cudaMalloc((void **)&d_B, nBytes)); // Device side input array d_B
CHECK(cudaMalloc((void **)&d_C, nBytes)); // Device side output array d_C
/********************* Data transfer from host to device ****************************/
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
/******************************* Kernel Launch **************************************/
int numT, numB;
numT = 32; // # threads per block
numB = ceil(N/(float)numT); // # blocks
testKernel<<<numB,numT>>>(d_A, d_B, d_C, N, offset); // Kernel to check global memory efficiency
/********************* Data transfer from host to device ****************************/
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); // Transfer data from device to host
int check = compareResults(hostRef,gpuRef, N);
if(check!= 0)
{
printf("\n ALERT!!! CPU and GPU side results are not matching!!!");
}
/******************************* Free device and Host Memories ********************************/
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return(0);
} | code for sm_80
Function : _Z10testKernelPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ IADD3 R4, R6, c[0x0][0x17c], RZ ; /* 0x00005f0006047a10 */
/* 0x000fc80007ffe0ff */
/*0050*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */
/* 0x000fda0003f06270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0090*/ IMAD.WIDE R2, R4, R7, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fc800078e0207 */
/*00a0*/ IMAD.WIDE R4, R4, R7.reuse, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x080fe400078e0207 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00e0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00f0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
Program name: gld_throughput.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Purpose: Program to demonstrate global memory efficiency
Description: A simple vector addition kernel is written which performs strided access to arrays.
Because of different offset values global memory load effeciency varies. The data
type is float (single precision) hence each thread request 4 bytes of data.
Depends on L1 cache size and different values of offset observe global memory
load efficiency. Use following profiling command
1. nvprof --devices 0 --metrics gld_transactions ./a.out
This metrics returns total number of global memory load transactions
2. nvprof --devices 0 --metrics gld_efficiency ./a.out
This metrics returns global memory load efficiency
To understand Memory coalesing refer following linksW
Link1: https://www.youtube.com/watch?v=mLxZyWOI340
Link2: https://devblogs.nvidia.com/how-access-global-memory-efficiently-cuda-c-kernels/
Link3: https://stackoverflow.com/questions/5041328/in-cuda-what-is-memory-coalescing-and-how-is-it-achieved
*/
#include<stdio.h>
#include<stdlib.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error %s %d", __FILE__, __LINE__); \
printf("\n Code %d Reason %s \n",error, cudaGetErrorString(error)); \
exit(1); \
} \
}
__global__ void testKernel(float *d_A, float *d_B, float *d_C, int N, int offset)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int k = gid + offset;
if(k < N)
{
d_C[gid] = d_A[k] + d_B[k];
}
}
void initData(float *h_A, float *h_B, float *hostRef, float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
h_A[i] = 1.0;
h_B[i] = 2.0;
hostRef[i] = 0.0;
gpuRef[i] = 0.0;
}
}
void hostFunc(float *h_A, float *h_B, float *hostRef, int N, int offset)
{
int idx;
for(int k = 0, idx = offset; idx < N; idx++,k++)
{
hostRef[k] = h_A[idx] + h_B[idx];
}
}
int compareResults(float *hostRef,float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
if(hostRef[i] != gpuRef[i])
{
return 1;
}
}
return 0;
}
int main()
{
int N;
N = 2 << 20; // # elements in arrays
size_t nBytes = N * sizeof(float); // Size reqiored to stoare arrays
int offset;// To determine the stride size in kernel
printf("\n Enter offset value: ");
scanf("%d",&offset);
/********************* Memory allocation at host side ****************************/
float *h_A = (float *)malloc(nBytes); // Host side input array h_A
float *h_B = (float *)malloc(nBytes); // Host side input array h_B
float *hostRef = (float *)malloc(nBytes); // Host side reference output array hostRef
float *gpuRef = (float *)malloc(nBytes); // Device side reference output array gpuRef
initData(h_A,h_B,hostRef, gpuRef, N); // Data initialization function
hostFunc(h_A, h_B, hostRef, N, offset);
/********************* Memory allocation at device side ****************************/
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((void **)&d_A, nBytes)); // Device side input array d_A
CHECK(cudaMalloc((void **)&d_B, nBytes)); // Device side input array d_B
CHECK(cudaMalloc((void **)&d_C, nBytes)); // Device side output array d_C
/********************* Data transfer from host to device ****************************/
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
/******************************* Kernel Launch **************************************/
int numT, numB;
numT = 32; // # threads per block
numB = ceil(N/(float)numT); // # blocks
testKernel<<<numB,numT>>>(d_A, d_B, d_C, N, offset); // Kernel to check global memory efficiency
/********************* Data transfer from host to device ****************************/
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); // Transfer data from device to host
int check = compareResults(hostRef,gpuRef, N);
if(check!= 0)
{
printf("\n ALERT!!! CPU and GPU side results are not matching!!!");
}
/******************************* Free device and Host Memories ********************************/
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return(0);
} | .file "tmpxft_00145458_00000000-6_11_gld_throughput.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8initDataPfS_S_S_i
.type _Z8initDataPfS_S_S_i, @function
_Z8initDataPfS_S_S_i:
.LFB2057:
.cfi_startproc
endbr64
movq %rsi, %r9
testl %r8d, %r8d
jle .L3
movslq %r8d, %r8
leaq 0(,%r8,4), %rsi
movl $0, %eax
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L5:
movss %xmm1, (%rdi,%rax)
movss %xmm0, (%r9,%rax)
movl $0x00000000, (%rdx,%rax)
movl $0x00000000, (%rcx,%rax)
addq $4, %rax
cmpq %rsi, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z8initDataPfS_S_S_i, .-_Z8initDataPfS_S_S_i
.globl _Z8hostFuncPfS_S_ii
.type _Z8hostFuncPfS_S_ii, @function
_Z8hostFuncPfS_S_ii:
.LFB2058:
.cfi_startproc
endbr64
movq %rdi, %rax
cmpl %ecx, %r8d
jge .L7
movslq %r8d, %r8
movq %r8, %rdi
negq %rdi
leaq (%rdx,%rdi,4), %rdx
.L9:
movss (%rax,%r8,4), %xmm0
addss (%rsi,%r8,4), %xmm0
movss %xmm0, (%rdx,%r8,4)
addq $1, %r8
cmpl %r8d, %ecx
jg .L9
.L7:
ret
.cfi_endproc
.LFE2058:
.size _Z8hostFuncPfS_S_ii, .-_Z8hostFuncPfS_S_ii
.globl _Z14compareResultsPfS_i
.type _Z14compareResultsPfS_i, @function
_Z14compareResultsPfS_i:
.LFB2059:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L15
movslq %edx, %rdx
salq $2, %rdx
movl $0, %eax
.L14:
movss (%rdi,%rax), %xmm0
ucomiss (%rsi,%rax), %xmm0
jp .L16
jne .L16
addq $4, %rax
cmpq %rdx, %rax
jne .L14
movl $0, %eax
ret
.L15:
movl $0, %eax
ret
.L16:
movl $1, %eax
ret
.cfi_endproc
.LFE2059:
.size _Z14compareResultsPfS_i, .-_Z14compareResultsPfS_i
.globl _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii
.type _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii, @function
_Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10testKernelPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii, .-_Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii
.globl _Z10testKernelPfS_S_ii
.type _Z10testKernelPfS_S_ii, @function
_Z10testKernelPfS_S_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z10testKernelPfS_S_ii, .-_Z10testKernelPfS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "\n Enter offset value: "
.LC4:
.string "%d"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "/home/ubuntu/Datasets/stackv2/train-structured/NileshchandraPikle/Introduction-to-CUDA-Programming/master/11_gld_throughput.cu"
.section .rodata.str1.1
.LC6:
.string "Error %s %d"
.LC7:
.string "\n Code %d Reason %s \n"
.section .rodata.str1.8
.align 8
.LC8:
.string "\n ALERT!!! CPU and GPU side results are not matching!!!"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq .LC3(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
leaq 4(%rsp), %rsi
leaq .LC4(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl $8388608, %edi
call malloc@PLT
movq %rax, %rbp
movl $8388608, %edi
call malloc@PLT
movq %rax, %rbx
movl $8388608, %edi
call malloc@PLT
movq %rax, %r12
movl $8388608, %edi
call malloc@PLT
movq %rax, %r13
movl $2097152, %r8d
movq %rax, %rcx
movq %r12, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z8initDataPfS_S_S_i
movl 4(%rsp), %r8d
movl $2097152, %ecx
movq %r12, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z8hostFuncPfS_S_ii
leaq 8(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L38
leaq 16(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L39
leaq 24(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L40
movl $1, %ecx
movl $8388608, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L41
movl $1, %ecx
movl $8388608, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L42
movl $32, 44(%rsp)
movl $1, 48(%rsp)
movl $65536, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L33:
movl $2, %ecx
movl $8388608, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L44
movl $2097152, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z14compareResultsPfS_i
testl %eax, %eax
jne .L45
.L35:
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L46
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movl %eax, %r14d
movl $104, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L39:
movl $105, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L40:
movl $106, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L41:
movl $110, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L42:
movl $111, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L43:
movl 4(%rsp), %r8d
movl $2097152, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii
jmp .L33
.L44:
movl $122, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L45:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L35
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC9:
.string "_Z10testKernelPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z10testKernelPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.align 4
.LC1:
.long 1073741824
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
Program name: gld_throughput.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Purpose: Program to demonstrate global memory efficiency
Description: A simple vector addition kernel is written which performs strided access to arrays.
Because of different offset values global memory load effeciency varies. The data
type is float (single precision) hence each thread request 4 bytes of data.
Depends on L1 cache size and different values of offset observe global memory
load efficiency. Use following profiling command
1. nvprof --devices 0 --metrics gld_transactions ./a.out
This metrics returns total number of global memory load transactions
2. nvprof --devices 0 --metrics gld_efficiency ./a.out
This metrics returns global memory load efficiency
To understand Memory coalesing refer following linksW
Link1: https://www.youtube.com/watch?v=mLxZyWOI340
Link2: https://devblogs.nvidia.com/how-access-global-memory-efficiently-cuda-c-kernels/
Link3: https://stackoverflow.com/questions/5041328/in-cuda-what-is-memory-coalescing-and-how-is-it-achieved
*/
#include<stdio.h>
#include<stdlib.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error %s %d", __FILE__, __LINE__); \
printf("\n Code %d Reason %s \n",error, cudaGetErrorString(error)); \
exit(1); \
} \
}
__global__ void testKernel(float *d_A, float *d_B, float *d_C, int N, int offset)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int k = gid + offset;
if(k < N)
{
d_C[gid] = d_A[k] + d_B[k];
}
}
void initData(float *h_A, float *h_B, float *hostRef, float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
h_A[i] = 1.0;
h_B[i] = 2.0;
hostRef[i] = 0.0;
gpuRef[i] = 0.0;
}
}
void hostFunc(float *h_A, float *h_B, float *hostRef, int N, int offset)
{
int idx;
for(int k = 0, idx = offset; idx < N; idx++,k++)
{
hostRef[k] = h_A[idx] + h_B[idx];
}
}
int compareResults(float *hostRef,float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
if(hostRef[i] != gpuRef[i])
{
return 1;
}
}
return 0;
}
int main()
{
int N;
N = 2 << 20; // # elements in arrays
size_t nBytes = N * sizeof(float); // Size reqiored to stoare arrays
int offset;// To determine the stride size in kernel
printf("\n Enter offset value: ");
scanf("%d",&offset);
/********************* Memory allocation at host side ****************************/
float *h_A = (float *)malloc(nBytes); // Host side input array h_A
float *h_B = (float *)malloc(nBytes); // Host side input array h_B
float *hostRef = (float *)malloc(nBytes); // Host side reference output array hostRef
float *gpuRef = (float *)malloc(nBytes); // Device side reference output array gpuRef
initData(h_A,h_B,hostRef, gpuRef, N); // Data initialization function
hostFunc(h_A, h_B, hostRef, N, offset);
/********************* Memory allocation at device side ****************************/
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((void **)&d_A, nBytes)); // Device side input array d_A
CHECK(cudaMalloc((void **)&d_B, nBytes)); // Device side input array d_B
CHECK(cudaMalloc((void **)&d_C, nBytes)); // Device side output array d_C
/********************* Data transfer from host to device ****************************/
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
/******************************* Kernel Launch **************************************/
int numT, numB;
numT = 32; // # threads per block
numB = ceil(N/(float)numT); // # blocks
testKernel<<<numB,numT>>>(d_A, d_B, d_C, N, offset); // Kernel to check global memory efficiency
/********************* Data transfer from host to device ****************************/
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); // Transfer data from device to host
int check = compareResults(hostRef,gpuRef, N);
if(check!= 0)
{
printf("\n ALERT!!! CPU and GPU side results are not matching!!!");
}
/******************************* Free device and Host Memories ********************************/
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return(0);
} | /*
Program name: gld_throughput.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Purpose: Program to demonstrate global memory efficiency
Description: A simple vector addition kernel is written which performs strided access to arrays.
Because of different offset values global memory load effeciency varies. The data
type is float (single precision) hence each thread request 4 bytes of data.
Depends on L1 cache size and different values of offset observe global memory
load efficiency. Use following profiling command
1. nvprof --devices 0 --metrics gld_transactions ./a.out
This metrics returns total number of global memory load transactions
2. nvprof --devices 0 --metrics gld_efficiency ./a.out
This metrics returns global memory load efficiency
To understand Memory coalesing refer following linksW
Link1: https://www.youtube.com/watch?v=mLxZyWOI340
Link2: https://devblogs.nvidia.com/how-access-global-memory-efficiently-cuda-c-kernels/
Link3: https://stackoverflow.com/questions/5041328/in-cuda-what-is-memory-coalescing-and-how-is-it-achieved
*/
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error != hipSuccess) \
{ \
printf("Error %s %d", __FILE__, __LINE__); \
printf("\n Code %d Reason %s \n",error, hipGetErrorString(error)); \
exit(1); \
} \
}
__global__ void testKernel(float *d_A, float *d_B, float *d_C, int N, int offset)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int k = gid + offset;
if(k < N)
{
d_C[gid] = d_A[k] + d_B[k];
}
}
void initData(float *h_A, float *h_B, float *hostRef, float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
h_A[i] = 1.0;
h_B[i] = 2.0;
hostRef[i] = 0.0;
gpuRef[i] = 0.0;
}
}
void hostFunc(float *h_A, float *h_B, float *hostRef, int N, int offset)
{
int idx;
for(int k = 0, idx = offset; idx < N; idx++,k++)
{
hostRef[k] = h_A[idx] + h_B[idx];
}
}
int compareResults(float *hostRef,float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
if(hostRef[i] != gpuRef[i])
{
return 1;
}
}
return 0;
}
int main()
{
int N;
N = 2 << 20; // # elements in arrays
size_t nBytes = N * sizeof(float); // Size reqiored to stoare arrays
int offset;// To determine the stride size in kernel
printf("\n Enter offset value: ");
scanf("%d",&offset);
/********************* Memory allocation at host side ****************************/
float *h_A = (float *)malloc(nBytes); // Host side input array h_A
float *h_B = (float *)malloc(nBytes); // Host side input array h_B
float *hostRef = (float *)malloc(nBytes); // Host side reference output array hostRef
float *gpuRef = (float *)malloc(nBytes); // Device side reference output array gpuRef
initData(h_A,h_B,hostRef, gpuRef, N); // Data initialization function
hostFunc(h_A, h_B, hostRef, N, offset);
/********************* Memory allocation at device side ****************************/
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((void **)&d_A, nBytes)); // Device side input array d_A
CHECK(hipMalloc((void **)&d_B, nBytes)); // Device side input array d_B
CHECK(hipMalloc((void **)&d_C, nBytes)); // Device side output array d_C
/********************* Data transfer from host to device ****************************/
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
/******************************* Kernel Launch **************************************/
int numT, numB;
numT = 32; // # threads per block
numB = ceil(N/(float)numT); // # blocks
testKernel<<<numB,numT>>>(d_A, d_B, d_C, N, offset); // Kernel to check global memory efficiency
/********************* Data transfer from host to device ****************************/
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); // Transfer data from device to host
int check = compareResults(hostRef,gpuRef, N);
if(check!= 0)
{
printf("\n ALERT!!! CPU and GPU side results are not matching!!!");
}
/******************************* Free device and Host Memories ********************************/
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return(0);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
Program name: gld_throughput.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Purpose: Program to demonstrate global memory efficiency
Description: A simple vector addition kernel is written which performs strided access to arrays.
Because of different offset values global memory load effeciency varies. The data
type is float (single precision) hence each thread request 4 bytes of data.
Depends on L1 cache size and different values of offset observe global memory
load efficiency. Use following profiling command
1. nvprof --devices 0 --metrics gld_transactions ./a.out
This metrics returns total number of global memory load transactions
2. nvprof --devices 0 --metrics gld_efficiency ./a.out
This metrics returns global memory load efficiency
To understand Memory coalesing refer following linksW
Link1: https://www.youtube.com/watch?v=mLxZyWOI340
Link2: https://devblogs.nvidia.com/how-access-global-memory-efficiently-cuda-c-kernels/
Link3: https://stackoverflow.com/questions/5041328/in-cuda-what-is-memory-coalescing-and-how-is-it-achieved
*/
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error != hipSuccess) \
{ \
printf("Error %s %d", __FILE__, __LINE__); \
printf("\n Code %d Reason %s \n",error, hipGetErrorString(error)); \
exit(1); \
} \
}
__global__ void testKernel(float *d_A, float *d_B, float *d_C, int N, int offset)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int k = gid + offset;
if(k < N)
{
d_C[gid] = d_A[k] + d_B[k];
}
}
void initData(float *h_A, float *h_B, float *hostRef, float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
h_A[i] = 1.0;
h_B[i] = 2.0;
hostRef[i] = 0.0;
gpuRef[i] = 0.0;
}
}
void hostFunc(float *h_A, float *h_B, float *hostRef, int N, int offset)
{
int idx;
for(int k = 0, idx = offset; idx < N; idx++,k++)
{
hostRef[k] = h_A[idx] + h_B[idx];
}
}
int compareResults(float *hostRef,float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
if(hostRef[i] != gpuRef[i])
{
return 1;
}
}
return 0;
}
int main()
{
int N;
N = 2 << 20; // # elements in arrays
size_t nBytes = N * sizeof(float); // Size reqiored to stoare arrays
int offset;// To determine the stride size in kernel
printf("\n Enter offset value: ");
scanf("%d",&offset);
/********************* Memory allocation at host side ****************************/
float *h_A = (float *)malloc(nBytes); // Host side input array h_A
float *h_B = (float *)malloc(nBytes); // Host side input array h_B
float *hostRef = (float *)malloc(nBytes); // Host side reference output array hostRef
float *gpuRef = (float *)malloc(nBytes); // Device side reference output array gpuRef
initData(h_A,h_B,hostRef, gpuRef, N); // Data initialization function
hostFunc(h_A, h_B, hostRef, N, offset);
/********************* Memory allocation at device side ****************************/
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((void **)&d_A, nBytes)); // Device side input array d_A
CHECK(hipMalloc((void **)&d_B, nBytes)); // Device side input array d_B
CHECK(hipMalloc((void **)&d_C, nBytes)); // Device side output array d_C
/********************* Data transfer from host to device ****************************/
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
/******************************* Kernel Launch **************************************/
int numT, numB;
numT = 32; // # threads per block
numB = ceil(N/(float)numT); // # blocks
testKernel<<<numB,numT>>>(d_A, d_B, d_C, N, offset); // Kernel to check global memory efficiency
/********************* Data transfer from host to device ****************************/
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); // Transfer data from device to host
int check = compareResults(hostRef,gpuRef, N);
if(check!= 0)
{
printf("\n ALERT!!! CPU and GPU side results are not matching!!!");
}
/******************************* Free device and Host Memories ********************************/
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return(0);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10testKernelPfS_S_ii
.globl _Z10testKernelPfS_S_ii
.p2align 8
.type _Z10testKernelPfS_S_ii,@function
_Z10testKernelPfS_S_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_add_nc_u32_e32 v2, s3, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v2
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v3, 31, v2
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
global_load_b32 v4, v[4:5], off
global_load_b32 v3, v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v4, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10testKernelPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10testKernelPfS_S_ii, .Lfunc_end0-_Z10testKernelPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10testKernelPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10testKernelPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
Program name: gld_throughput.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Purpose: Program to demonstrate global memory efficiency
Description: A simple vector addition kernel is written which performs strided access to arrays.
Because of different offset values global memory load effeciency varies. The data
type is float (single precision) hence each thread request 4 bytes of data.
Depends on L1 cache size and different values of offset observe global memory
load efficiency. Use following profiling command
1. nvprof --devices 0 --metrics gld_transactions ./a.out
This metrics returns total number of global memory load transactions
2. nvprof --devices 0 --metrics gld_efficiency ./a.out
This metrics returns global memory load efficiency
To understand Memory coalesing refer following linksW
Link1: https://www.youtube.com/watch?v=mLxZyWOI340
Link2: https://devblogs.nvidia.com/how-access-global-memory-efficiently-cuda-c-kernels/
Link3: https://stackoverflow.com/questions/5041328/in-cuda-what-is-memory-coalescing-and-how-is-it-achieved
*/
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error != hipSuccess) \
{ \
printf("Error %s %d", __FILE__, __LINE__); \
printf("\n Code %d Reason %s \n",error, hipGetErrorString(error)); \
exit(1); \
} \
}
__global__ void testKernel(float *d_A, float *d_B, float *d_C, int N, int offset)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int k = gid + offset;
if(k < N)
{
d_C[gid] = d_A[k] + d_B[k];
}
}
void initData(float *h_A, float *h_B, float *hostRef, float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
h_A[i] = 1.0;
h_B[i] = 2.0;
hostRef[i] = 0.0;
gpuRef[i] = 0.0;
}
}
void hostFunc(float *h_A, float *h_B, float *hostRef, int N, int offset)
{
int idx;
for(int k = 0, idx = offset; idx < N; idx++,k++)
{
hostRef[k] = h_A[idx] + h_B[idx];
}
}
int compareResults(float *hostRef,float *gpuRef, int N)
{
for(int i = 0; i < N; i++)
{
if(hostRef[i] != gpuRef[i])
{
return 1;
}
}
return 0;
}
int main()
{
int N;
N = 2 << 20; // # elements in arrays
size_t nBytes = N * sizeof(float); // Size reqiored to stoare arrays
int offset;// To determine the stride size in kernel
printf("\n Enter offset value: ");
scanf("%d",&offset);
/********************* Memory allocation at host side ****************************/
float *h_A = (float *)malloc(nBytes); // Host side input array h_A
float *h_B = (float *)malloc(nBytes); // Host side input array h_B
float *hostRef = (float *)malloc(nBytes); // Host side reference output array hostRef
float *gpuRef = (float *)malloc(nBytes); // Device side reference output array gpuRef
initData(h_A,h_B,hostRef, gpuRef, N); // Data initialization function
hostFunc(h_A, h_B, hostRef, N, offset);
/********************* Memory allocation at device side ****************************/
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((void **)&d_A, nBytes)); // Device side input array d_A
CHECK(hipMalloc((void **)&d_B, nBytes)); // Device side input array d_B
CHECK(hipMalloc((void **)&d_C, nBytes)); // Device side output array d_C
/********************* Data transfer from host to device ****************************/
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
/******************************* Kernel Launch **************************************/
int numT, numB;
numT = 32; // # threads per block
numB = ceil(N/(float)numT); // # blocks
testKernel<<<numB,numT>>>(d_A, d_B, d_C, N, offset); // Kernel to check global memory efficiency
/********************* Data transfer from host to device ****************************/
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); // Transfer data from device to host
int check = compareResults(hostRef,gpuRef, N);
if(check!= 0)
{
printf("\n ALERT!!! CPU and GPU side results are not matching!!!");
}
/******************************* Free device and Host Memories ********************************/
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return(0);
} | .text
.file "11_gld_throughput.hip"
.globl _Z25__device_stub__testKernelPfS_S_ii # -- Begin function _Z25__device_stub__testKernelPfS_S_ii
.p2align 4, 0x90
.type _Z25__device_stub__testKernelPfS_S_ii,@function
_Z25__device_stub__testKernelPfS_S_ii: # @_Z25__device_stub__testKernelPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10testKernelPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__testKernelPfS_S_ii, .Lfunc_end0-_Z25__device_stub__testKernelPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z8initDataPfS_S_S_i # -- Begin function _Z8initDataPfS_S_S_i
.p2align 4, 0x90
.type _Z8initDataPfS_S_S_i,@function
_Z8initDataPfS_S_S_i: # @_Z8initDataPfS_S_S_i
.cfi_startproc
# %bb.0:
testl %r8d, %r8d
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
movl %r8d, %eax
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%rdi,%r8,4) # imm = 0x3F800000
movl $1073741824, (%rsi,%r8,4) # imm = 0x40000000
movl $0, (%rdx,%r8,4)
movl $0, (%rcx,%r8,4)
incq %r8
cmpq %r8, %rax
jne .LBB1_2
.LBB1_3: # %._crit_edge
retq
.Lfunc_end1:
.size _Z8initDataPfS_S_S_i, .Lfunc_end1-_Z8initDataPfS_S_S_i
.cfi_endproc
# -- End function
.globl _Z8hostFuncPfS_S_ii # -- Begin function _Z8hostFuncPfS_S_ii
.p2align 4, 0x90
.type _Z8hostFuncPfS_S_ii,@function
_Z8hostFuncPfS_S_ii: # @_Z8hostFuncPfS_S_ii
.cfi_startproc
# %bb.0:
# kill: def $ecx killed $ecx def $rcx
subl %r8d, %ecx
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
movslq %r8d, %r8
leaq (%rdi,%r8,4), %rax
leaq (%rsi,%r8,4), %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rax,%rdi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%rdi,4), %xmm0
movss %xmm0, (%rdx,%rdi,4)
incq %rdi
cmpq %rdi, %rcx
jne .LBB2_2
.LBB2_3: # %._crit_edge
retq
.Lfunc_end2:
.size _Z8hostFuncPfS_S_ii, .Lfunc_end2-_Z8hostFuncPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z14compareResultsPfS_i # -- Begin function _Z14compareResultsPfS_i
.p2align 4, 0x90
.type _Z14compareResultsPfS_i,@function
_Z14compareResultsPfS_i: # @_Z14compareResultsPfS_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
setg %al
jle .LBB3_6
# %bb.1: # %.lr.ph.preheader
movss (%rdi), %xmm0 # xmm0 = mem[0],zero,zero,zero
ucomiss (%rsi), %xmm0
jne .LBB3_6
jnp .LBB3_2
.LBB3_6: # %._crit_edge
movzbl %al, %eax
retq
.LBB3_2: # %.lr.ph16.preheader
movl %edx, %eax
movl $1, %edx
.p2align 4, 0x90
.LBB3_3: # %.lr.ph16
# =>This Inner Loop Header: Depth=1
movq %rdx, %rcx
cmpq %rdx, %rax
je .LBB3_5
# %bb.4: # %.lr.ph
# in Loop: Header=BB3_3 Depth=1
movss (%rdi,%rcx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
leaq 1(%rcx), %rdx
ucomiss (%rsi,%rcx,4), %xmm0
jne .LBB3_5
jnp .LBB3_3
.LBB3_5: # %._crit_edge.loopexit
cmpq %rax, %rcx
setb %al
movzbl %al, %eax
retq
.Lfunc_end3:
.size _Z14compareResultsPfS_i, .Lfunc_end3-_Z14compareResultsPfS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $160, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
xorl %r13d, %r13d
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 4(%rsp), %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %rbx
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %r14
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %r15
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %r12
movl $8388608, %edx # imm = 0x800000
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movl $8388608, %edx # imm = 0x800000
movq %r12, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB4_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%rbx,%r13,4) # imm = 0x3F800000
movl $1073741824, (%r14,%r13,4) # imm = 0x40000000
incq %r13
cmpq $2097152, %r13 # imm = 0x200000
jne .LBB4_1
# %bb.2: # %_Z8initDataPfS_S_S_i.exit
movslq 4(%rsp), %rdx
cmpq $2097151, %rdx # imm = 0x1FFFFF
jg .LBB4_5
# %bb.3: # %.lr.ph.preheader.i
movl $2097152, %eax # imm = 0x200000
subl %edx, %eax
leaq (%r14,%rdx,4), %rcx
leaq (%rbx,%rdx,4), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB4_4: # %.lr.ph.i67
# =>This Inner Loop Header: Depth=1
movss (%rdx,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rcx,%rsi,4), %xmm0
movss %xmm0, (%r15,%rsi,4)
incq %rsi
cmpq %rsi, %rax
jne .LBB4_4
.LBB4_5: # %_Z8hostFuncPfS_S_ii.exit
leaq 24(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
testl %eax, %eax
jne .LBB4_6
# %bb.8:
leaq 16(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
testl %eax, %eax
jne .LBB4_9
# %bb.10:
leaq 8(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
testl %eax, %eax
jne .LBB4_11
# %bb.12:
movq 24(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_13
# %bb.14:
movq 16(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_15
# %bb.16:
movabsq $4294967328, %rdx # imm = 0x100000020
leaq 65504(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_18
# %bb.17:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movl 4(%rsp), %esi
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $2097152, 36(%rsp) # imm = 0x200000
movl %esi, 32(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z10testKernelPfS_S_ii, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_18:
movq 8(%rsp), %rsi
movl $8388608, %edx # imm = 0x800000
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_26
# %bb.19: # %.lr.ph.i72.preheader
movss (%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
ucomiss (%r12), %xmm0
jne .LBB4_24
jp .LBB4_24
# %bb.20: # %.lr.ph.preheader
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB4_21: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq %rcx, %rax
cmpq $2097151, %rcx # imm = 0x1FFFFF
je .LBB4_23
# %bb.22: # %.lr.ph.i72
# in Loop: Header=BB4_21 Depth=1
movss 4(%r15,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
leaq 1(%rax), %rcx
ucomiss 4(%r12,%rax,4), %xmm0
jne .LBB4_23
jnp .LBB4_21
.LBB4_23: # %_Z14compareResultsPfS_i.exit
cmpq $2097150, %rax # imm = 0x1FFFFE
ja .LBB4_25
.LBB4_24: # %.critedge
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
.LBB4_25:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB4_6:
.cfi_def_cfa_offset 208
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $106, %edx
jmp .LBB4_7
.LBB4_9:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $107, %edx
jmp .LBB4_7
.LBB4_11:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $108, %edx
jmp .LBB4_7
.LBB4_13:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $112, %edx
jmp .LBB4_7
.LBB4_15:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $113, %edx
jmp .LBB4_7
.LBB4_26:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $124, %edx
.LBB4_7:
movl %eax, %ebx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str.4, %edi
movl %ebx, %esi
movq %rax, %rdx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10testKernelPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10testKernelPfS_S_ii,@object # @_Z10testKernelPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z10testKernelPfS_S_ii
.p2align 3, 0x0
_Z10testKernelPfS_S_ii:
.quad _Z25__device_stub__testKernelPfS_S_ii
.size _Z10testKernelPfS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n Enter offset value: "
.size .L.str, 23
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Error %s %d"
.size .L.str.2, 12
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/NileshchandraPikle/Introduction-to-CUDA-Programming/master/11_gld_throughput.hip"
.size .L.str.3, 138
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\n Code %d Reason %s \n"
.size .L.str.4, 22
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\n ALERT!!! CPU and GPU side results are not matching!!!"
.size .L.str.5, 56
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10testKernelPfS_S_ii"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__testKernelPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10testKernelPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10testKernelPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ IADD3 R4, R6, c[0x0][0x17c], RZ ; /* 0x00005f0006047a10 */
/* 0x000fc80007ffe0ff */
/*0050*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */
/* 0x000fda0003f06270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0090*/ IMAD.WIDE R2, R4, R7, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fc800078e0207 */
/*00a0*/ IMAD.WIDE R4, R4, R7.reuse, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x080fe400078e0207 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*00d0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00e0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00f0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10testKernelPfS_S_ii
.globl _Z10testKernelPfS_S_ii
.p2align 8
.type _Z10testKernelPfS_S_ii,@function
_Z10testKernelPfS_S_ii:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b64 s[2:3], s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_add_nc_u32_e32 v2, s3, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v2
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v3, 31, v2
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
global_load_b32 v4, v[4:5], off
global_load_b32 v3, v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v4, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10testKernelPfS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10testKernelPfS_S_ii, .Lfunc_end0-_Z10testKernelPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10testKernelPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10testKernelPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00145458_00000000-6_11_gld_throughput.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8initDataPfS_S_S_i
.type _Z8initDataPfS_S_S_i, @function
_Z8initDataPfS_S_S_i:
.LFB2057:
.cfi_startproc
endbr64
movq %rsi, %r9
testl %r8d, %r8d
jle .L3
movslq %r8d, %r8
leaq 0(,%r8,4), %rsi
movl $0, %eax
movss .LC0(%rip), %xmm1
movss .LC1(%rip), %xmm0
.L5:
movss %xmm1, (%rdi,%rax)
movss %xmm0, (%r9,%rax)
movl $0x00000000, (%rdx,%rax)
movl $0x00000000, (%rcx,%rax)
addq $4, %rax
cmpq %rsi, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z8initDataPfS_S_S_i, .-_Z8initDataPfS_S_S_i
.globl _Z8hostFuncPfS_S_ii
.type _Z8hostFuncPfS_S_ii, @function
_Z8hostFuncPfS_S_ii:
.LFB2058:
.cfi_startproc
endbr64
movq %rdi, %rax
cmpl %ecx, %r8d
jge .L7
movslq %r8d, %r8
movq %r8, %rdi
negq %rdi
leaq (%rdx,%rdi,4), %rdx
.L9:
movss (%rax,%r8,4), %xmm0
addss (%rsi,%r8,4), %xmm0
movss %xmm0, (%rdx,%r8,4)
addq $1, %r8
cmpl %r8d, %ecx
jg .L9
.L7:
ret
.cfi_endproc
.LFE2058:
.size _Z8hostFuncPfS_S_ii, .-_Z8hostFuncPfS_S_ii
.globl _Z14compareResultsPfS_i
.type _Z14compareResultsPfS_i, @function
_Z14compareResultsPfS_i:
.LFB2059:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L15
movslq %edx, %rdx
salq $2, %rdx
movl $0, %eax
.L14:
movss (%rdi,%rax), %xmm0
ucomiss (%rsi,%rax), %xmm0
jp .L16
jne .L16
addq $4, %rax
cmpq %rdx, %rax
jne .L14
movl $0, %eax
ret
.L15:
movl $0, %eax
ret
.L16:
movl $1, %eax
ret
.cfi_endproc
.LFE2059:
.size _Z14compareResultsPfS_i, .-_Z14compareResultsPfS_i
.globl _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii
.type _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii, @function
_Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10testKernelPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii, .-_Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii
.globl _Z10testKernelPfS_S_ii
.type _Z10testKernelPfS_S_ii, @function
_Z10testKernelPfS_S_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z10testKernelPfS_S_ii, .-_Z10testKernelPfS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "\n Enter offset value: "
.LC4:
.string "%d"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "/home/ubuntu/Datasets/stackv2/train-structured/NileshchandraPikle/Introduction-to-CUDA-Programming/master/11_gld_throughput.cu"
.section .rodata.str1.1
.LC6:
.string "Error %s %d"
.LC7:
.string "\n Code %d Reason %s \n"
.section .rodata.str1.8
.align 8
.LC8:
.string "\n ALERT!!! CPU and GPU side results are not matching!!!"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq .LC3(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
leaq 4(%rsp), %rsi
leaq .LC4(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl $8388608, %edi
call malloc@PLT
movq %rax, %rbp
movl $8388608, %edi
call malloc@PLT
movq %rax, %rbx
movl $8388608, %edi
call malloc@PLT
movq %rax, %r12
movl $8388608, %edi
call malloc@PLT
movq %rax, %r13
movl $2097152, %r8d
movq %rax, %rcx
movq %r12, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z8initDataPfS_S_S_i
movl 4(%rsp), %r8d
movl $2097152, %ecx
movq %r12, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z8hostFuncPfS_S_ii
leaq 8(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L38
leaq 16(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L39
leaq 24(%rsp), %rdi
movl $8388608, %esi
call cudaMalloc@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L40
movl $1, %ecx
movl $8388608, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L41
movl $1, %ecx
movl $8388608, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L42
movl $32, 44(%rsp)
movl $1, 48(%rsp)
movl $65536, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L33:
movl $2, %ecx
movl $8388608, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
testl %eax, %eax
jne .L44
movl $2097152, %edx
movq %r13, %rsi
movq %r12, %rdi
call _Z14compareResultsPfS_i
testl %eax, %eax
jne .L45
.L35:
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L46
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
movl %eax, %r14d
movl $104, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L39:
movl $105, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L40:
movl $106, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L41:
movl $110, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L42:
movl $111, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L43:
movl 4(%rsp), %r8d
movl $2097152, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z36__device_stub__Z10testKernelPfS_S_iiPfS_S_ii
jmp .L33
.L44:
movl $122, %ecx
leaq .LC5(%rip), %rdx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r14d, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
movl %r14d, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L45:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L35
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC9:
.string "_Z10testKernelPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z10testKernelPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.align 4
.LC1:
.long 1073741824
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "11_gld_throughput.hip"
.globl _Z25__device_stub__testKernelPfS_S_ii # -- Begin function _Z25__device_stub__testKernelPfS_S_ii
.p2align 4, 0x90
.type _Z25__device_stub__testKernelPfS_S_ii,@function
_Z25__device_stub__testKernelPfS_S_ii: # @_Z25__device_stub__testKernelPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10testKernelPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__testKernelPfS_S_ii, .Lfunc_end0-_Z25__device_stub__testKernelPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z8initDataPfS_S_S_i # -- Begin function _Z8initDataPfS_S_S_i
.p2align 4, 0x90
.type _Z8initDataPfS_S_S_i,@function
_Z8initDataPfS_S_S_i: # @_Z8initDataPfS_S_S_i
.cfi_startproc
# %bb.0:
testl %r8d, %r8d
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
movl %r8d, %eax
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%rdi,%r8,4) # imm = 0x3F800000
movl $1073741824, (%rsi,%r8,4) # imm = 0x40000000
movl $0, (%rdx,%r8,4)
movl $0, (%rcx,%r8,4)
incq %r8
cmpq %r8, %rax
jne .LBB1_2
.LBB1_3: # %._crit_edge
retq
.Lfunc_end1:
.size _Z8initDataPfS_S_S_i, .Lfunc_end1-_Z8initDataPfS_S_S_i
.cfi_endproc
# -- End function
.globl _Z8hostFuncPfS_S_ii # -- Begin function _Z8hostFuncPfS_S_ii
.p2align 4, 0x90
.type _Z8hostFuncPfS_S_ii,@function
_Z8hostFuncPfS_S_ii: # @_Z8hostFuncPfS_S_ii
.cfi_startproc
# %bb.0:
# kill: def $ecx killed $ecx def $rcx
subl %r8d, %ecx
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
movslq %r8d, %r8
leaq (%rdi,%r8,4), %rax
leaq (%rsi,%r8,4), %rsi
xorl %edi, %edi
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rax,%rdi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rsi,%rdi,4), %xmm0
movss %xmm0, (%rdx,%rdi,4)
incq %rdi
cmpq %rdi, %rcx
jne .LBB2_2
.LBB2_3: # %._crit_edge
retq
.Lfunc_end2:
.size _Z8hostFuncPfS_S_ii, .Lfunc_end2-_Z8hostFuncPfS_S_ii
.cfi_endproc
# -- End function
.globl _Z14compareResultsPfS_i # -- Begin function _Z14compareResultsPfS_i
.p2align 4, 0x90
.type _Z14compareResultsPfS_i,@function
_Z14compareResultsPfS_i: # @_Z14compareResultsPfS_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
setg %al
jle .LBB3_6
# %bb.1: # %.lr.ph.preheader
movss (%rdi), %xmm0 # xmm0 = mem[0],zero,zero,zero
ucomiss (%rsi), %xmm0
jne .LBB3_6
jnp .LBB3_2
.LBB3_6: # %._crit_edge
movzbl %al, %eax
retq
.LBB3_2: # %.lr.ph16.preheader
movl %edx, %eax
movl $1, %edx
.p2align 4, 0x90
.LBB3_3: # %.lr.ph16
# =>This Inner Loop Header: Depth=1
movq %rdx, %rcx
cmpq %rdx, %rax
je .LBB3_5
# %bb.4: # %.lr.ph
# in Loop: Header=BB3_3 Depth=1
movss (%rdi,%rcx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
leaq 1(%rcx), %rdx
ucomiss (%rsi,%rcx,4), %xmm0
jne .LBB3_5
jnp .LBB3_3
.LBB3_5: # %._crit_edge.loopexit
cmpq %rax, %rcx
setb %al
movzbl %al, %eax
retq
.Lfunc_end3:
.size _Z14compareResultsPfS_i, .Lfunc_end3-_Z14compareResultsPfS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $160, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
xorl %r13d, %r13d
movl $.L.str, %edi
xorl %eax, %eax
callq printf
leaq 4(%rsp), %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %rbx
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %r14
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %r15
movl $8388608, %edi # imm = 0x800000
callq malloc
movq %rax, %r12
movl $8388608, %edx # imm = 0x800000
movq %r15, %rdi
xorl %esi, %esi
callq memset@PLT
movl $8388608, %edx # imm = 0x800000
movq %r12, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB4_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%rbx,%r13,4) # imm = 0x3F800000
movl $1073741824, (%r14,%r13,4) # imm = 0x40000000
incq %r13
cmpq $2097152, %r13 # imm = 0x200000
jne .LBB4_1
# %bb.2: # %_Z8initDataPfS_S_S_i.exit
movslq 4(%rsp), %rdx
cmpq $2097151, %rdx # imm = 0x1FFFFF
jg .LBB4_5
# %bb.3: # %.lr.ph.preheader.i
movl $2097152, %eax # imm = 0x200000
subl %edx, %eax
leaq (%r14,%rdx,4), %rcx
leaq (%rbx,%rdx,4), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB4_4: # %.lr.ph.i67
# =>This Inner Loop Header: Depth=1
movss (%rdx,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%rcx,%rsi,4), %xmm0
movss %xmm0, (%r15,%rsi,4)
incq %rsi
cmpq %rsi, %rax
jne .LBB4_4
.LBB4_5: # %_Z8hostFuncPfS_S_ii.exit
leaq 24(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
testl %eax, %eax
jne .LBB4_6
# %bb.8:
leaq 16(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
testl %eax, %eax
jne .LBB4_9
# %bb.10:
leaq 8(%rsp), %rdi
movl $8388608, %esi # imm = 0x800000
callq hipMalloc
testl %eax, %eax
jne .LBB4_11
# %bb.12:
movq 24(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_13
# %bb.14:
movq 16(%rsp), %rdi
movl $8388608, %edx # imm = 0x800000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_15
# %bb.16:
movabsq $4294967328, %rdx # imm = 0x100000020
leaq 65504(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_18
# %bb.17:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movl 4(%rsp), %esi
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $2097152, 36(%rsp) # imm = 0x200000
movl %esi, 32(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z10testKernelPfS_S_ii, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_18:
movq 8(%rsp), %rsi
movl $8388608, %edx # imm = 0x800000
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB4_26
# %bb.19: # %.lr.ph.i72.preheader
movss (%r15), %xmm0 # xmm0 = mem[0],zero,zero,zero
ucomiss (%r12), %xmm0
jne .LBB4_24
jp .LBB4_24
# %bb.20: # %.lr.ph.preheader
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB4_21: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq %rcx, %rax
cmpq $2097151, %rcx # imm = 0x1FFFFF
je .LBB4_23
# %bb.22: # %.lr.ph.i72
# in Loop: Header=BB4_21 Depth=1
movss 4(%r15,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
leaq 1(%rax), %rcx
ucomiss 4(%r12,%rax,4), %xmm0
jne .LBB4_23
jnp .LBB4_21
.LBB4_23: # %_Z14compareResultsPfS_i.exit
cmpq $2097150, %rax # imm = 0x1FFFFE
ja .LBB4_25
.LBB4_24: # %.critedge
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
.LBB4_25:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB4_6:
.cfi_def_cfa_offset 208
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $106, %edx
jmp .LBB4_7
.LBB4_9:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $107, %edx
jmp .LBB4_7
.LBB4_11:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $108, %edx
jmp .LBB4_7
.LBB4_13:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $112, %edx
jmp .LBB4_7
.LBB4_15:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $113, %edx
jmp .LBB4_7
.LBB4_26:
movl $.L.str.2, %edi
movl $.L.str.3, %esi
movl $124, %edx
.LBB4_7:
movl %eax, %ebx
xorl %eax, %eax
callq printf
movl %ebx, %edi
callq hipGetErrorString
movl $.L.str.4, %edi
movl %ebx, %esi
movq %rax, %rdx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10testKernelPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10testKernelPfS_S_ii,@object # @_Z10testKernelPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z10testKernelPfS_S_ii
.p2align 3, 0x0
_Z10testKernelPfS_S_ii:
.quad _Z25__device_stub__testKernelPfS_S_ii
.size _Z10testKernelPfS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n Enter offset value: "
.size .L.str, 23
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Error %s %d"
.size .L.str.2, 12
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/NileshchandraPikle/Introduction-to-CUDA-Programming/master/11_gld_throughput.hip"
.size .L.str.3, 138
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\n Code %d Reason %s \n"
.size .L.str.4, 22
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "\n ALERT!!! CPU and GPU side results are not matching!!!"
.size .L.str.5, 56
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10testKernelPfS_S_ii"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__testKernelPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10testKernelPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
int main()
{
double *matrix, *d_A;
matrix = (double *)calloc(1000, sizeof(double));
cudaMalloc( &d_A, 1000*sizeof(double));
cudaMemcpy(d_A, matrix, 1000*sizeof(double), cudaMemcpyHostToDevice);
printf("\nthe first element of matrix is %f\n", matrix[0]);
cudaFree(d_A);
free(matrix);
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
int main()
{
double *matrix, *d_A;
matrix = (double *)calloc(1000, sizeof(double));
cudaMalloc( &d_A, 1000*sizeof(double));
cudaMemcpy(d_A, matrix, 1000*sizeof(double), cudaMemcpyHostToDevice);
printf("\nthe first element of matrix is %f\n", matrix[0]);
cudaFree(d_A);
free(matrix);
return 0;
} | .file "tmpxft_00090476_00000000-6_nothing.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "\nthe first element of matrix is %f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movl $8, %esi
movl $1000, %edi
call calloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbx, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movsd (%rbx), %xmm0
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L6
movl $0, %eax
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
int main()
{
double *matrix, *d_A;
matrix = (double *)calloc(1000, sizeof(double));
cudaMalloc( &d_A, 1000*sizeof(double));
cudaMemcpy(d_A, matrix, 1000*sizeof(double), cudaMemcpyHostToDevice);
printf("\nthe first element of matrix is %f\n", matrix[0]);
cudaFree(d_A);
free(matrix);
return 0;
} | #include "hip/hip_runtime.h"
#include "stdio.h"
int main()
{
double *matrix, *d_A;
matrix = (double *)calloc(1000, sizeof(double));
hipMalloc( &d_A, 1000*sizeof(double));
hipMemcpy(d_A, matrix, 1000*sizeof(double), hipMemcpyHostToDevice);
printf("\nthe first element of matrix is %f\n", matrix[0]);
hipFree(d_A);
free(matrix);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include "stdio.h"
int main()
{
double *matrix, *d_A;
matrix = (double *)calloc(1000, sizeof(double));
hipMalloc( &d_A, 1000*sizeof(double));
hipMemcpy(d_A, matrix, 1000*sizeof(double), hipMemcpyHostToDevice);
printf("\nthe first element of matrix is %f\n", matrix[0]);
hipFree(d_A);
free(matrix);
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include "stdio.h"
int main()
{
double *matrix, *d_A;
matrix = (double *)calloc(1000, sizeof(double));
hipMalloc( &d_A, 1000*sizeof(double));
hipMemcpy(d_A, matrix, 1000*sizeof(double), hipMemcpyHostToDevice);
printf("\nthe first element of matrix is %f\n", matrix[0]);
hipFree(d_A);
free(matrix);
return 0;
} | .text
.file "nothing.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
movl $1000, %edi # imm = 0x3E8
movl $8, %esi
callq calloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movq 8(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movsd (%rbx), %xmm0 # xmm0 = mem[0],zero
movl $.L.str, %edi
movb $1, %al
callq printf
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\nthe first element of matrix is %f\n"
.size .L.str, 36
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00090476_00000000-6_nothing.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "\nthe first element of matrix is %f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movl $8, %esi
movl $1000, %edi
call calloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $8000, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $8000, %edx
movq %rbx, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movsd (%rbx), %xmm0
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L6
movl $0, %eax
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "nothing.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
movl $1000, %edi # imm = 0x3E8
movl $8, %esi
callq calloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $8000, %esi # imm = 0x1F40
callq hipMalloc
movq 8(%rsp), %rdi
movl $8000, %edx # imm = 0x1F40
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movsd (%rbx), %xmm0 # xmm0 = mem[0],zero
movl $.L.str, %edi
movb $1, %al
callq printf
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\nthe first element of matrix is %f\n"
.size .L.str, 36
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | typedef int2 Record;
__global__ void
mapImpl_kernel(Record *d_R, int delta, int rLen,int *d_output1, int *d_output2)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//Record value;
for(int pos=resultID;pos<rLen;pos+=delta)
{
//value=d_R[pos];
d_output1[pos]=d_R[pos].x;
d_output2[pos]=d_R[pos].y;
}
} | code for sm_80
Function : _Z14mapImpl_kernelP4int2iiPiS1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2UR UR4, SR_CTAID.X ; /* 0x00000000000479c3 */
/* 0x000e220000002500 */
/*0020*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002200 */
/*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000ea20000002100 */
/*0050*/ IADD3 R0, R0, UR4, RZ ; /* 0x0000000400007c10 */
/* 0x001fca000fffe0ff */
/*0060*/ IMAD R0, R3, c[0x0][0xc], R0 ; /* 0x0000030003007a24 */
/* 0x002fc800078e0200 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0205 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ I2F.U32.RP R5, c[0x0][0x168] ; /* 0x00005a0000057b06 */
/* 0x000e220000209000 */
/*00b0*/ LOP3.LUT R4, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff047212 */
/* 0x000fe200078e33ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00d0*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fe20003f45070 */
/*00e0*/ BSSY B0, 0x3a0 ; /* 0x000002b000007945 */
/* 0x000fe20003800000 */
/*00f0*/ IADD3 R4, R4, c[0x0][0x16c], RZ ; /* 0x00005b0004047a10 */
/* 0x000fc60007ffe0ff */
/*0100*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R2, R5, 0xffffffe, RZ ; /* 0x0ffffffe05027810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD.MOV R7, RZ, RZ, -R3 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a03 */
/*0150*/ IMAD R7, R7, c[0x0][0x168], RZ ; /* 0x00005a0007077a24 */
/* 0x000fca00078e02ff */
/*0160*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fcc00078e0002 */
/*0170*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0180*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a03 */
/*0190*/ IMAD R4, R5, c[0x0][0x168], R4 ; /* 0x00005a0005047a24 */
/* 0x000fca00078e0204 */
/*01a0*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */
/* 0x000fda0003f06070 */
/*01b0*/ @P0 IADD3 R4, R4, -c[0x0][0x168], RZ ; /* 0x80005a0004040a10 */
/* 0x000fe40007ffe0ff */
/*01c0*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*01d0*/ ISETP.GE.U32.AND P1, PT, R4, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */
/* 0x000fda0003f26070 */
/*01e0*/ @P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103031810 */
/* 0x000fe40007ffe0ff */
/*01f0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff03aa12 */
/* 0x000fc800078e33ff */
/*0200*/ IADD3 R2, R3.reuse, 0x1, RZ ; /* 0x0000000103027810 */
/* 0x040fe40007ffe0ff */
/*0210*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe40003f06070 */
/*0220*/ LOP3.LUT P1, R4, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302047812 */
/* 0x000fda000782c0ff */
/*0230*/ @!P1 BRA 0x390 ; /* 0x0000015000009947 */
/* 0x000fea0003800000 */
/*0240*/ MOV R17, 0x8 ; /* 0x0000000800117802 */
/* 0x000fe20000000f00 */
/*0250*/ IMAD.MOV.U32 R15, RZ, RZ, 0x4 ; /* 0x00000004ff0f7424 */
/* 0x000fe200078e00ff */
/*0260*/ MOV R10, R4 ; /* 0x00000004000a7202 */
/* 0x000fc60000000f00 */
/*0270*/ IMAD.WIDE R2, R0, R17, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0211 */
/*0280*/ IMAD.WIDE R4, R0, R15, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x000fe200078e020f */
/*0290*/ IADD3 R6, P1, R2, 0x4, RZ ; /* 0x0000000402067810 */
/* 0x000fca0007f3e0ff */
/*02a0*/ IMAD.X R7, RZ, RZ, R3, P1 ; /* 0x000000ffff077224 */
/* 0x000fe400008e0603 */
/*02b0*/ IMAD.WIDE R2, R0, R15, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fc800078e020f */
/*02c0*/ MOV R8, R6 ; /* 0x0000000600087202 */
/* 0x000fe40000000f00 */
/*02d0*/ MOV R9, R7 ; /* 0x0000000700097202 */
/* 0x000fca0000000f00 */
/*02e0*/ LDG.E R11, [R8.64+-0x4] ; /* 0xfffffc04080b7981 */
/* 0x000ea2000c1e1900 */
/*02f0*/ IADD3 R10, R10, -0x1, RZ ; /* 0xffffffff0a0a7810 */
/* 0x000fc80007ffe0ff */
/*0300*/ ISETP.NE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe20003f25270 */
/*0310*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0041e8000c101904 */
/*0320*/ LDG.E R13, [R8.64] ; /* 0x00000004080d7981 */
/* 0x000ea2000c1e1900 */
/*0330*/ IMAD.WIDE R6, R17, c[0x0][0x168], R8 ; /* 0x00005a0011067a25 */
/* 0x000fe200078e0208 */
/*0340*/ IADD3 R0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fc60007ffe0ff */
/*0350*/ IMAD.WIDE R2, R15.reuse, c[0x0][0x168], R2 ; /* 0x00005a000f027a25 */
/* 0x041fe200078e0202 */
/*0360*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */
/* 0x0041e6000c101904 */
/*0370*/ IMAD.WIDE R4, R15, c[0x0][0x168], R4 ; /* 0x00005a000f047a25 */
/* 0x001fe200078e0204 */
/*0380*/ @P1 BRA 0x2c0 ; /* 0xffffff3000001947 */
/* 0x000fea000383ffff */
/*0390*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03a0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*03b0*/ IMAD.MOV.U32 R27, RZ, RZ, 0x8 ; /* 0x00000008ff1b7424 */
/* 0x000fc800078e00ff */
/*03c0*/ IMAD.WIDE R4, R0, R27, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e021b */
/*03d0*/ LDG.E R19, [R4.64] ; /* 0x0000000404137981 */
/* 0x000ea2000c1e1900 */
/*03e0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x002fd400000001ff */
/*03f0*/ IMAD.WIDE R6, R0, R3, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fca00078e0203 */
/*0400*/ STG.E [R6.64], R19 ; /* 0x0000001306007986 */
/* 0x0041e8000c101904 */
/*0410*/ LDG.E R21, [R4.64+0x4] ; /* 0x0000040404157981 */
/* 0x000ea2000c1e1900 */
/*0420*/ IMAD.WIDE R8, R0, R3, c[0x0][0x178] ; /* 0x00005e0000087625 */
/* 0x000fc800078e0203 */
/*0430*/ IMAD.WIDE R10, R27, c[0x0][0x168], R4 ; /* 0x00005a001b0a7a25 */
/* 0x000fe200078e0204 */
/*0440*/ STG.E [R8.64], R21 ; /* 0x0000001508007986 */
/* 0x0043e8000c101904 */
/*0450*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */
/* 0x000ea2000c1e1900 */
/*0460*/ IMAD.WIDE R12, R3, c[0x0][0x168], R6 ; /* 0x00005a00030c7a25 */
/* 0x000fca00078e0206 */
/*0470*/ STG.E [R12.64], R23 ; /* 0x000000170c007986 */
/* 0x0045e8000c101904 */
/*0480*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee2000c1e1900 */
/*0490*/ IMAD.WIDE R14, R3, c[0x0][0x168], R8 ; /* 0x00005a00030e7a25 */
/* 0x000fc800078e0208 */
/*04a0*/ IMAD.WIDE R16, R27, c[0x0][0x168], R10 ; /* 0x00005a001b107a25 */
/* 0x000fe200078e020a */
/*04b0*/ STG.E [R14.64], R25 ; /* 0x000000190e007986 */
/* 0x0087e8000c101904 */
/*04c0*/ LDG.E R19, [R16.64] ; /* 0x0000000410137981 */
/* 0x001f22000c1e1900 */
/*04d0*/ IMAD.WIDE R4, R3, c[0x0][0x168], R12 ; /* 0x00005a0003047a25 */
/* 0x000fca00078e020c */
/*04e0*/ STG.E [R4.64], R19 ; /* 0x0000001304007986 */
/* 0x0101e8000c101904 */
/*04f0*/ LDG.E R21, [R16.64+0x4] ; /* 0x0000040410157981 */
/* 0x002f22000c1e1900 */
/*0500*/ IMAD.WIDE R6, R3, c[0x0][0x168], R14 ; /* 0x00005a0003067a25 */
/* 0x000fc800078e020e */
/*0510*/ IMAD.WIDE R8, R27, c[0x0][0x168], R16 ; /* 0x00005a001b087a25 */
/* 0x000fe200078e0210 */
/*0520*/ STG.E [R6.64], R21 ; /* 0x0000001506007986 */
/* 0x0103e8000c101904 */
/*0530*/ LDG.E R13, [R8.64] ; /* 0x00000004080d7981 */
/* 0x004ea2000c1e1900 */
/*0540*/ IMAD.WIDE R10, R3, c[0x0][0x168], R4 ; /* 0x00005a00030a7a25 */
/* 0x000fe200078e0204 */
/*0550*/ MOV R23, c[0x0][0x168] ; /* 0x00005a0000177a02 */
/* 0x000fc80000000f00 */
/*0560*/ STG.E [R10.64], R13 ; /* 0x0000000d0a007986 */
/* 0x0043e8000c101904 */
/*0570*/ LDG.E R15, [R8.64+0x4] ; /* 0x00000404080f7981 */
/* 0x008ea2000c1e1900 */
/*0580*/ IMAD.WIDE R2, R3, c[0x0][0x168], R6 ; /* 0x00005a0003027a25 */
/* 0x000fe200078e0206 */
/*0590*/ LEA R0, R23, R0, 0x1 ; /* 0x0000000017007211 */
/* 0x000fc600078e08ff */
/*05a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff057624 */
/* 0x001fca00078e00ff */
/*05b0*/ LEA R0, R5, R0, 0x1 ; /* 0x0000000005007211 */
/* 0x000fc800078e08ff */
/*05c0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fe20003f06270 */
/*05d0*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0043d8000c101904 */
/*05e0*/ @!P0 BRA 0x3b0 ; /* 0xfffffdc000008947 */
/* 0x000fea000383ffff */
/*05f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0600*/ BRA 0x600; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0610*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0620*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0630*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0680*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0690*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | typedef int2 Record;
__global__ void
mapImpl_kernel(Record *d_R, int delta, int rLen,int *d_output1, int *d_output2)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//Record value;
for(int pos=resultID;pos<rLen;pos+=delta)
{
//value=d_R[pos];
d_output1[pos]=d_R[pos].x;
d_output2[pos]=d_R[pos].y;
}
} | .file "tmpxft_00091dab_00000000-6_temp.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_
.type _Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_, @function
_Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14mapImpl_kernelP4int2iiPiS1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_, .-_Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_
.globl _Z14mapImpl_kernelP4int2iiPiS1_
.type _Z14mapImpl_kernelP4int2iiPiS1_, @function
_Z14mapImpl_kernelP4int2iiPiS1_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14mapImpl_kernelP4int2iiPiS1_, .-_Z14mapImpl_kernelP4int2iiPiS1_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z14mapImpl_kernelP4int2iiPiS1_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14mapImpl_kernelP4int2iiPiS1_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | typedef int2 Record;
__global__ void
mapImpl_kernel(Record *d_R, int delta, int rLen,int *d_output1, int *d_output2)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//Record value;
for(int pos=resultID;pos<rLen;pos+=delta)
{
//value=d_R[pos];
d_output1[pos]=d_R[pos].x;
d_output2[pos]=d_R[pos].y;
}
} | #include <hip/hip_runtime.h>
typedef int2 Record;
__global__ void
mapImpl_kernel(Record *d_R, int delta, int rLen,int *d_output1, int *d_output2)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//Record value;
for(int pos=resultID;pos<rLen;pos+=delta)
{
//value=d_R[pos];
d_output1[pos]=d_R[pos].x;
d_output2[pos]=d_R[pos].y;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
typedef int2 Record;
__global__ void
mapImpl_kernel(Record *d_R, int delta, int rLen,int *d_output1, int *d_output2)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//Record value;
for(int pos=resultID;pos<rLen;pos+=delta)
{
//value=d_R[pos];
d_output1[pos]=d_R[pos].x;
d_output2[pos]=d_R[pos].y;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.globl _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.p2align 8
.type _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_,@function
_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x20
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s12, s[0:1], 0xc
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v2, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s15
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_add3_u32 v3, s2, s14, v1
s_and_b32 s2, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, v3, s2, v[2:3]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s12, v0
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b64 s[8:9], s[0:1], 0x0
s_load_b32 s2, s[0:1], 0x8
v_ashrrev_i32_e32 v1, 31, v0
s_load_b128 s[4:7], s[0:1], 0x10
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 3, v[0:1]
v_lshlrev_b64 v[1:2], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, v3, s8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s9, v4, vcc_lo
s_ashr_i32 s3, s2, 31
v_add_co_u32 v3, vcc_lo, v3, 4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_lshl_b64 s[8:9], s[2:3], 3
s_lshl_b64 s[10:11], s[2:3], 2
.p2align 6
.LBB0_2:
global_load_b32 v7, v[3:4], off offset:-4
v_add_co_u32 v5, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v2, vcc_lo
v_add_nc_u32_e32 v0, s2, v0
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v7, off
global_load_b32 v7, v[3:4], off
v_add_co_u32 v5, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, v1, s10
v_add_co_ci_u32_e32 v2, vcc_lo, s11, v2, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s12, v0
v_add_co_u32 v3, s0, v3, s8
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, s9, v4, s0
s_or_b32 s1, vcc_lo, s1
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v7, off
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, .Lfunc_end0-_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
typedef int2 Record;
__global__ void
mapImpl_kernel(Record *d_R, int delta, int rLen,int *d_output1, int *d_output2)
{
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//Record value;
for(int pos=resultID;pos<rLen;pos+=delta)
{
//value=d_R[pos];
d_output1[pos]=d_R[pos].x;
d_output2[pos]=d_R[pos].y;
}
} | .text
.file "temp.hip"
.globl _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_ # -- Begin function _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.p2align 4, 0x90
.type _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_,@function
_Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_: # @_Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, .Lfunc_end0-_Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_,@object # @_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.section .rodata,"a",@progbits
.globl _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.p2align 3, 0x0
_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_:
.quad _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.size _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_"
.size .L__unnamed_1, 51
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z14mapImpl_kernelP4int2iiPiS1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2UR UR4, SR_CTAID.X ; /* 0x00000000000479c3 */
/* 0x000e220000002500 */
/*0020*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002200 */
/*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000ea20000002100 */
/*0050*/ IADD3 R0, R0, UR4, RZ ; /* 0x0000000400007c10 */
/* 0x001fca000fffe0ff */
/*0060*/ IMAD R0, R3, c[0x0][0xc], R0 ; /* 0x0000030003007a24 */
/* 0x002fc800078e0200 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0205 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ I2F.U32.RP R5, c[0x0][0x168] ; /* 0x00005a0000057b06 */
/* 0x000e220000209000 */
/*00b0*/ LOP3.LUT R4, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff047212 */
/* 0x000fe200078e33ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00d0*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fe20003f45070 */
/*00e0*/ BSSY B0, 0x3a0 ; /* 0x000002b000007945 */
/* 0x000fe20003800000 */
/*00f0*/ IADD3 R4, R4, c[0x0][0x16c], RZ ; /* 0x00005b0004047a10 */
/* 0x000fc60007ffe0ff */
/*0100*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R2, R5, 0xffffffe, RZ ; /* 0x0ffffffe05027810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD.MOV R7, RZ, RZ, -R3 ; /* 0x000000ffff077224 */
/* 0x002fc800078e0a03 */
/*0150*/ IMAD R7, R7, c[0x0][0x168], RZ ; /* 0x00005a0007077a24 */
/* 0x000fca00078e02ff */
/*0160*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fcc00078e0002 */
/*0170*/ IMAD.HI.U32 R3, R3, R4, RZ ; /* 0x0000000403037227 */
/* 0x000fc800078e00ff */
/*0180*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a03 */
/*0190*/ IMAD R4, R5, c[0x0][0x168], R4 ; /* 0x00005a0005047a24 */
/* 0x000fca00078e0204 */
/*01a0*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */
/* 0x000fda0003f06070 */
/*01b0*/ @P0 IADD3 R4, R4, -c[0x0][0x168], RZ ; /* 0x80005a0004040a10 */
/* 0x000fe40007ffe0ff */
/*01c0*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*01d0*/ ISETP.GE.U32.AND P1, PT, R4, c[0x0][0x168], PT ; /* 0x00005a0004007a0c */
/* 0x000fda0003f26070 */
/*01e0*/ @P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103031810 */
/* 0x000fe40007ffe0ff */
/*01f0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff03aa12 */
/* 0x000fc800078e33ff */
/*0200*/ IADD3 R2, R3.reuse, 0x1, RZ ; /* 0x0000000103027810 */
/* 0x040fe40007ffe0ff */
/*0210*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fe40003f06070 */
/*0220*/ LOP3.LUT P1, R4, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302047812 */
/* 0x000fda000782c0ff */
/*0230*/ @!P1 BRA 0x390 ; /* 0x0000015000009947 */
/* 0x000fea0003800000 */
/*0240*/ MOV R17, 0x8 ; /* 0x0000000800117802 */
/* 0x000fe20000000f00 */
/*0250*/ IMAD.MOV.U32 R15, RZ, RZ, 0x4 ; /* 0x00000004ff0f7424 */
/* 0x000fe200078e00ff */
/*0260*/ MOV R10, R4 ; /* 0x00000004000a7202 */
/* 0x000fc60000000f00 */
/*0270*/ IMAD.WIDE R2, R0, R17, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0211 */
/*0280*/ IMAD.WIDE R4, R0, R15, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x000fe200078e020f */
/*0290*/ IADD3 R6, P1, R2, 0x4, RZ ; /* 0x0000000402067810 */
/* 0x000fca0007f3e0ff */
/*02a0*/ IMAD.X R7, RZ, RZ, R3, P1 ; /* 0x000000ffff077224 */
/* 0x000fe400008e0603 */
/*02b0*/ IMAD.WIDE R2, R0, R15, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fc800078e020f */
/*02c0*/ MOV R8, R6 ; /* 0x0000000600087202 */
/* 0x000fe40000000f00 */
/*02d0*/ MOV R9, R7 ; /* 0x0000000700097202 */
/* 0x000fca0000000f00 */
/*02e0*/ LDG.E R11, [R8.64+-0x4] ; /* 0xfffffc04080b7981 */
/* 0x000ea2000c1e1900 */
/*02f0*/ IADD3 R10, R10, -0x1, RZ ; /* 0xffffffff0a0a7810 */
/* 0x000fc80007ffe0ff */
/*0300*/ ISETP.NE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe20003f25270 */
/*0310*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0041e8000c101904 */
/*0320*/ LDG.E R13, [R8.64] ; /* 0x00000004080d7981 */
/* 0x000ea2000c1e1900 */
/*0330*/ IMAD.WIDE R6, R17, c[0x0][0x168], R8 ; /* 0x00005a0011067a25 */
/* 0x000fe200078e0208 */
/*0340*/ IADD3 R0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fc60007ffe0ff */
/*0350*/ IMAD.WIDE R2, R15.reuse, c[0x0][0x168], R2 ; /* 0x00005a000f027a25 */
/* 0x041fe200078e0202 */
/*0360*/ STG.E [R4.64], R13 ; /* 0x0000000d04007986 */
/* 0x0041e6000c101904 */
/*0370*/ IMAD.WIDE R4, R15, c[0x0][0x168], R4 ; /* 0x00005a000f047a25 */
/* 0x001fe200078e0204 */
/*0380*/ @P1 BRA 0x2c0 ; /* 0xffffff3000001947 */
/* 0x000fea000383ffff */
/*0390*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03a0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*03b0*/ IMAD.MOV.U32 R27, RZ, RZ, 0x8 ; /* 0x00000008ff1b7424 */
/* 0x000fc800078e00ff */
/*03c0*/ IMAD.WIDE R4, R0, R27, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e021b */
/*03d0*/ LDG.E R19, [R4.64] ; /* 0x0000000404137981 */
/* 0x000ea2000c1e1900 */
/*03e0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x002fd400000001ff */
/*03f0*/ IMAD.WIDE R6, R0, R3, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fca00078e0203 */
/*0400*/ STG.E [R6.64], R19 ; /* 0x0000001306007986 */
/* 0x0041e8000c101904 */
/*0410*/ LDG.E R21, [R4.64+0x4] ; /* 0x0000040404157981 */
/* 0x000ea2000c1e1900 */
/*0420*/ IMAD.WIDE R8, R0, R3, c[0x0][0x178] ; /* 0x00005e0000087625 */
/* 0x000fc800078e0203 */
/*0430*/ IMAD.WIDE R10, R27, c[0x0][0x168], R4 ; /* 0x00005a001b0a7a25 */
/* 0x000fe200078e0204 */
/*0440*/ STG.E [R8.64], R21 ; /* 0x0000001508007986 */
/* 0x0043e8000c101904 */
/*0450*/ LDG.E R23, [R10.64] ; /* 0x000000040a177981 */
/* 0x000ea2000c1e1900 */
/*0460*/ IMAD.WIDE R12, R3, c[0x0][0x168], R6 ; /* 0x00005a00030c7a25 */
/* 0x000fca00078e0206 */
/*0470*/ STG.E [R12.64], R23 ; /* 0x000000170c007986 */
/* 0x0045e8000c101904 */
/*0480*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee2000c1e1900 */
/*0490*/ IMAD.WIDE R14, R3, c[0x0][0x168], R8 ; /* 0x00005a00030e7a25 */
/* 0x000fc800078e0208 */
/*04a0*/ IMAD.WIDE R16, R27, c[0x0][0x168], R10 ; /* 0x00005a001b107a25 */
/* 0x000fe200078e020a */
/*04b0*/ STG.E [R14.64], R25 ; /* 0x000000190e007986 */
/* 0x0087e8000c101904 */
/*04c0*/ LDG.E R19, [R16.64] ; /* 0x0000000410137981 */
/* 0x001f22000c1e1900 */
/*04d0*/ IMAD.WIDE R4, R3, c[0x0][0x168], R12 ; /* 0x00005a0003047a25 */
/* 0x000fca00078e020c */
/*04e0*/ STG.E [R4.64], R19 ; /* 0x0000001304007986 */
/* 0x0101e8000c101904 */
/*04f0*/ LDG.E R21, [R16.64+0x4] ; /* 0x0000040410157981 */
/* 0x002f22000c1e1900 */
/*0500*/ IMAD.WIDE R6, R3, c[0x0][0x168], R14 ; /* 0x00005a0003067a25 */
/* 0x000fc800078e020e */
/*0510*/ IMAD.WIDE R8, R27, c[0x0][0x168], R16 ; /* 0x00005a001b087a25 */
/* 0x000fe200078e0210 */
/*0520*/ STG.E [R6.64], R21 ; /* 0x0000001506007986 */
/* 0x0103e8000c101904 */
/*0530*/ LDG.E R13, [R8.64] ; /* 0x00000004080d7981 */
/* 0x004ea2000c1e1900 */
/*0540*/ IMAD.WIDE R10, R3, c[0x0][0x168], R4 ; /* 0x00005a00030a7a25 */
/* 0x000fe200078e0204 */
/*0550*/ MOV R23, c[0x0][0x168] ; /* 0x00005a0000177a02 */
/* 0x000fc80000000f00 */
/*0560*/ STG.E [R10.64], R13 ; /* 0x0000000d0a007986 */
/* 0x0043e8000c101904 */
/*0570*/ LDG.E R15, [R8.64+0x4] ; /* 0x00000404080f7981 */
/* 0x008ea2000c1e1900 */
/*0580*/ IMAD.WIDE R2, R3, c[0x0][0x168], R6 ; /* 0x00005a0003027a25 */
/* 0x000fe200078e0206 */
/*0590*/ LEA R0, R23, R0, 0x1 ; /* 0x0000000017007211 */
/* 0x000fc600078e08ff */
/*05a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff057624 */
/* 0x001fca00078e00ff */
/*05b0*/ LEA R0, R5, R0, 0x1 ; /* 0x0000000005007211 */
/* 0x000fc800078e08ff */
/*05c0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fe20003f06270 */
/*05d0*/ STG.E [R2.64], R15 ; /* 0x0000000f02007986 */
/* 0x0043d8000c101904 */
/*05e0*/ @!P0 BRA 0x3b0 ; /* 0xfffffdc000008947 */
/* 0x000fea000383ffff */
/*05f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0600*/ BRA 0x600; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0610*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0620*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0630*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0680*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0690*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*06f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.globl _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.p2align 8
.type _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_,@function
_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x20
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s12, s[0:1], 0xc
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v2, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s15
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_add3_u32 v3, s2, s14, v1
s_and_b32 s2, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, v3, s2, v[2:3]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s12, v0
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b64 s[8:9], s[0:1], 0x0
s_load_b32 s2, s[0:1], 0x8
v_ashrrev_i32_e32 v1, 31, v0
s_load_b128 s[4:7], s[0:1], 0x10
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 3, v[0:1]
v_lshlrev_b64 v[1:2], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, v3, s8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s9, v4, vcc_lo
s_ashr_i32 s3, s2, 31
v_add_co_u32 v3, vcc_lo, v3, 4
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_lshl_b64 s[8:9], s[2:3], 3
s_lshl_b64 s[10:11], s[2:3], 2
.p2align 6
.LBB0_2:
global_load_b32 v7, v[3:4], off offset:-4
v_add_co_u32 v5, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v2, vcc_lo
v_add_nc_u32_e32 v0, s2, v0
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v7, off
global_load_b32 v7, v[3:4], off
v_add_co_u32 v5, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, v1, s10
v_add_co_ci_u32_e32 v2, vcc_lo, s11, v2, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s12, v0
v_add_co_u32 v3, s0, v3, s8
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, s9, v4, s0
s_or_b32 s1, vcc_lo, s1
s_waitcnt vmcnt(0)
global_store_b32 v[5:6], v7, off
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, .Lfunc_end0-_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00091dab_00000000-6_temp.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_
.type _Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_, @function
_Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14mapImpl_kernelP4int2iiPiS1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_, .-_Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_
.globl _Z14mapImpl_kernelP4int2iiPiS1_
.type _Z14mapImpl_kernelP4int2iiPiS1_, @function
_Z14mapImpl_kernelP4int2iiPiS1_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z14mapImpl_kernelP4int2iiPiS1_P4int2iiPiS1_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14mapImpl_kernelP4int2iiPiS1_, .-_Z14mapImpl_kernelP4int2iiPiS1_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z14mapImpl_kernelP4int2iiPiS1_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14mapImpl_kernelP4int2iiPiS1_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "temp.hip"
.globl _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_ # -- Begin function _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.p2align 4, 0x90
.type _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_,@function
_Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_: # @_Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, .Lfunc_end0-_Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_,@object # @_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.section .rodata,"a",@progbits
.globl _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.p2align 3, 0x0
_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_:
.quad _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.size _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_"
.size .L__unnamed_1, 51
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14mapImpl_kernelP15HIP_vector_typeIiLj2EEiiPiS2_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //
// main.cpp
// matrix-test
//
// Created by Nikita Makarov on 22/03/16.
// Copyright © 2016 Nikita Makarov. All rights reserved.
//
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <iomanip>
using namespace std;
const double eps = 10e-7;
void print_matrix(double **M, long n, long m)
{
cout.setf(ios::scientific);
cout.precision(10);
for (long i = 0; i < n; i++) {
cout << M[i][0];
for (long j = 1; j < m; j++) {
cout << " " << M[i][j];
}
cout << endl;
}
}
void swap_rows(long *prm, long i, long j)
{
long temp = prm[i];
prm[i] = prm[j];
prm[j] = temp;
}
bool zero_column(double **A, long *prm, long n, long i, long j)
{
for ( ; i < n; i++) {
if (fabs(A[prm[i]][j]) > eps) {
return false;
}
}
return true;
}
void solve_equation(double **A, double **X, double **B, long n, long m, long k)
{
long *prm = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
prm[i] = i;
}
long *x_index = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
x_index[i] = i;
}
long row = 0;
long col = 0;
for ( ; row < n && col < m; row++, col++) {
long max_value_row = row;
for (long i = row + 1; i < n; i++) {
if (fabs(A[prm[i]][col]) > fabs(A[prm[max_value_row]][col])) {
max_value_row = i;
}
}
swap_rows(prm, row, max_value_row);
if (fabs(A[prm[row]][col]) > eps) {
x_index[row] = col;
for (long i = row + 1; i < n; i++) {
double factor = -A[prm[i]][col] / A[prm[row]][col];
for (long j = col; j < m; j++) {
A[prm[i]][j] += A[prm[row]][j] * factor;
}
for (long j = 0; j < k; j++) {
B[prm[i]][j] += B[prm[row]][j] * factor;
}
}
} else {
row--;
}
}
if (row == n || col == m) { // ??
row--;
}
for (long t = 0; t < k; t++) {
for (long i = row; i >= 0; i--) {
long index = x_index[i];
double sum = 0.0;
for (long j = index + 1; j < m; j++) {
sum += A[prm[i]][j] * X[j][t];
}
if (fabs(A[prm[i]][index]) > eps) {
X[index][t] = (B[prm[i]][t] - sum) / A[prm[i]][index];
} else {
X[index][t] = 0.0;
}
}
}
}
int main()
{
long n, m, k;
cin >> n >> m >> k;
double **A = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
A[i] = (double *)malloc(m * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < m; j++) {
cin >> A[i][j];
}
}
double **B = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
B[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < k; j++) {
cin >> B[i][j];
}
}
double **X = (double **)malloc(m * sizeof(double *));
for (long i = 0; i < m; i++) {
X[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < m; i++) {
for (long j = 0; j < k; j++) {
X[i][j] = 0.0;
}
}
solve_equation(A, X, B, n, m, k);
print_matrix(X, m, k);
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //
// main.cpp
// matrix-test
//
// Created by Nikita Makarov on 22/03/16.
// Copyright © 2016 Nikita Makarov. All rights reserved.
//
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <iomanip>
using namespace std;
const double eps = 10e-7;
void print_matrix(double **M, long n, long m)
{
cout.setf(ios::scientific);
cout.precision(10);
for (long i = 0; i < n; i++) {
cout << M[i][0];
for (long j = 1; j < m; j++) {
cout << " " << M[i][j];
}
cout << endl;
}
}
void swap_rows(long *prm, long i, long j)
{
long temp = prm[i];
prm[i] = prm[j];
prm[j] = temp;
}
bool zero_column(double **A, long *prm, long n, long i, long j)
{
for ( ; i < n; i++) {
if (fabs(A[prm[i]][j]) > eps) {
return false;
}
}
return true;
}
void solve_equation(double **A, double **X, double **B, long n, long m, long k)
{
long *prm = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
prm[i] = i;
}
long *x_index = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
x_index[i] = i;
}
long row = 0;
long col = 0;
for ( ; row < n && col < m; row++, col++) {
long max_value_row = row;
for (long i = row + 1; i < n; i++) {
if (fabs(A[prm[i]][col]) > fabs(A[prm[max_value_row]][col])) {
max_value_row = i;
}
}
swap_rows(prm, row, max_value_row);
if (fabs(A[prm[row]][col]) > eps) {
x_index[row] = col;
for (long i = row + 1; i < n; i++) {
double factor = -A[prm[i]][col] / A[prm[row]][col];
for (long j = col; j < m; j++) {
A[prm[i]][j] += A[prm[row]][j] * factor;
}
for (long j = 0; j < k; j++) {
B[prm[i]][j] += B[prm[row]][j] * factor;
}
}
} else {
row--;
}
}
if (row == n || col == m) { // ??
row--;
}
for (long t = 0; t < k; t++) {
for (long i = row; i >= 0; i--) {
long index = x_index[i];
double sum = 0.0;
for (long j = index + 1; j < m; j++) {
sum += A[prm[i]][j] * X[j][t];
}
if (fabs(A[prm[i]][index]) > eps) {
X[index][t] = (B[prm[i]][t] - sum) / A[prm[i]][index];
} else {
X[index][t] = 0.0;
}
}
}
}
int main()
{
long n, m, k;
cin >> n >> m >> k;
double **A = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
A[i] = (double *)malloc(m * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < m; j++) {
cin >> A[i][j];
}
}
double **B = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
B[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < k; j++) {
cin >> B[i][j];
}
}
double **X = (double **)malloc(m * sizeof(double *));
for (long i = 0; i < m; i++) {
X[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < m; i++) {
for (long j = 0; j < k; j++) {
X[i][j] = 0.0;
}
}
solve_equation(A, X, B, n, m, k);
print_matrix(X, m, k);
return 0;
} | .file "tmpxft_001998a0_00000000-6_test_cpu.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4046:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4046:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string " "
.text
.globl _Z12print_matrixPPdll
.type _Z12print_matrixPPdll, @function
_Z12print_matrixPPdll:
.LFB4039:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, 8(%rsp)
orl $256, 32+_ZSt4cout(%rip)
movq $10, 16+_ZSt4cout(%rip)
testq %rsi, %rsi
jle .L3
movq %rdx, %r12
movq %rdi, %r15
movq $0, (%rsp)
leaq _ZSt4cout(%rip), %rbp
leaq .LC0(%rip), %r14
jmp .L10
.L14:
call _ZSt16__throw_bad_castv@PLT
.L8:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L9:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $1, (%rsp)
movq (%rsp), %rax
addq $8, %r15
cmpq %rax, 8(%rsp)
je .L3
.L10:
movq %r15, %r13
movq (%r15), %rax
movsd (%rax), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
cmpq $1, %r12
jle .L5
movl $1, %ebx
.L6:
movl $1, %edx
movq %r14, %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 0(%r13), %rax
movsd (%rax,%rbx,8), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
addq $1, %rbx
cmpq %rbx, %r12
jne .L6
.L5:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .L14
cmpb $0, 56(%rbx)
je .L8
movzbl 67(%rbx), %esi
jmp .L9
.L3:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4039:
.size _Z12print_matrixPPdll, .-_Z12print_matrixPPdll
.globl _Z9swap_rowsPlll
.type _Z9swap_rowsPlll, @function
_Z9swap_rowsPlll:
.LFB4040:
.cfi_startproc
endbr64
movq %rdx, %rax
leaq (%rdi,%rsi,8), %rdx
movq (%rdx), %rcx
leaq (%rdi,%rax,8), %rax
movq (%rax), %rsi
movq %rsi, (%rdx)
movq %rcx, (%rax)
ret
.cfi_endproc
.LFE4040:
.size _Z9swap_rowsPlll, .-_Z9swap_rowsPlll
.globl _Z11zero_columnPPdPllll
.type _Z11zero_columnPPdPllll, @function
_Z11zero_columnPPdPllll:
.LFB4041:
.cfi_startproc
endbr64
cmpq %rdx, %rcx
jge .L19
salq $3, %r8
movq .LC1(%rip), %xmm2
movsd .LC2(%rip), %xmm1
.L18:
movq (%rsi,%rcx,8), %rax
movq (%rdi,%rax,8), %rax
movsd (%rax,%r8), %xmm0
andpd %xmm2, %xmm0
comisd %xmm1, %xmm0
ja .L20
addq $1, %rcx
cmpq %rcx, %rdx
jne .L18
movl $1, %eax
ret
.L19:
movl $1, %eax
ret
.L20:
movl $0, %eax
ret
.cfi_endproc
.LFE4041:
.size _Z11zero_columnPPdPllll, .-_Z11zero_columnPPdPllll
.globl _Z14solve_equationPPdS0_S0_lll
.type _Z14solve_equationPPdS0_S0_lll, @function
_Z14solve_equationPPdS0_S0_lll:
.LFB4042:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r14
movq %rsi, %rbp
movq %rdx, %r12
movq %r8, %rbx
movq %r9, %r15
movq %rcx, 56(%rsp)
leaq 0(,%rcx,8), %rax
movq %rax, (%rsp)
movq %rax, %rdi
call malloc@PLT
movq %rax, %r13
movq 56(%rsp), %rdx
testq %rdx, %rdx
jle .L23
movl $0, %eax
.L24:
movq %rax, 0(%r13,%rax,8)
movq %rax, %rcx
addq $1, %rax
cmpq %rax, %rdx
jne .L24
movq %rcx, 16(%rsp)
movq (%rsp), %rdi
call malloc@PLT
movl $0, %edx
movq 16(%rsp), %rsi
.L25:
movq %rdx, (%rax,%rdx,8)
movq %rdx, %rcx
addq $1, %rdx
cmpq %rcx, %rsi
jne .L25
testq %rbx, %rbx
jle .L52
movq (%rsp), %rcx
addq %r13, %rcx
movq %rcx, 40(%rsp)
movq $0, 8(%rsp)
movq $0, (%rsp)
movq %rax, 32(%rsp)
movq %rbp, 48(%rsp)
movq 16(%rsp), %rbp
jmp .L39
.L55:
movq %rsi, %rax
.L29:
movq 0(%r13,%rax,8), %rsi
movq (%r14,%rsi,8), %rsi
movsd (%rsi,%rcx), %xmm1
andpd .LC1(%rip), %xmm1
movq 0(%r13,%rdx,8), %rsi
movq (%r14,%rsi,8), %rsi
movsd (%rsi,%rcx), %xmm0
andpd .LC1(%rip), %xmm0
ucomisd %xmm0, %xmm1
cmova %rax, %rdx
leaq 1(%rax), %rsi
cmpq %rax, %rbp
jne .L55
.L27:
movq (%rsp), %rsi
movq %r13, %rdi
call _Z9swap_rowsPlll
movq (%rsp), %rax
leaq 0(,%rax,8), %rdx
movq 0(%r13,%rax,8), %rax
movq %rax, 16(%rsp)
leaq 0(,%rax,8), %rsi
leaq (%r14,%rsi), %rcx
movq 8(%rsp), %rdi
leaq 0(,%rdi,8), %r8
movq (%rcx), %rax
movsd (%rax,%rdi,8), %xmm0
andpd .LC1(%rip), %xmm0
movq (%rsp), %rax
subq $1, %rax
comisd .LC2(%rip), %xmm0
ja .L77
.L38:
leaq 1(%rax), %rcx
movq %rcx, (%rsp)
addq $1, 8(%rsp)
movq 8(%rsp), %rcx
cmpq %rbp, %rax
jge .L74
cmpq %rcx, %rbx
jle .L78
.L39:
movq (%rsp), %rdx
leaq 1(%rdx), %rax
movq %rax, 24(%rsp)
cmpq %rdx, %rbp
jle .L53
movq 8(%rsp), %rcx
salq $3, %rcx
jmp .L29
.L53:
movq (%rsp), %rdx
jmp .L27
.L77:
movq 32(%rsp), %rdi
movq (%rsp), %rax
movq 8(%rsp), %r11
movq %r11, (%rdi,%rax,8)
cmpq %rax, %rbp
jle .L32
leaq 8(%r13,%rdx), %rdi
movq %r13, 24(%rsp)
movq %rbp, %r13
movq 40(%rsp), %rbp
.L37:
movq (%rdi), %rax
leaq 0(,%rax,8), %r9
leaq (%r14,%r9), %r10
movq (%r10), %rax
movsd (%rax,%r8), %xmm1
xorpd .LC4(%rip), %xmm1
movq (%rcx), %rax
divsd (%rax,%r8), %xmm1
cmpq %r11, %rbx
jle .L33
movq %r11, %rax
movq %rsi, 16(%rsp)
.L34:
movq (%r10), %rdx
leaq (%rdx,%rax,8), %rdx
movq (%rcx), %rsi
movapd %xmm1, %xmm0
mulsd (%rsi,%rax,8), %xmm0
addsd (%rdx), %xmm0
movsd %xmm0, (%rdx)
addq $1, %rax
cmpq %rax, %rbx
jne .L34
movq 16(%rsp), %rsi
.L33:
testq %r15, %r15
jle .L35
movl $0, %eax
.L36:
movq (%r12,%r9), %rdx
leaq (%rdx,%rax,8), %rdx
movq (%r12,%rsi), %r10
movapd %xmm1, %xmm0
mulsd (%r10,%rax,8), %xmm0
addsd (%rdx), %xmm0
movsd %xmm0, (%rdx)
addq $1, %rax
cmpq %rax, %r15
jne .L36
.L35:
addq $8, %rdi
cmpq %rbp, %rdi
jne .L37
movq %r13, %rbp
movq 24(%rsp), %r13
movq (%rsp), %rax
jmp .L38
.L78:
movq 32(%rsp), %rax
movq 48(%rsp), %rbp
jmp .L26
.L52:
movq $0, 8(%rsp)
movq $0, (%rsp)
jmp .L26
.L74:
movq 32(%rsp), %rax
movq 48(%rsp), %rbp
.L26:
movq (%rsp), %rsi
cmpq %rsi, 56(%rsp)
je .L60
movq 8(%rsp), %rcx
cmpq %rcx, %rbx
je .L60
.L41:
movl $0, %ecx
movl $0, %edx
testq %r15, %r15
jle .L22
movq .LC1(%rip), %xmm4
movsd .LC2(%rip), %xmm3
movq (%rsp), %rsi
jmp .L43
.L60:
subq $1, (%rsp)
jmp .L41
.L57:
pxor %xmm1, %xmm1
jmp .L45
.L73:
movq 0(%rbp,%r10,8), %rdx
movq $0x000000000, (%rdx,%rcx)
.L49:
subq $1, %r9
cmpq $-1, %r9
je .L79
.L50:
movq %r9, %r11
movq (%rax,%r9,8), %r10
leaq 1(%r10), %rdx
cmpq %rdx, %rbx
jle .L57
movq 0(%r13,%r9,8), %rdi
movq (%r14,%rdi,8), %r8
pxor %xmm1, %xmm1
.L46:
movq 0(%rbp,%rdx,8), %rdi
movsd (%rdi,%rcx), %xmm0
mulsd (%r8,%rdx,8), %xmm0
addsd %xmm0, %xmm1
addq $1, %rdx
cmpq %rdx, %rbx
jne .L46
.L45:
movq 0(%r13,%r11,8), %rdi
movq (%r14,%rdi,8), %rdx
movsd (%rdx,%r10,8), %xmm2
movapd %xmm2, %xmm0
andpd %xmm4, %xmm0
comisd %xmm3, %xmm0
jbe .L73
movq 0(%rbp,%r10,8), %rdx
movq (%r12,%rdi,8), %rdi
movsd (%rdi,%rcx), %xmm0
subsd %xmm1, %xmm0
divsd %xmm2, %xmm0
movsd %xmm0, (%rdx,%rcx)
jmp .L49
.L79:
movq (%rsp), %rdx
.L51:
addq $1, %rdx
addq $8, %rcx
cmpq %rdx, %r15
je .L22
.L43:
movq %rsi, %r9
testq %rsi, %rsi
js .L51
movq %rdx, (%rsp)
jmp .L50
.L32:
movq 32(%rsp), %rax
movq 48(%rsp), %rbp
addq $1, 8(%rsp)
movq 24(%rsp), %rcx
movq %rcx, (%rsp)
jmp .L26
.L23:
movq (%rsp), %rdi
call malloc@PLT
movq $0, 8(%rsp)
movq $0, (%rsp)
jmp .L26
.L22:
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4042:
.size _Z14solve_equationPPdS0_S0_lll, .-_Z14solve_equationPPdS0_S0_lll
.globl main
.type main, @function
main:
.LFB4043:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSi10_M_extractIlEERSiRT_@PLT
movq %rax, %rdi
leaq 40(%rsp), %rsi
call _ZNSi10_M_extractIlEERSiRT_@PLT
movq %rax, %rdi
leaq 48(%rsp), %rsi
call _ZNSi10_M_extractIlEERSiRT_@PLT
movq 32(%rsp), %rbx
leaq 0(,%rbx,8), %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
testq %rbx, %rbx
jle .L81
movq %rax, %rbx
movq %rax, %rbp
addq %rax, %r12
.L82:
movq 40(%rsp), %rax
leaq 0(,%rax,8), %rdi
call malloc@PLT
movq %rax, (%rbx)
addq $8, %rbx
cmpq %r12, %rbx
jne .L82
movl $0, %r13d
leaq _ZSt3cin(%rip), %r12
jmp .L83
.L84:
movq 0(%rbp), %rax
leaq (%rax,%rbx,8), %rsi
movq %r12, %rdi
call _ZNSi10_M_extractIdEERSiRT_@PLT
addq $1, %rbx
cmpq %rbx, 40(%rsp)
jg .L84
.L86:
addq $1, %r13
movq 32(%rsp), %rbx
addq $8, %rbp
cmpq %r13, %rbx
jle .L85
.L83:
movl $0, %ebx
cmpq $0, 40(%rsp)
jg .L84
jmp .L86
.L85:
leaq 0(,%rbx,8), %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
testq %rbx, %rbx
jle .L87
movq %rax, %rbx
addq %rax, %rbp
.L88:
movq 48(%rsp), %rax
leaq 0(,%rax,8), %rdi
call malloc@PLT
movq %rax, (%rbx)
addq $8, %rbx
cmpq %rbp, %rbx
jne .L88
movq 16(%rsp), %rbx
movl $0, %r12d
leaq _ZSt3cin(%rip), %rbp
jmp .L99
.L90:
movq (%rbx), %rax
leaq (%rax,%r13,8), %rsi
movq %rbp, %rdi
call _ZNSi10_M_extractIdEERSiRT_@PLT
addq $1, %r13
cmpq %r13, 48(%rsp)
jg .L90
.L92:
addq $1, %r12
movq 32(%rsp), %r13
addq $8, %rbx
cmpq %r12, %r13
jle .L91
.L99:
movl $0, %r13d
cmpq $0, 48(%rsp)
jg .L90
jmp .L92
.L97:
movq (%r14,%rcx,8), %rax
leaq (%r12,%rax), %rdx
.L96:
movq $0x000000000, (%rax)
addq $8, %rax
cmpq %rax, %rdx
jne .L96
.L98:
addq $1, %rcx
cmpq %rcx, %r15
je .L93
.L95:
testq %rbp, %rbp
jg .L97
jmp .L98
.L93:
movq 48(%rsp), %r9
movq %r15, %r8
movq %r13, %rcx
movq 16(%rsp), %rdx
movq %r14, %rsi
movq 24(%rsp), %rdi
call _Z14solve_equationPPdS0_S0_lll
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
movq %r14, %rdi
call _Z12print_matrixPPdll
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L114
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L81:
.cfi_restore_state
movq %r12, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
.L87:
movq 32(%rsp), %r13
.L91:
movq 40(%rsp), %r15
leaq 0(,%r15,8), %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r14
testq %r15, %r15
jle .L93
movq %rax, %rbx
leaq 0(%rbp,%rax), %rax
movq %rax, 8(%rsp)
.L94:
movq 48(%rsp), %rbp
leaq 0(,%rbp,8), %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, (%rbx)
addq $8, %rbx
movq 8(%rsp), %rax
cmpq %rax, %rbx
jne .L94
movl $0, %ecx
jmp .L95
.L114:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4043:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4069:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4069:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC1:
.long -1
.long 2147483647
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long -1598689907
.long 1051772663
.section .rodata.cst16
.align 16
.LC4:
.long 0
.long -2147483648
.long 0
.long 0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //
// main.cpp
// matrix-test
//
// Created by Nikita Makarov on 22/03/16.
// Copyright © 2016 Nikita Makarov. All rights reserved.
//
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <iomanip>
using namespace std;
const double eps = 10e-7;
void print_matrix(double **M, long n, long m)
{
cout.setf(ios::scientific);
cout.precision(10);
for (long i = 0; i < n; i++) {
cout << M[i][0];
for (long j = 1; j < m; j++) {
cout << " " << M[i][j];
}
cout << endl;
}
}
void swap_rows(long *prm, long i, long j)
{
long temp = prm[i];
prm[i] = prm[j];
prm[j] = temp;
}
bool zero_column(double **A, long *prm, long n, long i, long j)
{
for ( ; i < n; i++) {
if (fabs(A[prm[i]][j]) > eps) {
return false;
}
}
return true;
}
void solve_equation(double **A, double **X, double **B, long n, long m, long k)
{
long *prm = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
prm[i] = i;
}
long *x_index = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
x_index[i] = i;
}
long row = 0;
long col = 0;
for ( ; row < n && col < m; row++, col++) {
long max_value_row = row;
for (long i = row + 1; i < n; i++) {
if (fabs(A[prm[i]][col]) > fabs(A[prm[max_value_row]][col])) {
max_value_row = i;
}
}
swap_rows(prm, row, max_value_row);
if (fabs(A[prm[row]][col]) > eps) {
x_index[row] = col;
for (long i = row + 1; i < n; i++) {
double factor = -A[prm[i]][col] / A[prm[row]][col];
for (long j = col; j < m; j++) {
A[prm[i]][j] += A[prm[row]][j] * factor;
}
for (long j = 0; j < k; j++) {
B[prm[i]][j] += B[prm[row]][j] * factor;
}
}
} else {
row--;
}
}
if (row == n || col == m) { // ??
row--;
}
for (long t = 0; t < k; t++) {
for (long i = row; i >= 0; i--) {
long index = x_index[i];
double sum = 0.0;
for (long j = index + 1; j < m; j++) {
sum += A[prm[i]][j] * X[j][t];
}
if (fabs(A[prm[i]][index]) > eps) {
X[index][t] = (B[prm[i]][t] - sum) / A[prm[i]][index];
} else {
X[index][t] = 0.0;
}
}
}
}
int main()
{
long n, m, k;
cin >> n >> m >> k;
double **A = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
A[i] = (double *)malloc(m * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < m; j++) {
cin >> A[i][j];
}
}
double **B = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
B[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < k; j++) {
cin >> B[i][j];
}
}
double **X = (double **)malloc(m * sizeof(double *));
for (long i = 0; i < m; i++) {
X[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < m; i++) {
for (long j = 0; j < k; j++) {
X[i][j] = 0.0;
}
}
solve_equation(A, X, B, n, m, k);
print_matrix(X, m, k);
return 0;
} | //
// main.cpp
// matrix-test
//
// Created by Nikita Makarov on 22/03/16.
// Copyright © 2016 Nikita Makarov. All rights reserved.
//
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <iomanip>
using namespace std;
const double eps = 10e-7;
void print_matrix(double **M, long n, long m)
{
cout.setf(ios::scientific);
cout.precision(10);
for (long i = 0; i < n; i++) {
cout << M[i][0];
for (long j = 1; j < m; j++) {
cout << " " << M[i][j];
}
cout << endl;
}
}
void swap_rows(long *prm, long i, long j)
{
long temp = prm[i];
prm[i] = prm[j];
prm[j] = temp;
}
bool zero_column(double **A, long *prm, long n, long i, long j)
{
for ( ; i < n; i++) {
if (fabs(A[prm[i]][j]) > eps) {
return false;
}
}
return true;
}
void solve_equation(double **A, double **X, double **B, long n, long m, long k)
{
long *prm = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
prm[i] = i;
}
long *x_index = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
x_index[i] = i;
}
long row = 0;
long col = 0;
for ( ; row < n && col < m; row++, col++) {
long max_value_row = row;
for (long i = row + 1; i < n; i++) {
if (fabs(A[prm[i]][col]) > fabs(A[prm[max_value_row]][col])) {
max_value_row = i;
}
}
swap_rows(prm, row, max_value_row);
if (fabs(A[prm[row]][col]) > eps) {
x_index[row] = col;
for (long i = row + 1; i < n; i++) {
double factor = -A[prm[i]][col] / A[prm[row]][col];
for (long j = col; j < m; j++) {
A[prm[i]][j] += A[prm[row]][j] * factor;
}
for (long j = 0; j < k; j++) {
B[prm[i]][j] += B[prm[row]][j] * factor;
}
}
} else {
row--;
}
}
if (row == n || col == m) { // ??
row--;
}
for (long t = 0; t < k; t++) {
for (long i = row; i >= 0; i--) {
long index = x_index[i];
double sum = 0.0;
for (long j = index + 1; j < m; j++) {
sum += A[prm[i]][j] * X[j][t];
}
if (fabs(A[prm[i]][index]) > eps) {
X[index][t] = (B[prm[i]][t] - sum) / A[prm[i]][index];
} else {
X[index][t] = 0.0;
}
}
}
}
int main()
{
long n, m, k;
cin >> n >> m >> k;
double **A = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
A[i] = (double *)malloc(m * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < m; j++) {
cin >> A[i][j];
}
}
double **B = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
B[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < k; j++) {
cin >> B[i][j];
}
}
double **X = (double **)malloc(m * sizeof(double *));
for (long i = 0; i < m; i++) {
X[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < m; i++) {
for (long j = 0; j < k; j++) {
X[i][j] = 0.0;
}
}
solve_equation(A, X, B, n, m, k);
print_matrix(X, m, k);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //
// main.cpp
// matrix-test
//
// Created by Nikita Makarov on 22/03/16.
// Copyright © 2016 Nikita Makarov. All rights reserved.
//
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <iomanip>
using namespace std;
const double eps = 10e-7;
void print_matrix(double **M, long n, long m)
{
cout.setf(ios::scientific);
cout.precision(10);
for (long i = 0; i < n; i++) {
cout << M[i][0];
for (long j = 1; j < m; j++) {
cout << " " << M[i][j];
}
cout << endl;
}
}
void swap_rows(long *prm, long i, long j)
{
long temp = prm[i];
prm[i] = prm[j];
prm[j] = temp;
}
bool zero_column(double **A, long *prm, long n, long i, long j)
{
for ( ; i < n; i++) {
if (fabs(A[prm[i]][j]) > eps) {
return false;
}
}
return true;
}
void solve_equation(double **A, double **X, double **B, long n, long m, long k)
{
long *prm = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
prm[i] = i;
}
long *x_index = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
x_index[i] = i;
}
long row = 0;
long col = 0;
for ( ; row < n && col < m; row++, col++) {
long max_value_row = row;
for (long i = row + 1; i < n; i++) {
if (fabs(A[prm[i]][col]) > fabs(A[prm[max_value_row]][col])) {
max_value_row = i;
}
}
swap_rows(prm, row, max_value_row);
if (fabs(A[prm[row]][col]) > eps) {
x_index[row] = col;
for (long i = row + 1; i < n; i++) {
double factor = -A[prm[i]][col] / A[prm[row]][col];
for (long j = col; j < m; j++) {
A[prm[i]][j] += A[prm[row]][j] * factor;
}
for (long j = 0; j < k; j++) {
B[prm[i]][j] += B[prm[row]][j] * factor;
}
}
} else {
row--;
}
}
if (row == n || col == m) { // ??
row--;
}
for (long t = 0; t < k; t++) {
for (long i = row; i >= 0; i--) {
long index = x_index[i];
double sum = 0.0;
for (long j = index + 1; j < m; j++) {
sum += A[prm[i]][j] * X[j][t];
}
if (fabs(A[prm[i]][index]) > eps) {
X[index][t] = (B[prm[i]][t] - sum) / A[prm[i]][index];
} else {
X[index][t] = 0.0;
}
}
}
}
int main()
{
long n, m, k;
cin >> n >> m >> k;
double **A = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
A[i] = (double *)malloc(m * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < m; j++) {
cin >> A[i][j];
}
}
double **B = (double **)malloc(n * sizeof(double *));
for (long i = 0; i < n; i++) {
B[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < n; i++) {
for (long j = 0; j < k; j++) {
cin >> B[i][j];
}
}
double **X = (double **)malloc(m * sizeof(double *));
for (long i = 0; i < m; i++) {
X[i] = (double *)malloc(k * sizeof(double));
}
for (long i = 0; i < m; i++) {
for (long j = 0; j < k; j++) {
X[i][j] = 0.0;
}
}
solve_equation(A, X, B, n, m, k);
print_matrix(X, m, k);
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //
// main.cpp
// matrix-test
//
// Created by Nikita Makarov on 22/03/16.
// Copyright © 2016 Nikita Makarov. All rights reserved.
//
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <iomanip>
using namespace std;
const double eps = 10e-7;
void print_matrix(double **M, long n, long m)
{
cout.setf(ios::scientific);
cout.precision(10);
for (long i = 0; i < n; i++) {
cout << M[i][0];
for (long j = 1; j < m; j++) {
cout << " " << M[i][j];
}
cout << endl;
}
}
void swap_rows(long *prm, long i, long j)
{
long temp = prm[i];
prm[i] = prm[j];
prm[j] = temp;
}
bool zero_column(double **A, long *prm, long n, long i, long j)
{
for ( ; i < n; i++) {
if (fabs(A[prm[i]][j]) > eps) {
return false;
}
}
return true;
}
void solve_equation(double **A, double **X, double **B, long n, long m, long k)
{
long *prm = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
prm[i] = i;
}
long *x_index = (long *)malloc(n * sizeof(long));
for (long i = 0; i < n; i++) {
x_index[i] = i;
}
long row = 0;
long col = 0;
for ( ; row < n && col < m; row++, col++) {
long max_value_row = row;
for (long i = row + 1; i < n; i++) {
if (fabs(A[prm[i]][col]) > fabs(A[prm[max_value_row]][col])) {
max_value_row = i;
}
}
swap_rows(prm, row, max_value_row);
if (fabs(A[prm[row]][col]) > eps) {
x_index[row] = col;
for (long i = row + 1; i < n; i++) {
double factor = -A[prm[i]][col] / A[prm[row]][col];
for (long j = col; j < m; j++) {
A[prm[i]][j] += A[prm[row]][j] * factor;
}
for (long j = 0; j < k; j++) {
B[prm[i]][j] += B[prm[row]][j] * factor;
}
}
} else {
row--;
}
}
if (row == n || col == m) { // ??
row--;
}
for (long t = 0; t < k; t++) {
for (long i = row; i >= 0; i--) {
long index = x_index[i];
double sum = 0.0;
for (long j = index + 1; j < m; j++) {
sum += A[prm[i]][j] * X[j][t];
}
if (fabs(A[prm[i]][index]) > eps) {
X[index][t] = (B[prm[i]][t] - sum) / A[prm[i]][index];
} else {
X[index][t] = 0.0;
}
}
}
}
// Reads n, m, k, then an n x m matrix A and an n x k matrix B from stdin,
// solves A * X = B for the m x k matrix X, and prints X to stdout.
// NOTE(review): no malloc result is checked and nothing is freed —
// presumably fine for a one-shot contest-style program, but confirm.
// NOTE(review): n, m, k are read without validation; negative or huge
// values would make the malloc sizes invalid — verify trusted input.
int main()
{
    long n, m, k;
    cin >> n >> m >> k;
    // A: n rows of m doubles, filled from stdin.
    double **A = (double **)malloc(n * sizeof(double *));
    for (long i = 0; i < n; i++) {
        A[i] = (double *)malloc(m * sizeof(double));
    }
    for (long i = 0; i < n; i++) {
        for (long j = 0; j < m; j++) {
            cin >> A[i][j];
        }
    }
    // B: n rows of k doubles (right-hand sides), filled from stdin.
    double **B = (double **)malloc(n * sizeof(double *));
    for (long i = 0; i < n; i++) {
        B[i] = (double *)malloc(k * sizeof(double));
    }
    for (long i = 0; i < n; i++) {
        for (long j = 0; j < k; j++) {
            cin >> B[i][j];
        }
    }
    // X: m rows of k doubles, zero-initialized; solve_equation only writes
    // entries with usable pivots, so the rest must already be 0.
    double **X = (double **)malloc(m * sizeof(double *));
    for (long i = 0; i < m; i++) {
        X[i] = (double *)malloc(k * sizeof(double));
    }
    for (long i = 0; i < m; i++) {
        for (long j = 0; j < k; j++) {
            X[i][j] = 0.0;
        }
    }
    solve_equation(A, X, B, n, m, k);
    print_matrix(X, m, k);
    return 0;
}
.file "test_cpu.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z12print_matrixPPdll # -- Begin function _Z12print_matrixPPdll
.p2align 4, 0x90
.type _Z12print_matrixPPdll,@function
_Z12print_matrixPPdll: # @_Z12print_matrixPPdll
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rcx
orl $256, _ZSt4cout+24(%rcx) # imm = 0x100
movq -24(%rax), %rax
movq $10, _ZSt4cout+8(%rax)
testq %rsi, %rsi
jle .LBB0_10
# %bb.1: # %.lr.ph14
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
xorl %r13d, %r13d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_2 Depth=1
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB0_2 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r13
cmpq %r14, %r13
je .LBB0_10
.LBB0_2: # =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
movq (%r15,%r13,8), %rax
movsd (%rax), %xmm0 # xmm0 = mem[0],zero
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
cmpq $2, %rbx
jl .LBB0_5
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB0_2 Depth=1
movl $1, %r12d
.p2align 4, 0x90
.LBB0_4: # %.lr.ph
# Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r15,%r13,8), %rax
movsd (%rax,%r12,8), %xmm0 # xmm0 = mem[0],zero
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
incq %r12
cmpq %r12, %rbx
jne .LBB0_4
.LBB0_5: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB0_11
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB0_2 Depth=1
cmpb $0, 56(%r12)
je .LBB0_8
# %bb.7: # in Loop: Header=BB0_2 Depth=1
movzbl 67(%r12), %eax
jmp .LBB0_9
.LBB0_10: # %._crit_edge15
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_11:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z12print_matrixPPdll, .Lfunc_end0-_Z12print_matrixPPdll
.cfi_endproc
# -- End function
.globl _Z9swap_rowsPlll # -- Begin function _Z9swap_rowsPlll
.p2align 4, 0x90
.type _Z9swap_rowsPlll,@function
_Z9swap_rowsPlll: # @_Z9swap_rowsPlll
.cfi_startproc
# %bb.0:
movq (%rdi,%rsi,8), %rax
movq (%rdi,%rdx,8), %rcx
movq %rcx, (%rdi,%rsi,8)
movq %rax, (%rdi,%rdx,8)
retq
.Lfunc_end1:
.size _Z9swap_rowsPlll, .Lfunc_end1-_Z9swap_rowsPlll
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z11zero_columnPPdPllll
.LCPI2_0:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI2_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z11zero_columnPPdPllll
.p2align 4, 0x90
.type _Z11zero_columnPPdPllll,@function
_Z11zero_columnPPdPllll: # @_Z11zero_columnPPdPllll
.cfi_startproc
# %bb.0:
cmpq %rdx, %rcx
setge %al
jge .LBB2_6
# %bb.1: # %.lr.ph.preheader
movq (%rsi,%rcx,8), %r9
movq (%rdi,%r9,8), %r9
movsd (%r9,%r8,8), %xmm0 # xmm0 = mem[0],zero
andpd .LCPI2_0(%rip), %xmm0
ucomisd .LCPI2_1(%rip), %xmm0
ja .LBB2_6
# %bb.2: # %.lr.ph12.preheader
incq %rcx
movapd .LCPI2_0(%rip), %xmm0 # xmm0 = [NaN,NaN]
movsd .LCPI2_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB2_3: # %.lr.ph12
# =>This Inner Loop Header: Depth=1
movq %rcx, %rax
cmpq %rcx, %rdx
je .LBB2_5
# %bb.4: # %.lr.ph
# in Loop: Header=BB2_3 Depth=1
movq (%rsi,%rax,8), %rcx
movq (%rdi,%rcx,8), %rcx
movsd (%rcx,%r8,8), %xmm2 # xmm2 = mem[0],zero
andpd %xmm0, %xmm2
leaq 1(%rax), %rcx
ucomisd %xmm1, %xmm2
jbe .LBB2_3
.LBB2_5: # %._crit_edge.loopexit
cmpq %rdx, %rax
setge %al
.LBB2_6: # %._crit_edge
retq
.Lfunc_end2:
.size _Z11zero_columnPPdPllll, .Lfunc_end2-_Z11zero_columnPPdPllll
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z14solve_equationPPdS0_S0_lll
.LCPI3_0:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.LCPI3_2:
.quad 0x8000000000000000 # double -0
.quad 0x8000000000000000 # double -0
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z14solve_equationPPdS0_S0_lll
.p2align 4, 0x90
.type _Z14solve_equationPPdS0_S0_lll,@function
_Z14solve_equationPPdS0_S0_lll: # @_Z14solve_equationPPdS0_S0_lll
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, %rbx
movq %r8, %r14
movq %rcx, %r15
movq %rdx, 8(%rsp) # 8-byte Spill
movq %rsi, %r12
movq %rdi, %r13
leaq (,%rcx,8), %rdi
movq %rdi, 16(%rsp) # 8-byte Spill
callq malloc
movq %rax, %rbp
testq %r15, %r15
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq %rax, (%rbp,%rax,8)
incq %rax
cmpq %rax, %r15
jne .LBB3_2
.LBB3_3: # %._crit_edge
movq 16(%rsp), %rdi # 8-byte Reload
callq malloc
testq %r15, %r15
jle .LBB3_6
# %bb.4: # %.lr.ph145.preheader
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_5: # %.lr.ph145
# =>This Inner Loop Header: Depth=1
movq %rcx, (%rax,%rcx,8)
incq %rcx
cmpq %rcx, %r15
jne .LBB3_5
.LBB3_6: # %.preheader141
xorl %ecx, %ecx
testq %r15, %r15
jle .LBB3_7
# %bb.8: # %.preheader141
movl $0, %edx
testq %r14, %r14
jle .LBB3_25
# %bb.9: # %.lr.ph163
xorl %edx, %edx
movapd .LCPI3_0(%rip), %xmm0 # xmm0 = [NaN,NaN]
movapd .LCPI3_0(%rip), %xmm1 # xmm1 = [NaN,NaN]
movsd .LCPI3_1(%rip), %xmm2 # xmm2 = mem[0],zero
movapd .LCPI3_2(%rip), %xmm3 # xmm3 = [-0.0E+0,-0.0E+0]
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_10: # =>This Loop Header: Depth=1
# Child Loop BB3_19 Depth 2
# Child Loop BB3_13 Depth 2
# Child Loop BB3_14 Depth 3
# Child Loop BB3_17 Depth 3
leaq 1(%rcx), %rsi
movq %rcx, %rdi
movq %rsi, %r8
cmpq %r15, %rsi
jl .LBB3_19
.LBB3_11: # %._crit_edge150
# in Loop: Header=BB3_10 Depth=1
movq (%rbp,%rcx,8), %r8
movq (%rbp,%rdi,8), %r9
movq %r9, (%rbp,%rcx,8)
movq %r8, (%rbp,%rdi,8)
movq (%rbp,%rcx,8), %rdi
movq (%r13,%rdi,8), %r8
movsd (%r8,%rdx,8), %xmm4 # xmm4 = mem[0],zero
andpd %xmm1, %xmm4
ucomisd %xmm2, %xmm4
jbe .LBB3_22
# %bb.12: # in Loop: Header=BB3_10 Depth=1
movq %rdx, (%rax,%rcx,8)
cmpq %r15, %rsi
jl .LBB3_13
jmp .LBB3_23
.p2align 4, 0x90
.LBB3_21: # %.lr.ph149
# in Loop: Header=BB3_19 Depth=2
incq %r8
movq %r9, %rdi
cmpq %r8, %r15
je .LBB3_11
.LBB3_19: # %.lr.ph149
# Parent Loop BB3_10 Depth=1
# => This Inner Loop Header: Depth=2
movq (%rbp,%r8,8), %r9
movq (%r13,%r9,8), %r9
movsd (%r9,%rdx,8), %xmm4 # xmm4 = mem[0],zero
andpd %xmm0, %xmm4
movq (%rbp,%rdi,8), %r9
movq (%r13,%r9,8), %r9
movsd (%r9,%rdx,8), %xmm5 # xmm5 = mem[0],zero
andpd %xmm0, %xmm5
ucomisd %xmm5, %xmm4
movq %r8, %r9
ja .LBB3_21
# %bb.20: # %.lr.ph149
# in Loop: Header=BB3_19 Depth=2
movq %rdi, %r9
jmp .LBB3_21
.p2align 4, 0x90
.LBB3_18: # %._crit_edge156
# in Loop: Header=BB3_13 Depth=2
incq %rsi
cmpq %r15, %rsi
je .LBB3_23
.LBB3_13: # %.lr.ph153.preheader
# Parent Loop BB3_10 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_14 Depth 3
# Child Loop BB3_17 Depth 3
movq (%rbp,%rsi,8), %r9
movq (%r13,%r9,8), %r10
movsd (%r10,%rdx,8), %xmm4 # xmm4 = mem[0],zero
xorpd %xmm3, %xmm4
divsd (%r8,%rdx,8), %xmm4
movq %rdx, %r11
.p2align 4, 0x90
.LBB3_14: # %.lr.ph153
# Parent Loop BB3_10 Depth=1
# Parent Loop BB3_13 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%r8,%r11,8), %xmm5 # xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
addsd (%r10,%r11,8), %xmm5
movsd %xmm5, (%r10,%r11,8)
incq %r11
cmpq %r14, %r11
jl .LBB3_14
# %bb.15: # %.preheader140
# in Loop: Header=BB3_13 Depth=2
testq %rbx, %rbx
jle .LBB3_18
# %bb.16: # %.lr.ph155
# in Loop: Header=BB3_13 Depth=2
movq 8(%rsp), %r11 # 8-byte Reload
movq (%r11,%rdi,8), %r10
movq (%r11,%r9,8), %r9
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB3_17: # Parent Loop BB3_10 Depth=1
# Parent Loop BB3_13 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%r10,%r11,8), %xmm5 # xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
addsd (%r9,%r11,8), %xmm5
movsd %xmm5, (%r9,%r11,8)
incq %r11
cmpq %r11, %rbx
jne .LBB3_17
jmp .LBB3_18
.p2align 4, 0x90
.LBB3_22: # in Loop: Header=BB3_10 Depth=1
decq %rcx
.LBB3_23: # %.loopexit
# in Loop: Header=BB3_10 Depth=1
incq %rcx
incq %rdx
cmpq %r15, %rcx
jge .LBB3_25
# %bb.24: # %.loopexit
# in Loop: Header=BB3_10 Depth=1
cmpq %r14, %rdx
jl .LBB3_10
jmp .LBB3_25
.LBB3_7:
xorl %edx, %edx
.LBB3_25: # %._crit_edge164
cmpq %r15, %rcx
sete %sil
cmpq %r14, %rdx
sete %dl
testq %rbx, %rbx
movq 8(%rsp), %r11 # 8-byte Reload
jle .LBB3_29
# %bb.26: # %.preheader.lr.ph
orb %dl, %sil
movzbl %sil, %edx
subq %rdx, %rcx
xorl %edx, %edx
movapd .LCPI3_0(%rip), %xmm0 # xmm0 = [NaN,NaN]
movsd .LCPI3_1(%rip), %xmm1 # xmm1 = mem[0],zero
jmp .LBB3_27
.p2align 4, 0x90
.LBB3_28: # %._crit_edge176
# in Loop: Header=BB3_27 Depth=1
incq %rdx
cmpq %rbx, %rdx
je .LBB3_29
.LBB3_27: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_30 Depth 2
# Child Loop BB3_32 Depth 3
movq %rcx, %rsi
testq %rcx, %rcx
jns .LBB3_30
jmp .LBB3_28
.p2align 4, 0x90
.LBB3_35: # in Loop: Header=BB3_30 Depth=2
movq (%r12,%rdi,8), %rdi
movsd %xmm2, (%rdi,%rdx,8)
leaq -1(%rsi), %rdi
testq %rsi, %rsi
movq %rdi, %rsi
jle .LBB3_28
.LBB3_30: # %.lr.ph175
# Parent Loop BB3_27 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_32 Depth 3
movq (%rax,%rsi,8), %rdi
leaq 1(%rdi), %r8
xorpd %xmm2, %xmm2
xorpd %xmm3, %xmm3
cmpq %r14, %r8
jge .LBB3_33
# %bb.31: # %.lr.ph171
# in Loop: Header=BB3_30 Depth=2
movq (%rbp,%rsi,8), %r9
movq (%r13,%r9,8), %r9
.p2align 4, 0x90
.LBB3_32: # Parent Loop BB3_27 Depth=1
# Parent Loop BB3_30 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%r9,%r8,8), %xmm4 # xmm4 = mem[0],zero
movq (%r12,%r8,8), %r10
mulsd (%r10,%rdx,8), %xmm4
addsd %xmm4, %xmm3
incq %r8
cmpq %r8, %r14
jne .LBB3_32
.LBB3_33: # %._crit_edge172
# in Loop: Header=BB3_30 Depth=2
movq (%rbp,%rsi,8), %r8
movq (%r13,%r8,8), %r9
movsd (%r9,%rdi,8), %xmm4 # xmm4 = mem[0],zero
movapd %xmm4, %xmm5
andpd %xmm0, %xmm5
ucomisd %xmm1, %xmm5
jbe .LBB3_35
# %bb.34: # in Loop: Header=BB3_30 Depth=2
movq (%r11,%r8,8), %r8
movsd (%r8,%rdx,8), %xmm2 # xmm2 = mem[0],zero
subsd %xmm3, %xmm2
divsd %xmm4, %xmm2
jmp .LBB3_35
.LBB3_29: # %._crit_edge178
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z14solve_equationPPdS0_S0_lll, .Lfunc_end3-_Z14solve_equationPPdS0_S0_lll
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 16(%rsp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSi10_M_extractIlEERSiRT_
leaq 8(%rsp), %rsi
movq %rax, %rdi
callq _ZNSi10_M_extractIlEERSiRT_
movq %rsp, %rsi
movq %rax, %rdi
callq _ZNSi10_M_extractIlEERSiRT_
movq 16(%rsp), %r15
leaq (,%r15,8), %rdi
callq malloc
movq %rax, %r13
testq %r15, %r15
jle .LBB4_3
# %bb.1: # %.lr.ph
movq 8(%rsp), %r14
shlq $3, %r14
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_2: # =>This Inner Loop Header: Depth=1
movq %r14, %rdi
callq malloc
movq %rax, (%r13,%rbx,8)
incq %rbx
cmpq %rbx, %r15
jne .LBB4_2
.LBB4_3: # %.preheader51
movq 16(%rsp), %r12
testq %r12, %r12
jle .LBB4_9
# %bb.4: # %.preheader50.preheader
xorl %r15d, %r15d
jmp .LBB4_5
.p2align 4, 0x90
.LBB4_8: # %._crit_edge
# in Loop: Header=BB4_5 Depth=1
incq %r15
movq 16(%rsp), %r12
cmpq %r12, %r15
jge .LBB4_9
.LBB4_5: # %.preheader50
# =>This Loop Header: Depth=1
# Child Loop BB4_7 Depth 2
cmpq $0, 8(%rsp)
jle .LBB4_8
# %bb.6: # %.lr.ph55
# in Loop: Header=BB4_5 Depth=1
movq (%r13,%r15,8), %r14
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_7: # Parent Loop BB4_5 Depth=1
# => This Inner Loop Header: Depth=2
movl $_ZSt3cin, %edi
movq %r14, %rsi
callq _ZNSi10_M_extractIdEERSiRT_
incq %rbx
addq $8, %r14
cmpq 8(%rsp), %rbx
jl .LBB4_7
jmp .LBB4_8
.LBB4_9: # %._crit_edge57
leaq (,%r12,8), %rdi
callq malloc
movq %rax, %r14
testq %r12, %r12
jle .LBB4_12
# %bb.10: # %.lr.ph60
movq (%rsp), %r15
shlq $3, %r15
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_11: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
callq malloc
movq %rax, (%r14,%rbx,8)
incq %rbx
cmpq %rbx, %r12
jne .LBB4_11
.LBB4_12: # %.preheader49
movq 16(%rsp), %r15
testq %r15, %r15
jle .LBB4_18
# %bb.13: # %.preheader48.preheader
xorl %r12d, %r12d
jmp .LBB4_14
.p2align 4, 0x90
.LBB4_17: # %._crit_edge63
# in Loop: Header=BB4_14 Depth=1
incq %r12
movq 16(%rsp), %r15
cmpq %r15, %r12
jge .LBB4_18
.LBB4_14: # %.preheader48
# =>This Loop Header: Depth=1
# Child Loop BB4_16 Depth 2
cmpq $0, (%rsp)
jle .LBB4_17
# %bb.15: # %.lr.ph62
# in Loop: Header=BB4_14 Depth=1
movq (%r14,%r12,8), %r15
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_16: # Parent Loop BB4_14 Depth=1
# => This Inner Loop Header: Depth=2
movl $_ZSt3cin, %edi
movq %r15, %rsi
callq _ZNSi10_M_extractIdEERSiRT_
incq %rbx
addq $8, %r15
cmpq (%rsp), %rbx
jl .LBB4_16
jmp .LBB4_17
.LBB4_18: # %._crit_edge65
movq %r14, 24(%rsp) # 8-byte Spill
movq %r13, 32(%rsp) # 8-byte Spill
movq 8(%rsp), %r13
leaq (,%r13,8), %rdi
callq malloc
movq %rax, %r12
testq %r13, %r13
jle .LBB4_21
# %bb.19: # %.lr.ph69
movq (%rsp), %rbp
shlq $3, %rbp
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_20: # =>This Inner Loop Header: Depth=1
movq %rbp, %rdi
callq malloc
movq %rax, (%r12,%rbx,8)
incq %rbx
cmpq %rbx, %r13
jne .LBB4_20
.LBB4_21: # %.preheader47
testq %r13, %r13
jle .LBB4_26
# %bb.22: # %.preheader.lr.ph
movq (%rsp), %rbx
leaq (,%rbx,8), %rbp
xorl %r14d, %r14d
jmp .LBB4_23
.p2align 4, 0x90
.LBB4_25: # %._crit_edge72
# in Loop: Header=BB4_23 Depth=1
incq %r14
cmpq %r14, %r13
je .LBB4_26
.LBB4_23: # %.preheader
# =>This Inner Loop Header: Depth=1
testq %rbx, %rbx
jle .LBB4_25
# %bb.24: # %.lr.ph71
# in Loop: Header=BB4_23 Depth=1
movq (%r12,%r14,8), %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq memset@PLT
jmp .LBB4_25
.LBB4_26: # %._crit_edge74
movq (%rsp), %r9
movq 32(%rsp), %rdi # 8-byte Reload
movq %r12, %rsi
movq 24(%rsp), %rdx # 8-byte Reload
movq %r15, %rcx
movq %r13, %r8
callq _Z14solve_equationPPdS0_S0_lll
movq 8(%rsp), %rsi
movq (%rsp), %rdx
movq %r12, %rdi
callq _Z12print_matrixPPdll
xorl %eax, %eax
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " "
.size .L.str, 2
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001998a0_00000000-6_test_cpu.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4046:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4046:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string " "
.text
.globl _Z12print_matrixPPdll
.type _Z12print_matrixPPdll, @function
_Z12print_matrixPPdll:
.LFB4039:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, 8(%rsp)
orl $256, 32+_ZSt4cout(%rip)
movq $10, 16+_ZSt4cout(%rip)
testq %rsi, %rsi
jle .L3
movq %rdx, %r12
movq %rdi, %r15
movq $0, (%rsp)
leaq _ZSt4cout(%rip), %rbp
leaq .LC0(%rip), %r14
jmp .L10
.L14:
call _ZSt16__throw_bad_castv@PLT
.L8:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L9:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $1, (%rsp)
movq (%rsp), %rax
addq $8, %r15
cmpq %rax, 8(%rsp)
je .L3
.L10:
movq %r15, %r13
movq (%r15), %rax
movsd (%rax), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
cmpq $1, %r12
jle .L5
movl $1, %ebx
.L6:
movl $1, %edx
movq %r14, %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq 0(%r13), %rax
movsd (%rax,%rbx,8), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
addq $1, %rbx
cmpq %rbx, %r12
jne .L6
.L5:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %rbx
testq %rbx, %rbx
je .L14
cmpb $0, 56(%rbx)
je .L8
movzbl 67(%rbx), %esi
jmp .L9
.L3:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4039:
.size _Z12print_matrixPPdll, .-_Z12print_matrixPPdll
.globl _Z9swap_rowsPlll
.type _Z9swap_rowsPlll, @function
_Z9swap_rowsPlll:
.LFB4040:
.cfi_startproc
endbr64
movq %rdx, %rax
leaq (%rdi,%rsi,8), %rdx
movq (%rdx), %rcx
leaq (%rdi,%rax,8), %rax
movq (%rax), %rsi
movq %rsi, (%rdx)
movq %rcx, (%rax)
ret
.cfi_endproc
.LFE4040:
.size _Z9swap_rowsPlll, .-_Z9swap_rowsPlll
.globl _Z11zero_columnPPdPllll
.type _Z11zero_columnPPdPllll, @function
_Z11zero_columnPPdPllll:
.LFB4041:
.cfi_startproc
endbr64
cmpq %rdx, %rcx
jge .L19
salq $3, %r8
movq .LC1(%rip), %xmm2
movsd .LC2(%rip), %xmm1
.L18:
movq (%rsi,%rcx,8), %rax
movq (%rdi,%rax,8), %rax
movsd (%rax,%r8), %xmm0
andpd %xmm2, %xmm0
comisd %xmm1, %xmm0
ja .L20
addq $1, %rcx
cmpq %rcx, %rdx
jne .L18
movl $1, %eax
ret
.L19:
movl $1, %eax
ret
.L20:
movl $0, %eax
ret
.cfi_endproc
.LFE4041:
.size _Z11zero_columnPPdPllll, .-_Z11zero_columnPPdPllll
.globl _Z14solve_equationPPdS0_S0_lll
.type _Z14solve_equationPPdS0_S0_lll, @function
_Z14solve_equationPPdS0_S0_lll:
.LFB4042:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r14
movq %rsi, %rbp
movq %rdx, %r12
movq %r8, %rbx
movq %r9, %r15
movq %rcx, 56(%rsp)
leaq 0(,%rcx,8), %rax
movq %rax, (%rsp)
movq %rax, %rdi
call malloc@PLT
movq %rax, %r13
movq 56(%rsp), %rdx
testq %rdx, %rdx
jle .L23
movl $0, %eax
.L24:
movq %rax, 0(%r13,%rax,8)
movq %rax, %rcx
addq $1, %rax
cmpq %rax, %rdx
jne .L24
movq %rcx, 16(%rsp)
movq (%rsp), %rdi
call malloc@PLT
movl $0, %edx
movq 16(%rsp), %rsi
.L25:
movq %rdx, (%rax,%rdx,8)
movq %rdx, %rcx
addq $1, %rdx
cmpq %rcx, %rsi
jne .L25
testq %rbx, %rbx
jle .L52
movq (%rsp), %rcx
addq %r13, %rcx
movq %rcx, 40(%rsp)
movq $0, 8(%rsp)
movq $0, (%rsp)
movq %rax, 32(%rsp)
movq %rbp, 48(%rsp)
movq 16(%rsp), %rbp
jmp .L39
.L55:
movq %rsi, %rax
.L29:
movq 0(%r13,%rax,8), %rsi
movq (%r14,%rsi,8), %rsi
movsd (%rsi,%rcx), %xmm1
andpd .LC1(%rip), %xmm1
movq 0(%r13,%rdx,8), %rsi
movq (%r14,%rsi,8), %rsi
movsd (%rsi,%rcx), %xmm0
andpd .LC1(%rip), %xmm0
ucomisd %xmm0, %xmm1
cmova %rax, %rdx
leaq 1(%rax), %rsi
cmpq %rax, %rbp
jne .L55
.L27:
movq (%rsp), %rsi
movq %r13, %rdi
call _Z9swap_rowsPlll
movq (%rsp), %rax
leaq 0(,%rax,8), %rdx
movq 0(%r13,%rax,8), %rax
movq %rax, 16(%rsp)
leaq 0(,%rax,8), %rsi
leaq (%r14,%rsi), %rcx
movq 8(%rsp), %rdi
leaq 0(,%rdi,8), %r8
movq (%rcx), %rax
movsd (%rax,%rdi,8), %xmm0
andpd .LC1(%rip), %xmm0
movq (%rsp), %rax
subq $1, %rax
comisd .LC2(%rip), %xmm0
ja .L77
.L38:
leaq 1(%rax), %rcx
movq %rcx, (%rsp)
addq $1, 8(%rsp)
movq 8(%rsp), %rcx
cmpq %rbp, %rax
jge .L74
cmpq %rcx, %rbx
jle .L78
.L39:
movq (%rsp), %rdx
leaq 1(%rdx), %rax
movq %rax, 24(%rsp)
cmpq %rdx, %rbp
jle .L53
movq 8(%rsp), %rcx
salq $3, %rcx
jmp .L29
.L53:
movq (%rsp), %rdx
jmp .L27
.L77:
movq 32(%rsp), %rdi
movq (%rsp), %rax
movq 8(%rsp), %r11
movq %r11, (%rdi,%rax,8)
cmpq %rax, %rbp
jle .L32
leaq 8(%r13,%rdx), %rdi
movq %r13, 24(%rsp)
movq %rbp, %r13
movq 40(%rsp), %rbp
.L37:
movq (%rdi), %rax
leaq 0(,%rax,8), %r9
leaq (%r14,%r9), %r10
movq (%r10), %rax
movsd (%rax,%r8), %xmm1
xorpd .LC4(%rip), %xmm1
movq (%rcx), %rax
divsd (%rax,%r8), %xmm1
cmpq %r11, %rbx
jle .L33
movq %r11, %rax
movq %rsi, 16(%rsp)
.L34:
movq (%r10), %rdx
leaq (%rdx,%rax,8), %rdx
movq (%rcx), %rsi
movapd %xmm1, %xmm0
mulsd (%rsi,%rax,8), %xmm0
addsd (%rdx), %xmm0
movsd %xmm0, (%rdx)
addq $1, %rax
cmpq %rax, %rbx
jne .L34
movq 16(%rsp), %rsi
.L33:
testq %r15, %r15
jle .L35
movl $0, %eax
.L36:
movq (%r12,%r9), %rdx
leaq (%rdx,%rax,8), %rdx
movq (%r12,%rsi), %r10
movapd %xmm1, %xmm0
mulsd (%r10,%rax,8), %xmm0
addsd (%rdx), %xmm0
movsd %xmm0, (%rdx)
addq $1, %rax
cmpq %rax, %r15
jne .L36
.L35:
addq $8, %rdi
cmpq %rbp, %rdi
jne .L37
movq %r13, %rbp
movq 24(%rsp), %r13
movq (%rsp), %rax
jmp .L38
.L78:
movq 32(%rsp), %rax
movq 48(%rsp), %rbp
jmp .L26
.L52:
movq $0, 8(%rsp)
movq $0, (%rsp)
jmp .L26
.L74:
movq 32(%rsp), %rax
movq 48(%rsp), %rbp
.L26:
movq (%rsp), %rsi
cmpq %rsi, 56(%rsp)
je .L60
movq 8(%rsp), %rcx
cmpq %rcx, %rbx
je .L60
.L41:
movl $0, %ecx
movl $0, %edx
testq %r15, %r15
jle .L22
movq .LC1(%rip), %xmm4
movsd .LC2(%rip), %xmm3
movq (%rsp), %rsi
jmp .L43
.L60:
subq $1, (%rsp)
jmp .L41
.L57:
pxor %xmm1, %xmm1
jmp .L45
.L73:
movq 0(%rbp,%r10,8), %rdx
movq $0x000000000, (%rdx,%rcx)
.L49:
subq $1, %r9
cmpq $-1, %r9
je .L79
.L50:
movq %r9, %r11
movq (%rax,%r9,8), %r10
leaq 1(%r10), %rdx
cmpq %rdx, %rbx
jle .L57
movq 0(%r13,%r9,8), %rdi
movq (%r14,%rdi,8), %r8
pxor %xmm1, %xmm1
.L46:
movq 0(%rbp,%rdx,8), %rdi
movsd (%rdi,%rcx), %xmm0
mulsd (%r8,%rdx,8), %xmm0
addsd %xmm0, %xmm1
addq $1, %rdx
cmpq %rdx, %rbx
jne .L46
.L45:
movq 0(%r13,%r11,8), %rdi
movq (%r14,%rdi,8), %rdx
movsd (%rdx,%r10,8), %xmm2
movapd %xmm2, %xmm0
andpd %xmm4, %xmm0
comisd %xmm3, %xmm0
jbe .L73
movq 0(%rbp,%r10,8), %rdx
movq (%r12,%rdi,8), %rdi
movsd (%rdi,%rcx), %xmm0
subsd %xmm1, %xmm0
divsd %xmm2, %xmm0
movsd %xmm0, (%rdx,%rcx)
jmp .L49
.L79:
movq (%rsp), %rdx
.L51:
addq $1, %rdx
addq $8, %rcx
cmpq %rdx, %r15
je .L22
.L43:
movq %rsi, %r9
testq %rsi, %rsi
js .L51
movq %rdx, (%rsp)
jmp .L50
.L32:
movq 32(%rsp), %rax
movq 48(%rsp), %rbp
addq $1, 8(%rsp)
movq 24(%rsp), %rcx
movq %rcx, (%rsp)
jmp .L26
.L23:
movq (%rsp), %rdi
call malloc@PLT
movq $0, 8(%rsp)
movq $0, (%rsp)
jmp .L26
.L22:
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4042:
.size _Z14solve_equationPPdS0_S0_lll, .-_Z14solve_equationPPdS0_S0_lll
.globl main
.type main, @function
main:
.LFB4043:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSi10_M_extractIlEERSiRT_@PLT
movq %rax, %rdi
leaq 40(%rsp), %rsi
call _ZNSi10_M_extractIlEERSiRT_@PLT
movq %rax, %rdi
leaq 48(%rsp), %rsi
call _ZNSi10_M_extractIlEERSiRT_@PLT
movq 32(%rsp), %rbx
leaq 0(,%rbx,8), %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
testq %rbx, %rbx
jle .L81
movq %rax, %rbx
movq %rax, %rbp
addq %rax, %r12
.L82:
movq 40(%rsp), %rax
leaq 0(,%rax,8), %rdi
call malloc@PLT
movq %rax, (%rbx)
addq $8, %rbx
cmpq %r12, %rbx
jne .L82
movl $0, %r13d
leaq _ZSt3cin(%rip), %r12
jmp .L83
.L84:
movq 0(%rbp), %rax
leaq (%rax,%rbx,8), %rsi
movq %r12, %rdi
call _ZNSi10_M_extractIdEERSiRT_@PLT
addq $1, %rbx
cmpq %rbx, 40(%rsp)
jg .L84
.L86:
addq $1, %r13
movq 32(%rsp), %rbx
addq $8, %rbp
cmpq %r13, %rbx
jle .L85
.L83:
movl $0, %ebx
cmpq $0, 40(%rsp)
jg .L84
jmp .L86
.L85:
leaq 0(,%rbx,8), %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
testq %rbx, %rbx
jle .L87
movq %rax, %rbx
addq %rax, %rbp
.L88:
movq 48(%rsp), %rax
leaq 0(,%rax,8), %rdi
call malloc@PLT
movq %rax, (%rbx)
addq $8, %rbx
cmpq %rbp, %rbx
jne .L88
movq 16(%rsp), %rbx
movl $0, %r12d
leaq _ZSt3cin(%rip), %rbp
jmp .L99
.L90:
movq (%rbx), %rax
leaq (%rax,%r13,8), %rsi
movq %rbp, %rdi
call _ZNSi10_M_extractIdEERSiRT_@PLT
addq $1, %r13
cmpq %r13, 48(%rsp)
jg .L90
.L92:
addq $1, %r12
movq 32(%rsp), %r13
addq $8, %rbx
cmpq %r12, %r13
jle .L91
.L99:
movl $0, %r13d
cmpq $0, 48(%rsp)
jg .L90
jmp .L92
.L97:
movq (%r14,%rcx,8), %rax
leaq (%r12,%rax), %rdx
.L96:
movq $0x000000000, (%rax)
addq $8, %rax
cmpq %rax, %rdx
jne .L96
.L98:
addq $1, %rcx
cmpq %rcx, %r15
je .L93
.L95:
testq %rbp, %rbp
jg .L97
jmp .L98
.L93:
movq 48(%rsp), %r9
movq %r15, %r8
movq %r13, %rcx
movq 16(%rsp), %rdx
movq %r14, %rsi
movq 24(%rsp), %rdi
call _Z14solve_equationPPdS0_S0_lll
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
movq %r14, %rdi
call _Z12print_matrixPPdll
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L114
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L81:
.cfi_restore_state
movq %r12, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
.L87:
movq 32(%rsp), %r13
.L91:
movq 40(%rsp), %r15
leaq 0(,%r15,8), %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r14
testq %r15, %r15
jle .L93
movq %rax, %rbx
leaq 0(%rbp,%rax), %rax
movq %rax, 8(%rsp)
.L94:
movq 48(%rsp), %rbp
leaq 0(,%rbp,8), %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, (%rbx)
addq $8, %rbx
movq 8(%rsp), %rax
cmpq %rax, %rbx
jne .L94
movl $0, %ecx
jmp .L95
.L114:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4043:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4069:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4069:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC1:
.long -1
.long 2147483647
.long 0
.long 0
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long -1598689907
.long 1051772663
.section .rodata.cst16
.align 16
.LC4:
.long 0
.long -2147483648
.long 0
.long 0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "test_cpu.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z12print_matrixPPdll # -- Begin function _Z12print_matrixPPdll
.p2align 4, 0x90
.type _Z12print_matrixPPdll,@function
_Z12print_matrixPPdll: # @_Z12print_matrixPPdll
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rcx
orl $256, _ZSt4cout+24(%rcx) # imm = 0x100
movq -24(%rax), %rax
movq $10, _ZSt4cout+8(%rax)
testq %rsi, %rsi
jle .LBB0_10
# %bb.1: # %.lr.ph14
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
xorl %r13d, %r13d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_2 Depth=1
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB0_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB0_2 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r13
cmpq %r14, %r13
je .LBB0_10
.LBB0_2: # =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
movq (%r15,%r13,8), %rax
movsd (%rax), %xmm0 # xmm0 = mem[0],zero
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
cmpq $2, %rbx
jl .LBB0_5
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB0_2 Depth=1
movl $1, %r12d
.p2align 4, 0x90
.LBB0_4: # %.lr.ph
# Parent Loop BB0_2 Depth=1
# => This Inner Loop Header: Depth=2
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%r15,%r13,8), %rax
movsd (%rax,%r12,8), %xmm0 # xmm0 = mem[0],zero
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
incq %r12
cmpq %r12, %rbx
jne .LBB0_4
.LBB0_5: # %._crit_edge
# in Loop: Header=BB0_2 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB0_11
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB0_2 Depth=1
cmpb $0, 56(%r12)
je .LBB0_8
# %bb.7: # in Loop: Header=BB0_2 Depth=1
movzbl 67(%r12), %eax
jmp .LBB0_9
.LBB0_10: # %._crit_edge15
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB0_11:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z12print_matrixPPdll, .Lfunc_end0-_Z12print_matrixPPdll
.cfi_endproc
# -- End function
.globl _Z9swap_rowsPlll # -- Begin function _Z9swap_rowsPlll
.p2align 4, 0x90
.type _Z9swap_rowsPlll,@function
_Z9swap_rowsPlll: # @_Z9swap_rowsPlll
.cfi_startproc
# %bb.0:
movq (%rdi,%rsi,8), %rax
movq (%rdi,%rdx,8), %rcx
movq %rcx, (%rdi,%rsi,8)
movq %rax, (%rdi,%rdx,8)
retq
.Lfunc_end1:
.size _Z9swap_rowsPlll, .Lfunc_end1-_Z9swap_rowsPlll
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z11zero_columnPPdPllll
.LCPI2_0:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI2_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z11zero_columnPPdPllll
.p2align 4, 0x90
.type _Z11zero_columnPPdPllll,@function
_Z11zero_columnPPdPllll: # @_Z11zero_columnPPdPllll
.cfi_startproc
# %bb.0:
cmpq %rdx, %rcx
setge %al
jge .LBB2_6
# %bb.1: # %.lr.ph.preheader
movq (%rsi,%rcx,8), %r9
movq (%rdi,%r9,8), %r9
movsd (%r9,%r8,8), %xmm0 # xmm0 = mem[0],zero
andpd .LCPI2_0(%rip), %xmm0
ucomisd .LCPI2_1(%rip), %xmm0
ja .LBB2_6
# %bb.2: # %.lr.ph12.preheader
incq %rcx
movapd .LCPI2_0(%rip), %xmm0 # xmm0 = [NaN,NaN]
movsd .LCPI2_1(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB2_3: # %.lr.ph12
# =>This Inner Loop Header: Depth=1
movq %rcx, %rax
cmpq %rcx, %rdx
je .LBB2_5
# %bb.4: # %.lr.ph
# in Loop: Header=BB2_3 Depth=1
movq (%rsi,%rax,8), %rcx
movq (%rdi,%rcx,8), %rcx
movsd (%rcx,%r8,8), %xmm2 # xmm2 = mem[0],zero
andpd %xmm0, %xmm2
leaq 1(%rax), %rcx
ucomisd %xmm1, %xmm2
jbe .LBB2_3
.LBB2_5: # %._crit_edge.loopexit
cmpq %rdx, %rax
setge %al
.LBB2_6: # %._crit_edge
retq
.Lfunc_end2:
.size _Z11zero_columnPPdPllll, .Lfunc_end2-_Z11zero_columnPPdPllll
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z14solve_equationPPdS0_S0_lll
.LCPI3_0:
.quad 0x7fffffffffffffff # double NaN
.quad 0x7fffffffffffffff # double NaN
.LCPI3_2:
.quad 0x8000000000000000 # double -0
.quad 0x8000000000000000 # double -0
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_1:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z14solve_equationPPdS0_S0_lll
.p2align 4, 0x90
.type _Z14solve_equationPPdS0_S0_lll,@function
_Z14solve_equationPPdS0_S0_lll: # @_Z14solve_equationPPdS0_S0_lll
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, %rbx
movq %r8, %r14
movq %rcx, %r15
movq %rdx, 8(%rsp) # 8-byte Spill
movq %rsi, %r12
movq %rdi, %r13
leaq (,%rcx,8), %rdi
movq %rdi, 16(%rsp) # 8-byte Spill
callq malloc
movq %rax, %rbp
testq %r15, %r15
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq %rax, (%rbp,%rax,8)
incq %rax
cmpq %rax, %r15
jne .LBB3_2
.LBB3_3: # %._crit_edge
movq 16(%rsp), %rdi # 8-byte Reload
callq malloc
testq %r15, %r15
jle .LBB3_6
# %bb.4: # %.lr.ph145.preheader
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_5: # %.lr.ph145
# =>This Inner Loop Header: Depth=1
movq %rcx, (%rax,%rcx,8)
incq %rcx
cmpq %rcx, %r15
jne .LBB3_5
.LBB3_6: # %.preheader141
xorl %ecx, %ecx
testq %r15, %r15
jle .LBB3_7
# %bb.8: # %.preheader141
movl $0, %edx
testq %r14, %r14
jle .LBB3_25
# %bb.9: # %.lr.ph163
xorl %edx, %edx
movapd .LCPI3_0(%rip), %xmm0 # xmm0 = [NaN,NaN]
movapd .LCPI3_0(%rip), %xmm1 # xmm1 = [NaN,NaN]
movsd .LCPI3_1(%rip), %xmm2 # xmm2 = mem[0],zero
movapd .LCPI3_2(%rip), %xmm3 # xmm3 = [-0.0E+0,-0.0E+0]
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB3_10: # =>This Loop Header: Depth=1
# Child Loop BB3_19 Depth 2
# Child Loop BB3_13 Depth 2
# Child Loop BB3_14 Depth 3
# Child Loop BB3_17 Depth 3
leaq 1(%rcx), %rsi
movq %rcx, %rdi
movq %rsi, %r8
cmpq %r15, %rsi
jl .LBB3_19
.LBB3_11: # %._crit_edge150
# in Loop: Header=BB3_10 Depth=1
movq (%rbp,%rcx,8), %r8
movq (%rbp,%rdi,8), %r9
movq %r9, (%rbp,%rcx,8)
movq %r8, (%rbp,%rdi,8)
movq (%rbp,%rcx,8), %rdi
movq (%r13,%rdi,8), %r8
movsd (%r8,%rdx,8), %xmm4 # xmm4 = mem[0],zero
andpd %xmm1, %xmm4
ucomisd %xmm2, %xmm4
jbe .LBB3_22
# %bb.12: # in Loop: Header=BB3_10 Depth=1
movq %rdx, (%rax,%rcx,8)
cmpq %r15, %rsi
jl .LBB3_13
jmp .LBB3_23
.p2align 4, 0x90
.LBB3_21: # %.lr.ph149
# in Loop: Header=BB3_19 Depth=2
incq %r8
movq %r9, %rdi
cmpq %r8, %r15
je .LBB3_11
.LBB3_19: # %.lr.ph149
# Parent Loop BB3_10 Depth=1
# => This Inner Loop Header: Depth=2
movq (%rbp,%r8,8), %r9
movq (%r13,%r9,8), %r9
movsd (%r9,%rdx,8), %xmm4 # xmm4 = mem[0],zero
andpd %xmm0, %xmm4
movq (%rbp,%rdi,8), %r9
movq (%r13,%r9,8), %r9
movsd (%r9,%rdx,8), %xmm5 # xmm5 = mem[0],zero
andpd %xmm0, %xmm5
ucomisd %xmm5, %xmm4
movq %r8, %r9
ja .LBB3_21
# %bb.20: # %.lr.ph149
# in Loop: Header=BB3_19 Depth=2
movq %rdi, %r9
jmp .LBB3_21
.p2align 4, 0x90
.LBB3_18: # %._crit_edge156
# in Loop: Header=BB3_13 Depth=2
incq %rsi
cmpq %r15, %rsi
je .LBB3_23
.LBB3_13: # %.lr.ph153.preheader
# Parent Loop BB3_10 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_14 Depth 3
# Child Loop BB3_17 Depth 3
movq (%rbp,%rsi,8), %r9
movq (%r13,%r9,8), %r10
movsd (%r10,%rdx,8), %xmm4 # xmm4 = mem[0],zero
xorpd %xmm3, %xmm4
divsd (%r8,%rdx,8), %xmm4
movq %rdx, %r11
.p2align 4, 0x90
.LBB3_14: # %.lr.ph153
# Parent Loop BB3_10 Depth=1
# Parent Loop BB3_13 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%r8,%r11,8), %xmm5 # xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
addsd (%r10,%r11,8), %xmm5
movsd %xmm5, (%r10,%r11,8)
incq %r11
cmpq %r14, %r11
jl .LBB3_14
# %bb.15: # %.preheader140
# in Loop: Header=BB3_13 Depth=2
testq %rbx, %rbx
jle .LBB3_18
# %bb.16: # %.lr.ph155
# in Loop: Header=BB3_13 Depth=2
movq 8(%rsp), %r11 # 8-byte Reload
movq (%r11,%rdi,8), %r10
movq (%r11,%r9,8), %r9
xorl %r11d, %r11d
.p2align 4, 0x90
.LBB3_17: # Parent Loop BB3_10 Depth=1
# Parent Loop BB3_13 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%r10,%r11,8), %xmm5 # xmm5 = mem[0],zero
mulsd %xmm4, %xmm5
addsd (%r9,%r11,8), %xmm5
movsd %xmm5, (%r9,%r11,8)
incq %r11
cmpq %r11, %rbx
jne .LBB3_17
jmp .LBB3_18
.p2align 4, 0x90
.LBB3_22: # in Loop: Header=BB3_10 Depth=1
decq %rcx
.LBB3_23: # %.loopexit
# in Loop: Header=BB3_10 Depth=1
incq %rcx
incq %rdx
cmpq %r15, %rcx
jge .LBB3_25
# %bb.24: # %.loopexit
# in Loop: Header=BB3_10 Depth=1
cmpq %r14, %rdx
jl .LBB3_10
jmp .LBB3_25
.LBB3_7:
xorl %edx, %edx
.LBB3_25: # %._crit_edge164
cmpq %r15, %rcx
sete %sil
cmpq %r14, %rdx
sete %dl
testq %rbx, %rbx
movq 8(%rsp), %r11 # 8-byte Reload
jle .LBB3_29
# %bb.26: # %.preheader.lr.ph
orb %dl, %sil
movzbl %sil, %edx
subq %rdx, %rcx
xorl %edx, %edx
movapd .LCPI3_0(%rip), %xmm0 # xmm0 = [NaN,NaN]
movsd .LCPI3_1(%rip), %xmm1 # xmm1 = mem[0],zero
jmp .LBB3_27
.p2align 4, 0x90
.LBB3_28: # %._crit_edge176
# in Loop: Header=BB3_27 Depth=1
incq %rdx
cmpq %rbx, %rdx
je .LBB3_29
.LBB3_27: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_30 Depth 2
# Child Loop BB3_32 Depth 3
movq %rcx, %rsi
testq %rcx, %rcx
jns .LBB3_30
jmp .LBB3_28
.p2align 4, 0x90
.LBB3_35: # in Loop: Header=BB3_30 Depth=2
movq (%r12,%rdi,8), %rdi
movsd %xmm2, (%rdi,%rdx,8)
leaq -1(%rsi), %rdi
testq %rsi, %rsi
movq %rdi, %rsi
jle .LBB3_28
.LBB3_30: # %.lr.ph175
# Parent Loop BB3_27 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_32 Depth 3
movq (%rax,%rsi,8), %rdi
leaq 1(%rdi), %r8
xorpd %xmm2, %xmm2
xorpd %xmm3, %xmm3
cmpq %r14, %r8
jge .LBB3_33
# %bb.31: # %.lr.ph171
# in Loop: Header=BB3_30 Depth=2
movq (%rbp,%rsi,8), %r9
movq (%r13,%r9,8), %r9
.p2align 4, 0x90
.LBB3_32: # Parent Loop BB3_27 Depth=1
# Parent Loop BB3_30 Depth=2
# => This Inner Loop Header: Depth=3
movsd (%r9,%r8,8), %xmm4 # xmm4 = mem[0],zero
movq (%r12,%r8,8), %r10
mulsd (%r10,%rdx,8), %xmm4
addsd %xmm4, %xmm3
incq %r8
cmpq %r8, %r14
jne .LBB3_32
.LBB3_33: # %._crit_edge172
# in Loop: Header=BB3_30 Depth=2
movq (%rbp,%rsi,8), %r8
movq (%r13,%r8,8), %r9
movsd (%r9,%rdi,8), %xmm4 # xmm4 = mem[0],zero
movapd %xmm4, %xmm5
andpd %xmm0, %xmm5
ucomisd %xmm1, %xmm5
jbe .LBB3_35
# %bb.34: # in Loop: Header=BB3_30 Depth=2
movq (%r11,%r8,8), %r8
movsd (%r8,%rdx,8), %xmm2 # xmm2 = mem[0],zero
subsd %xmm3, %xmm2
divsd %xmm4, %xmm2
jmp .LBB3_35
.LBB3_29: # %._crit_edge178
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z14solve_equationPPdS0_S0_lll, .Lfunc_end3-_Z14solve_equationPPdS0_S0_lll
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 16(%rsp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSi10_M_extractIlEERSiRT_
leaq 8(%rsp), %rsi
movq %rax, %rdi
callq _ZNSi10_M_extractIlEERSiRT_
movq %rsp, %rsi
movq %rax, %rdi
callq _ZNSi10_M_extractIlEERSiRT_
movq 16(%rsp), %r15
leaq (,%r15,8), %rdi
callq malloc
movq %rax, %r13
testq %r15, %r15
jle .LBB4_3
# %bb.1: # %.lr.ph
movq 8(%rsp), %r14
shlq $3, %r14
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_2: # =>This Inner Loop Header: Depth=1
movq %r14, %rdi
callq malloc
movq %rax, (%r13,%rbx,8)
incq %rbx
cmpq %rbx, %r15
jne .LBB4_2
.LBB4_3: # %.preheader51
movq 16(%rsp), %r12
testq %r12, %r12
jle .LBB4_9
# %bb.4: # %.preheader50.preheader
xorl %r15d, %r15d
jmp .LBB4_5
.p2align 4, 0x90
.LBB4_8: # %._crit_edge
# in Loop: Header=BB4_5 Depth=1
incq %r15
movq 16(%rsp), %r12
cmpq %r12, %r15
jge .LBB4_9
.LBB4_5: # %.preheader50
# =>This Loop Header: Depth=1
# Child Loop BB4_7 Depth 2
cmpq $0, 8(%rsp)
jle .LBB4_8
# %bb.6: # %.lr.ph55
# in Loop: Header=BB4_5 Depth=1
movq (%r13,%r15,8), %r14
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_7: # Parent Loop BB4_5 Depth=1
# => This Inner Loop Header: Depth=2
movl $_ZSt3cin, %edi
movq %r14, %rsi
callq _ZNSi10_M_extractIdEERSiRT_
incq %rbx
addq $8, %r14
cmpq 8(%rsp), %rbx
jl .LBB4_7
jmp .LBB4_8
.LBB4_9: # %._crit_edge57
leaq (,%r12,8), %rdi
callq malloc
movq %rax, %r14
testq %r12, %r12
jle .LBB4_12
# %bb.10: # %.lr.ph60
movq (%rsp), %r15
shlq $3, %r15
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_11: # =>This Inner Loop Header: Depth=1
movq %r15, %rdi
callq malloc
movq %rax, (%r14,%rbx,8)
incq %rbx
cmpq %rbx, %r12
jne .LBB4_11
.LBB4_12: # %.preheader49
movq 16(%rsp), %r15
testq %r15, %r15
jle .LBB4_18
# %bb.13: # %.preheader48.preheader
xorl %r12d, %r12d
jmp .LBB4_14
.p2align 4, 0x90
.LBB4_17: # %._crit_edge63
# in Loop: Header=BB4_14 Depth=1
incq %r12
movq 16(%rsp), %r15
cmpq %r15, %r12
jge .LBB4_18
.LBB4_14: # %.preheader48
# =>This Loop Header: Depth=1
# Child Loop BB4_16 Depth 2
cmpq $0, (%rsp)
jle .LBB4_17
# %bb.15: # %.lr.ph62
# in Loop: Header=BB4_14 Depth=1
movq (%r14,%r12,8), %r15
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_16: # Parent Loop BB4_14 Depth=1
# => This Inner Loop Header: Depth=2
movl $_ZSt3cin, %edi
movq %r15, %rsi
callq _ZNSi10_M_extractIdEERSiRT_
incq %rbx
addq $8, %r15
cmpq (%rsp), %rbx
jl .LBB4_16
jmp .LBB4_17
.LBB4_18: # %._crit_edge65
movq %r14, 24(%rsp) # 8-byte Spill
movq %r13, 32(%rsp) # 8-byte Spill
movq 8(%rsp), %r13
leaq (,%r13,8), %rdi
callq malloc
movq %rax, %r12
testq %r13, %r13
jle .LBB4_21
# %bb.19: # %.lr.ph69
movq (%rsp), %rbp
shlq $3, %rbp
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB4_20: # =>This Inner Loop Header: Depth=1
movq %rbp, %rdi
callq malloc
movq %rax, (%r12,%rbx,8)
incq %rbx
cmpq %rbx, %r13
jne .LBB4_20
.LBB4_21: # %.preheader47
testq %r13, %r13
jle .LBB4_26
# %bb.22: # %.preheader.lr.ph
movq (%rsp), %rbx
leaq (,%rbx,8), %rbp
xorl %r14d, %r14d
jmp .LBB4_23
.p2align 4, 0x90
.LBB4_25: # %._crit_edge72
# in Loop: Header=BB4_23 Depth=1
incq %r14
cmpq %r14, %r13
je .LBB4_26
.LBB4_23: # %.preheader
# =>This Inner Loop Header: Depth=1
testq %rbx, %rbx
jle .LBB4_25
# %bb.24: # %.lr.ph71
# in Loop: Header=BB4_23 Depth=1
movq (%r12,%r14,8), %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq memset@PLT
jmp .LBB4_25
.LBB4_26: # %._crit_edge74
movq (%rsp), %r9
movq 32(%rsp), %rdi # 8-byte Reload
movq %r12, %rsi
movq 24(%rsp), %rdx # 8-byte Reload
movq %r15, %rcx
movq %r13, %r8
callq _Z14solve_equationPPdS0_S0_lll
movq 8(%rsp), %rsi
movq (%rsp), %rdx
movq %r12, %rdi
callq _Z12print_matrixPPdll
xorl %eax, %eax
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " "
.size .L.str, 2
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | extern "C"
__global__ void setValue_kernel(int *vals)
{
int N = 1e6;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
vals[idx] = idx;
} | code for sm_80
Function : setValue_kernel
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GT.AND P0, PT, R5, 0xf423f, PT ; /* 0x000f423f0500780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x000fca00078e0202 */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | extern "C"
__global__ void setValue_kernel(int *vals)
{
int N = 1e6;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
vals[idx] = idx;
} | .file "tmpxft_0010522a_00000000-6_fixedNumSet.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z15setValue_kernelPiPi
.type _Z35__device_stub__Z15setValue_kernelPiPi, @function
_Z35__device_stub__Z15setValue_kernelPiPi:
.LFB2051:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq setValue_kernel(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z35__device_stub__Z15setValue_kernelPiPi, .-_Z35__device_stub__Z15setValue_kernelPiPi
.globl setValue_kernel
.type setValue_kernel, @function
setValue_kernel:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z15setValue_kernelPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size setValue_kernel, .-setValue_kernel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "setValue_kernel"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq setValue_kernel(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | extern "C"
__global__ void setValue_kernel(int *vals)
{
int N = 1e6;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
vals[idx] = idx;
} | #include <hip/hip_runtime.h>
extern "C"
__global__ void setValue_kernel(int *vals)
{
int N = 1e6;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
vals[idx] = idx;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
extern "C"
__global__ void setValue_kernel(int *vals)
{
int N = 1e6;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
vals[idx] = idx;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected setValue_kernel
.globl setValue_kernel
.p2align 8
.type setValue_kernel,@function
setValue_kernel:
s_load_b32 s2, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e32 0xf4240, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_store_b32 v[2:3], v1, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel setValue_kernel
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size setValue_kernel, .Lfunc_end0-setValue_kernel
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: setValue_kernel
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: setValue_kernel.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
extern "C"
__global__ void setValue_kernel(int *vals)
{
int N = 1e6;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N)
vals[idx] = idx;
} | .text
.file "fixedNumSet.hip"
.globl __device_stub__setValue_kernel # -- Begin function __device_stub__setValue_kernel
.p2align 4, 0x90
.type __device_stub__setValue_kernel,@function
__device_stub__setValue_kernel: # @__device_stub__setValue_kernel
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $setValue_kernel, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size __device_stub__setValue_kernel, .Lfunc_end0-__device_stub__setValue_kernel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $setValue_kernel, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type setValue_kernel,@object # @setValue_kernel
.section .rodata,"a",@progbits
.globl setValue_kernel
.p2align 3, 0x0
setValue_kernel:
.quad __device_stub__setValue_kernel
.size setValue_kernel, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "setValue_kernel"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__setValue_kernel
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym setValue_kernel
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : setValue_kernel
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GT.AND P0, PT, R5, 0xf423f, PT ; /* 0x000f423f0500780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x000fca00078e0202 */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected setValue_kernel
.globl setValue_kernel
.p2align 8
.type setValue_kernel,@function
setValue_kernel:
s_load_b32 s2, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e32 0xf4240, v1
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_store_b32 v[2:3], v1, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel setValue_kernel
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size setValue_kernel, .Lfunc_end0-setValue_kernel
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: setValue_kernel
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: setValue_kernel.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0010522a_00000000-6_fixedNumSet.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z15setValue_kernelPiPi
.type _Z35__device_stub__Z15setValue_kernelPiPi, @function
_Z35__device_stub__Z15setValue_kernelPiPi:
.LFB2051:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq setValue_kernel(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z35__device_stub__Z15setValue_kernelPiPi, .-_Z35__device_stub__Z15setValue_kernelPiPi
.globl setValue_kernel
.type setValue_kernel, @function
setValue_kernel:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z15setValue_kernelPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size setValue_kernel, .-setValue_kernel
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "setValue_kernel"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq setValue_kernel(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "fixedNumSet.hip"
.globl __device_stub__setValue_kernel # -- Begin function __device_stub__setValue_kernel
.p2align 4, 0x90
.type __device_stub__setValue_kernel,@function
__device_stub__setValue_kernel: # @__device_stub__setValue_kernel
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $setValue_kernel, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size __device_stub__setValue_kernel, .Lfunc_end0-__device_stub__setValue_kernel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $setValue_kernel, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type setValue_kernel,@object # @setValue_kernel
.section .rodata,"a",@progbits
.globl setValue_kernel
.p2align 3, 0x0
setValue_kernel:
.quad __device_stub__setValue_kernel
.size setValue_kernel, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "setValue_kernel"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__setValue_kernel
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym setValue_kernel
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <iterator>
#include <thrust/copy.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <vector>
#include <thrust/sort.h>
#include <sys/times.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
void start_clock(void);
void end_clock(char *msg);
static clock_t st_time;
static clock_t en_time;
static struct tms st_cpu;
static struct tms en_cpu;
void
start_clock()
{
st_time = times(&st_cpu);
}
void end_clock(char *msg)
{
en_time = times(&en_cpu);
std::cout<< "Sort type : " << msg << std::endl<< " Time elapsed:"<< (intmax_t)(en_time - st_time)<<std::endl;
}
void generateRandom(double & i)
{
i = rand();
}
int main(int argc, char ** argv)
{
if(argc<2)
{
std::cout<<"Please provide size as argument"<<std::endl;
return 1;
}
long vec_size =atoi(argv[1]);
{
start_clock();
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), generateRandom);
std::sort(vec.begin(), vec.end());
end_clock("CPU all");
}
{
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), generateRandom);
start_clock();
std::sort(vec.begin(), vec.end());
end_clock("CPU sort only");
}
{
cudaDeviceReset();
start_clock();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
hv = d;
end_clock("thrust ALL");
}
{
cudaDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
start_clock();
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
thrust::copy(d.begin(), d.end(), hv.begin());
end_clock("Thrust sort and copy and alloc");
}
{
cudaDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d(vec_size);
start_clock();
thrust::copy(hv.begin(), hv.end(), d.begin());
thrust::sort(d.begin(), d.end());
thrust::copy(d.begin(), d.end(), hv.begin());
end_clock("thrust sort and copy");
}
{
cudaDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d = hv;
start_clock();
thrust::sort(d.begin(), d.end());
end_clock("thrust sort only");
hv = d;
}
} | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <iterator>
#include <thrust/copy.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <vector>
#include <thrust/sort.h>
#include <sys/times.h>
#include <hip/hip_runtime.h>
void start_clock(void);
void end_clock(char *msg);
static clock_t st_time;
static clock_t en_time;
static struct tms st_cpu;
static struct tms en_cpu;
void
start_clock()
{
st_time = times(&st_cpu);
}
void end_clock(char *msg)
{
en_time = times(&en_cpu);
std::cout<< "Sort type : " << msg << std::endl<< " Time elapsed:"<< (intmax_t)(en_time - st_time)<<std::endl;
}
void generateRandom(double & i)
{
i = rand();
}
int main(int argc, char ** argv)
{
if(argc<2)
{
std::cout<<"Please provide size as argument"<<std::endl;
return 1;
}
long vec_size =atoi(argv[1]);
{
start_clock();
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), generateRandom);
std::sort(vec.begin(), vec.end());
end_clock("CPU all");
}
{
std::vector<double> vec(vec_size);
std::for_each(vec.begin(), vec.end(), generateRandom);
start_clock();
std::sort(vec.begin(), vec.end());
end_clock("CPU sort only");
}
{
hipDeviceReset();
start_clock();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
hv = d;
end_clock("thrust ALL");
}
{
hipDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
start_clock();
thrust::device_vector<double> d = hv;
thrust::sort(d.begin(), d.end());
thrust::copy(d.begin(), d.end(), hv.begin());
end_clock("Thrust sort and copy and alloc");
}
{
hipDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d(vec_size);
start_clock();
thrust::copy(hv.begin(), hv.end(), d.begin());
thrust::sort(d.begin(), d.end());
thrust::copy(d.begin(), d.end(), hv.begin());
end_clock("thrust sort and copy");
}
{
hipDeviceReset();
thrust::host_vector<double> hv(vec_size);
std::for_each(hv.begin(), hv.end(), generateRandom);
thrust::device_vector<double> d = hv;
start_clock();
thrust::sort(d.begin(), d.end());
end_clock("thrust sort only");
hv = d;
}
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // a toy program to get device property
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
int main(){
int dev_count;
cudaGetDeviceCount(&dev_count);
printf("Number of CUDA devices: %d\n", dev_count);
cudaDeviceProp prop;
int i;
for (i = 0; i < dev_count; i++){
cudaGetDeviceProperties(&prop,i);
printf("Name: %s\n", prop.name);
printf("SM count: %d\n", prop.multiProcessorCount);
printf("Max threads per SM: %d\n", prop.maxThreadsPerBlock);
printf("Max threads dim x: %d\n", prop.maxThreadsDim[0]);
printf("Max threads dim y: %d\n", prop.maxThreadsDim[1]);
printf("Max threads dim z: %d\n", prop.maxThreadsDim[2]);
printf("Number of threads in a warp: %d\n", prop.warpSize);
printf("Max memory (GB) on this device: %d\n", (int)(prop.totalGlobalMem * pow(10, -9)));
printf("Max shared memory (KB) per block: %d\n", (int)(prop.sharedMemPerBlock * pow(10, -3)));
printf("Total constant memory (KB): %d\n", (int) (prop.totalConstMem * pow(10, -3)));
}
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
exit(-1);
}
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // a toy program to get device property
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
int main(){
int dev_count;
cudaGetDeviceCount(&dev_count);
printf("Number of CUDA devices: %d\n", dev_count);
cudaDeviceProp prop;
int i;
for (i = 0; i < dev_count; i++){
cudaGetDeviceProperties(&prop,i);
printf("Name: %s\n", prop.name);
printf("SM count: %d\n", prop.multiProcessorCount);
printf("Max threads per SM: %d\n", prop.maxThreadsPerBlock);
printf("Max threads dim x: %d\n", prop.maxThreadsDim[0]);
printf("Max threads dim y: %d\n", prop.maxThreadsDim[1]);
printf("Max threads dim z: %d\n", prop.maxThreadsDim[2]);
printf("Number of threads in a warp: %d\n", prop.warpSize);
printf("Max memory (GB) on this device: %d\n", (int)(prop.totalGlobalMem * pow(10, -9)));
printf("Max shared memory (KB) per block: %d\n", (int)(prop.sharedMemPerBlock * pow(10, -3)));
printf("Total constant memory (KB): %d\n", (int) (prop.totalConstMem * pow(10, -3)));
}
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
exit(-1);
}
return 0;
} | .file "tmpxft_00059f83_00000000-6_query.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Number of CUDA devices: %d\n"
.LC1:
.string "Name: %s\n"
.LC2:
.string "SM count: %d\n"
.LC3:
.string "Max threads per SM: %d\n"
.LC4:
.string "Max threads dim x: %d\n"
.LC5:
.string "Max threads dim y: %d\n"
.LC6:
.string "Max threads dim z: %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "Number of threads in a warp: %d\n"
.align 8
.LC9:
.string "Max memory (GB) on this device: %d\n"
.align 8
.LC11:
.string "Max shared memory (KB) per block: %d\n"
.align 8
.LC12:
.string "Total constant memory (KB): %d\n"
.section .rodata.str1.1
.LC13:
.string "ERROR: %s\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $1056, %rsp
.cfi_def_cfa_offset 1088
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq .LC1(%rip), %rbp
jmp .L11
.L5:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
addsd %xmm0, %xmm0
jmp .L6
.L7:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
addsd %xmm0, %xmm0
jmp .L8
.L9:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
addsd %xmm0, %xmm0
.L10:
mulsd .LC10(%rip), %xmm0
cvttsd2sil %xmm0, %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jle .L4
.L11:
leaq 16(%rsp), %r12
movl %ebx, %esi
movq %r12, %rdi
call cudaGetDeviceProperties_v2@PLT
movq %r12, %rdx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 340(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 344(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 348(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 324(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 304(%rsp), %rax
testq %rax, %rax
js .L5
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
.L6:
mulsd .LC8(%rip), %xmm0
cvttsd2sil %xmm0, %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 312(%rsp), %rax
testq %rax, %rax
js .L7
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
.L8:
mulsd .LC10(%rip), %xmm0
cvttsd2sil %xmm0, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 368(%rsp), %rax
testq %rax, %rax
js .L9
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
jmp .L10
.L4:
call cudaDeviceSynchronize@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L16
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L17
movl $0, %eax
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC13(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC8:
.long -400107883
.long 1041313291
.align 8
.LC10:
.long -755914244
.long 1062232653
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // a toy program to get device property
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
int main(){
int dev_count;
cudaGetDeviceCount(&dev_count);
printf("Number of CUDA devices: %d\n", dev_count);
cudaDeviceProp prop;
int i;
for (i = 0; i < dev_count; i++){
cudaGetDeviceProperties(&prop,i);
printf("Name: %s\n", prop.name);
printf("SM count: %d\n", prop.multiProcessorCount);
printf("Max threads per SM: %d\n", prop.maxThreadsPerBlock);
printf("Max threads dim x: %d\n", prop.maxThreadsDim[0]);
printf("Max threads dim y: %d\n", prop.maxThreadsDim[1]);
printf("Max threads dim z: %d\n", prop.maxThreadsDim[2]);
printf("Number of threads in a warp: %d\n", prop.warpSize);
printf("Max memory (GB) on this device: %d\n", (int)(prop.totalGlobalMem * pow(10, -9)));
printf("Max shared memory (KB) per block: %d\n", (int)(prop.sharedMemPerBlock * pow(10, -3)));
printf("Total constant memory (KB): %d\n", (int) (prop.totalConstMem * pow(10, -3)));
}
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
exit(-1);
}
return 0;
} | // a toy program to get device property
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
int main(){
int dev_count;
hipGetDeviceCount(&dev_count);
printf("Number of CUDA devices: %d\n", dev_count);
hipDeviceProp_t prop;
int i;
for (i = 0; i < dev_count; i++){
hipGetDeviceProperties(&prop,i);
printf("Name: %s\n", prop.name);
printf("SM count: %d\n", prop.multiProcessorCount);
printf("Max threads per SM: %d\n", prop.maxThreadsPerBlock);
printf("Max threads dim x: %d\n", prop.maxThreadsDim[0]);
printf("Max threads dim y: %d\n", prop.maxThreadsDim[1]);
printf("Max threads dim z: %d\n", prop.maxThreadsDim[2]);
printf("Number of threads in a warp: %d\n", prop.warpSize);
printf("Max memory (GB) on this device: %d\n", (int)(prop.totalGlobalMem * pow(10, -9)));
printf("Max shared memory (KB) per block: %d\n", (int)(prop.sharedMemPerBlock * pow(10, -3)));
printf("Total constant memory (KB): %d\n", (int) (prop.totalConstMem * pow(10, -3)));
}
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) );
exit(-1);
}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // a toy program to get device property
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
int main(){
int dev_count;
hipGetDeviceCount(&dev_count);
printf("Number of CUDA devices: %d\n", dev_count);
hipDeviceProp_t prop;
int i;
for (i = 0; i < dev_count; i++){
hipGetDeviceProperties(&prop,i);
printf("Name: %s\n", prop.name);
printf("SM count: %d\n", prop.multiProcessorCount);
printf("Max threads per SM: %d\n", prop.maxThreadsPerBlock);
printf("Max threads dim x: %d\n", prop.maxThreadsDim[0]);
printf("Max threads dim y: %d\n", prop.maxThreadsDim[1]);
printf("Max threads dim z: %d\n", prop.maxThreadsDim[2]);
printf("Number of threads in a warp: %d\n", prop.warpSize);
printf("Max memory (GB) on this device: %d\n", (int)(prop.totalGlobalMem * pow(10, -9)));
printf("Max shared memory (KB) per block: %d\n", (int)(prop.sharedMemPerBlock * pow(10, -3)));
printf("Total constant memory (KB): %d\n", (int) (prop.totalConstMem * pow(10, -3)));
}
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) );
exit(-1);
}
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // a toy program to get device property
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
int main(){
int dev_count;
hipGetDeviceCount(&dev_count);
printf("Number of CUDA devices: %d\n", dev_count);
hipDeviceProp_t prop;
int i;
for (i = 0; i < dev_count; i++){
hipGetDeviceProperties(&prop,i);
printf("Name: %s\n", prop.name);
printf("SM count: %d\n", prop.multiProcessorCount);
printf("Max threads per SM: %d\n", prop.maxThreadsPerBlock);
printf("Max threads dim x: %d\n", prop.maxThreadsDim[0]);
printf("Max threads dim y: %d\n", prop.maxThreadsDim[1]);
printf("Max threads dim z: %d\n", prop.maxThreadsDim[2]);
printf("Number of threads in a warp: %d\n", prop.warpSize);
printf("Max memory (GB) on this device: %d\n", (int)(prop.totalGlobalMem * pow(10, -9)));
printf("Max shared memory (KB) per block: %d\n", (int)(prop.sharedMemPerBlock * pow(10, -3)));
printf("Total constant memory (KB): %d\n", (int) (prop.totalConstMem * pow(10, -3)));
}
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) );
exit(-1);
}
return 0;
} | .text
.file "query.hip"
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI0_0:
.long 1127219200 # 0x43300000
.long 1160773632 # 0x45300000
.long 0 # 0x0
.long 0 # 0x0
.LCPI0_1:
.quad 0x4330000000000000 # double 4503599627370496
.quad 0x4530000000000000 # double 1.9342813113834067E+25
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI0_2:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.LCPI0_3:
.quad 0x3f50624dd2f1a9fc # double 0.001
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
movl 4(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
cmpl $0, 4(%rsp)
jle .LBB0_3
# %bb.1: # %.lr.ph
leaq 8(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 396(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 328(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 332(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl 336(%rsp), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl 340(%rsp), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movl 316(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movsd 296(%rsp), %xmm0 # xmm0 = mem[0],zero
movapd .LCPI0_0(%rip), %xmm1 # xmm1 = [1127219200,1160773632,0,0]
unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movapd .LCPI0_1(%rip), %xmm1 # xmm1 = [4.503599627370496E+15,1.9342813113834067E+25]
subpd %xmm1, %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd .LCPI0_2(%rip), %xmm1
cvttsd2si %xmm1, %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movsd 304(%rsp), %xmm0 # xmm0 = mem[0],zero
unpcklps .LCPI0_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI0_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
movsd .LCPI0_3(%rip), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
cvttsd2si %xmm1, %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movsd 360(%rsp), %xmm0 # xmm0 = mem[0],zero
unpcklps .LCPI0_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI0_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd .LCPI0_3(%rip), %xmm1
cvttsd2si %xmm1, %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB0_2
.LBB0_3: # %._crit_edge
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB0_5
# %bb.4:
xorl %eax, %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_5:
.cfi_def_cfa_offset 1504
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movq %rbx, %rdi
movq %rax, %rdx
xorl %eax, %eax
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Number of CUDA devices: %d\n"
.size .L.str, 28
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Name: %s\n"
.size .L.str.1, 10
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "SM count: %d\n"
.size .L.str.2, 14
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Max threads per SM: %d\n"
.size .L.str.3, 24
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Max threads dim x: %d\n"
.size .L.str.4, 23
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Max threads dim y: %d\n"
.size .L.str.5, 23
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Max threads dim z: %d\n"
.size .L.str.6, 23
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Number of threads in a warp: %d\n"
.size .L.str.7, 33
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Max memory (GB) on this device: %d\n"
.size .L.str.8, 36
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Max shared memory (KB) per block: %d\n"
.size .L.str.9, 38
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Total constant memory (KB): %d\n"
.size .L.str.10, 32
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "ERROR: %s\n"
.size .L.str.11, 11
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00059f83_00000000-6_query.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Number of CUDA devices: %d\n"
.LC1:
.string "Name: %s\n"
.LC2:
.string "SM count: %d\n"
.LC3:
.string "Max threads per SM: %d\n"
.LC4:
.string "Max threads dim x: %d\n"
.LC5:
.string "Max threads dim y: %d\n"
.LC6:
.string "Max threads dim z: %d\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "Number of threads in a warp: %d\n"
.align 8
.LC9:
.string "Max memory (GB) on this device: %d\n"
.align 8
.LC11:
.string "Max shared memory (KB) per block: %d\n"
.align 8
.LC12:
.string "Total constant memory (KB): %d\n"
.section .rodata.str1.1
.LC13:
.string "ERROR: %s\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $1056, %rsp
.cfi_def_cfa_offset 1088
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 12(%rsp)
jle .L4
movl $0, %ebx
leaq .LC1(%rip), %rbp
jmp .L11
.L5:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
addsd %xmm0, %xmm0
jmp .L6
.L7:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
addsd %xmm0, %xmm0
jmp .L8
.L9:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm0, %xmm0
cvtsi2sdq %rdx, %xmm0
addsd %xmm0, %xmm0
.L10:
mulsd .LC10(%rip), %xmm0
cvttsd2sil %xmm0, %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jle .L4
.L11:
leaq 16(%rsp), %r12
movl %ebx, %esi
movq %r12, %rdi
call cudaGetDeviceProperties_v2@PLT
movq %r12, %rdx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 340(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 344(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 348(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 324(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 304(%rsp), %rax
testq %rax, %rax
js .L5
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
.L6:
mulsd .LC8(%rip), %xmm0
cvttsd2sil %xmm0, %edx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 312(%rsp), %rax
testq %rax, %rax
js .L7
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
.L8:
mulsd .LC10(%rip), %xmm0
cvttsd2sil %xmm0, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 368(%rsp), %rax
testq %rax, %rax
js .L9
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
jmp .L10
.L4:
call cudaDeviceSynchronize@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L16
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L17
movl $0, %eax
addq $1056, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC13(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $-1, %edi
call exit@PLT
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC8:
.long -400107883
.long 1041313291
.align 8
.LC10:
.long -755914244
.long 1062232653
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "query.hip"
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI0_0:
.long 1127219200 # 0x43300000
.long 1160773632 # 0x45300000
.long 0 # 0x0
.long 0 # 0x0
.LCPI0_1:
.quad 0x4330000000000000 # double 4503599627370496
.quad 0x4530000000000000 # double 1.9342813113834067E+25
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI0_2:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.LCPI0_3:
.quad 0x3f50624dd2f1a9fc # double 0.001
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 1504
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
movl 4(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
cmpl $0, 4(%rsp)
jle .LBB0_3
# %bb.1: # %.lr.ph
leaq 8(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl 396(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl 328(%rsp), %esi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 332(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl 336(%rsp), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl 340(%rsp), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movl 316(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movsd 296(%rsp), %xmm0 # xmm0 = mem[0],zero
movapd .LCPI0_0(%rip), %xmm1 # xmm1 = [1127219200,1160773632,0,0]
unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movapd .LCPI0_1(%rip), %xmm1 # xmm1 = [4.503599627370496E+15,1.9342813113834067E+25]
subpd %xmm1, %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd .LCPI0_2(%rip), %xmm1
cvttsd2si %xmm1, %esi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movsd 304(%rsp), %xmm0 # xmm0 = mem[0],zero
unpcklps .LCPI0_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI0_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
movsd .LCPI0_3(%rip), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm0, %xmm1
cvttsd2si %xmm1, %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movsd 360(%rsp), %xmm0 # xmm0 = mem[0],zero
unpcklps .LCPI0_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI0_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd .LCPI0_3(%rip), %xmm1
cvttsd2si %xmm1, %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB0_2
.LBB0_3: # %._crit_edge
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB0_5
# %bb.4:
xorl %eax, %eax
addq $1480, %rsp # imm = 0x5C8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_5:
.cfi_def_cfa_offset 1504
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movq %rbx, %rdi
movq %rax, %rdx
xorl %eax, %eax
callq fprintf
movl $-1, %edi
callq exit
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Number of CUDA devices: %d\n"
.size .L.str, 28
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Name: %s\n"
.size .L.str.1, 10
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "SM count: %d\n"
.size .L.str.2, 14
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Max threads per SM: %d\n"
.size .L.str.3, 24
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Max threads dim x: %d\n"
.size .L.str.4, 23
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Max threads dim y: %d\n"
.size .L.str.5, 23
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Max threads dim z: %d\n"
.size .L.str.6, 23
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Number of threads in a warp: %d\n"
.size .L.str.7, 33
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Max memory (GB) on this device: %d\n"
.size .L.str.8, 36
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Max shared memory (KB) per block: %d\n"
.size .L.str.9, 38
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Total constant memory (KB): %d\n"
.size .L.str.10, 32
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "ERROR: %s\n"
.size .L.str.11, 11
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // Dan Rolfe
#define BLOCKSIZE 32
// general structure for version 1 requested by prof
// section 5.3 of the cuda pogramming guide
/**
* first load from device mem to shared mem
* sync threads after read
* do the processing from shared mem
* sync threads after processing
* write the results back to device mem
**/
// gpu version 1 of the sobel code
// Naive Sobel edge-detection kernel: one thread per pixel, reading its 3x3
// neighbourhood directly from global memory (no shared-memory tiling).
// Writes 255 to result[index] when the squared gradient magnitude exceeds
// thresh, 0 otherwise. Pixels outside the guarded interior are left untouched.
// NOTE(review): bounds are hard-coded (i<479, j<383), so this assumes a fixed
// image size; the ysize parameter is never used — TODO confirm against the
// host-side launch code. i (derived from x indices) is used as the row index
// with row stride xsize below; verify that matches the image layout.
__global__ void d_sobel1(int* __restrict__ result, unsigned int* __restrict__ pic, int xsize, int ysize, int thresh)
{
int sum1, sum2, magnitude;
// Global 2D coordinates of this thread's pixel.
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
//printf("xindex = %d, yindex = %d \n", i, j);
// Interior-only guard: skips the 1-pixel border so the 3x3 window below
// never indexes outside rows/cols 0..478 x 0..382 of the assumed image.
if (i>0 && j>0 && i<479 &&j<383)
{
//if (j > 470)
// printf("xindex = %d, yindex = %d, result length = %d, pic length = %d \n", i, j, sizeof(result), sizeof(pic));
int index = i * xsize + j;
// Horizontal Sobel response: column (j+1) minus column (j-1), middle row
// weighted by 2.
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
// Vertical Sobel response: row (i-1) minus row (i+1), middle column
// weighted by 2.
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
// Squared magnitude — avoids a sqrt; thresh must be in the same squared units.
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
// if (i >=383 && j >=479 )
// printf("result value at i = %d, j = %d, is %d ... index is %d magnitude = %d, thresh = %d\n", i, j, result[index], index, magnitude, thresh);
}
}
// Shared-memory (tiled) Sobel kernel: each block stages a (BLOCKSIZE+2)^2
// tile of the image — its BLOCKSIZE^2 interior plus a 1-pixel halo — into
// pic_s, synchronizes, then computes the 3x3 Sobel operator from shared
// memory. NOTE(review): the halo-loading logic assumes blockDim.x ==
// blockDim.y == BLOCKSIZE (32) — TODO confirm at the launch site. Halo loads
// for threads on the image border read pic at x-1 / x+1 / y-1 / y+1 with no
// bounds guard, i.e. out of range for edge blocks; presumably tolerated
// because the final border check overwrites those results with 0 — verify.
__global__ void d_sobel2(int* __restrict__ result, unsigned int* __restrict__ pic, int width, int height, int thresh)
{
int sum1, sum2, magnitude;
// Global pixel coordinates; x is used as the row index with stride 'width'.
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int index = x * width +y ;
//printf("x = %d, y = %d \n", x, y);
//if (x>0 && y>0 && x<height -1 &&y<width -1)
// Tile plus 1-pixel halo on every side; uninitialized until the loads below.
__shared__ unsigned int pic_s[BLOCKSIZE+2][BLOCKSIZE+2];
int threadX = threadIdx.x;
int threadY = threadIdx.y;
// Every thread loads its own pixel into the tile interior (+1 offset skips
// the halo row/column).
pic_s[threadX+1][threadY+1] = pic[index];
// top left corner
if(threadX <1 && threadY <1)
pic_s[threadX][threadY] = pic[(x-1)*width + y-1];
// top right corner
if(threadX <1 && threadY > BLOCKSIZE -2)
pic_s[threadX][threadY+2] = pic[(x-1)*width + y+1];
// bottom left corner
if(threadX > BLOCKSIZE -2 && threadY <1)
pic_s[threadX+2][threadY] = pic[(x+1)*width + y-1];
// bottom right corner
if(threadX > BLOCKSIZE -2 && threadY > BLOCKSIZE -2)
pic_s[threadX+2][threadY+2] = pic[(x+1)*width + y+1];
// top edge
if (threadX < 1)
pic_s[threadX][threadY+1] = pic[(x-1)*width + y];
// bottom edge
if (threadX > BLOCKSIZE -2)
pic_s[threadX+2][threadY+1] = pic[(x+1)*width + y];
// left edge
if (threadY < 1)
pic_s[threadX+1][threadY] = pic[(x)*width + y-1];
// right edge
if (threadY > BLOCKSIZE -2)
pic_s[threadX+1][threadY+2] = pic[(x)*width + y+1];
//printf("after pics \n");
// Barrier: all tile + halo writes must complete before any thread reads
// pic_s below. Reached unconditionally by every thread in the block.
__syncthreads();
// 3x3 Sobel from shared memory; thread (threadX,threadY) is centered at
// pic_s[threadX+1][threadY+1].
sum1 = pic_s[threadX][threadY+2] - pic_s[threadX][threadY]
+ 2 * pic_s[threadX+1][threadY+2] - 2 * pic_s[threadX+1][threadY]
+ pic_s[threadX+2][threadY+2] - pic_s[threadX+2][threadY];
sum2 = pic_s[threadX][threadY] + 2 * pic_s[threadX][threadY+1] + pic_s[threadX][threadY+2]
- pic_s[threadX+2][threadY] - 2 * pic_s[threadX+2][threadY+1] - pic_s[threadX+2][threadY+2];
// Squared magnitude (no sqrt); thresh is compared in squared units.
magnitude = sum1*sum1 + sum2*sum2;
__syncthreads();
//printf(" index = %d, sum1 = %d, sum2 = %d, magnitude = %d \n", index, sum1, sum2, magnitude);
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
// Border pixels are forced to 0 regardless of the computed magnitude.
if (x ==0 || y ==0 || x==height-1 || y == width-1)
result[index] = 0;
} | code for sm_80
Function : _Z8d_sobel2PiPjiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0050*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002200 */
/*0060*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000ea20000002600 */
/*0070*/ UMOV UR5, 0x1 ; /* 0x0000000100057882 */
/* 0x000fc40000000000 */
/*0080*/ ULDC.64 UR6, c[0x0][0x170] ; /* 0x00005c0000067ab9 */
/* 0x000fe20000000a00 */
/*0090*/ IMAD R3, R3, c[0x0][0x0], R6 ; /* 0x0000000003037a24 */
/* 0x001fe200078e0206 */
/*00a0*/ ISETP.GT.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x002fc80003f24270 */
/*00b0*/ IADD3 R5, R3, -0x1, RZ ; /* 0xffffffff03057810 */
/* 0x000fe20007ffe0ff */
/*00c0*/ IMAD R0, R0, c[0x0][0x4], R7 ; /* 0x0000010000007a24 */
/* 0x004fe200078e0207 */
/*00d0*/ ISETP.GT.OR P5, PT, R6, RZ, P1 ; /* 0x000000ff0600720c */
/* 0x000fc60000fa4670 */
/*00e0*/ IMAD R5, R5, c[0x0][0x170], R0 ; /* 0x00005c0005057a24 */
/* 0x000fc800078e0200 */
/*00f0*/ IMAD.WIDE R8, R5, R11, c[0x0][0x168] ; /* 0x00005a0005087625 */
/* 0x000fcc00078e020b */
/*0100*/ @!P5 LDG.E.CONSTANT R5, [R8.64+-0x4] ; /* 0xfffffc080805d981 */
/* 0x000ea2000c1e9900 */
/*0110*/ IMAD R4, R6.reuse, 0x22, R7 ; /* 0x0000002206047824 */
/* 0x040fe200078e0207 */
/*0120*/ ISETP.GE.AND P0, PT, R7, 0x1f, PT ; /* 0x0000001f0700780c */
/* 0x000fe20003f06270 */
/*0130*/ IMAD R2, R3, c[0x0][0x170], R0 ; /* 0x00005c0003027a24 */
/* 0x000fe200078e0200 */
/*0140*/ ISETP.GT.AND P2, PT, R7, 0x1e, PT ; /* 0x0000001e0700780c */
/* 0x000fe40003f44270 */
/*0150*/ ISETP.GT.OR P3, PT, R6.reuse, RZ, !P0 ; /* 0x000000ff0600720c */
/* 0x040fe40004764670 */
/*0160*/ IADD3 R7, R3, 0x1, RZ ; /* 0x0000000103077810 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.GT.AND P4, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fc40003f84270 */
/*0180*/ ISETP.LT.OR P0, PT, R6.reuse, 0x1f, !P0 ; /* 0x0000001f0600780c */
/* 0x040fe20004701670 */
/*0190*/ IMAD R10, R7, c[0x0][0x170], R0 ; /* 0x00005c00070a7a24 */
/* 0x000fe200078e0200 */
/*01a0*/ ISETP.GE.AND P6, PT, R6, 0x1f, PT ; /* 0x0000001f0600780c */
/* 0x000fca0003fc6270 */
/*01b0*/ @!P3 LDG.E.CONSTANT R13, [R8.64+0x4] ; /* 0x00000408080db981 */
/* 0x000ee8000c1e9900 */
/*01c0*/ @!P4 LDG.E.CONSTANT R19, [R8.64] ; /* 0x000000080813c981 */
/* 0x000f28000c1e9900 */
/*01d0*/ @!P5 STS [R4.X4], R5 ; /* 0x000000050400d388 */
/* 0x0041e20000004800 */
/*01e0*/ ISETP.LT.OR P5, PT, R6, 0x1f, P1 ; /* 0x0000001f0600780c */
/* 0x000fe20000fa1670 */
/*01f0*/ IMAD.WIDE R6, R2, R11, c[0x0][0x168] ; /* 0x00005a0002067625 */
/* 0x000fc800078e020b */
/*0200*/ IMAD.WIDE R10, R10, R11, c[0x0][0x168] ; /* 0x00005a000a0a7625 */
/* 0x000fe200078e020b */
/*0210*/ @!P1 LDG.E.CONSTANT R23, [R6.64+-0x4] ; /* 0xfffffc0806179981 */
/* 0x000ea8000c1e9900 */
/*0220*/ LDG.E.CONSTANT R5, [R6.64] ; /* 0x0000000806057981 */
/* 0x001f68000c1e9900 */
/*0230*/ @P2 LDG.E.CONSTANT R25, [R6.64+0x4] ; /* 0x0000040806192981 */
/* 0x000f68000c1e9900 */
/*0240*/ @!P5 LDG.E.CONSTANT R15, [R10.64+-0x4] ; /* 0xfffffc080a0fd981 */
/* 0x000f68000c1e9900 */
/*0250*/ @!P0 LDG.E.CONSTANT R17, [R10.64+0x4] ; /* 0x000004080a118981 */
/* 0x000f68000c1e9900 */
/*0260*/ @P6 LDG.E.CONSTANT R21, [R10.64] ; /* 0x000000080a156981 */
/* 0x000f68000c1e9900 */
/*0270*/ @!P3 STS [R4.X4+0x8], R13 ; /* 0x0000080d0400b388 */
/* 0x008fe80000004800 */
/*0280*/ @!P4 STS [R4.X4+0x4], R19 ; /* 0x000004130400c388 */
/* 0x010fe20000004800 */
/*0290*/ UIADD3 UR4, -UR5, UR7, URZ ; /* 0x0000000705047290 */
/* 0x000fc4000fffe13f */
/*02a0*/ UIADD3 UR5, -UR5, UR6, URZ ; /* 0x0000000605057290 */
/* 0x000fe2000fffe13f */
/*02b0*/ @!P1 STS [R4.X4+0x88], R23 ; /* 0x0000881704009388 */
/* 0x004fe80000004800 */
/*02c0*/ STS [R4.X4+0x8c], R5 ; /* 0x00008c0504007388 */
/* 0x020fe80000004800 */
/*02d0*/ @P2 STS [R4.X4+0x90], R25 ; /* 0x0000901904002388 */
/* 0x000fe80000004800 */
/*02e0*/ @!P5 STS [R4.X4+0x110], R15 ; /* 0x0001100f0400d388 */
/* 0x000fe80000004800 */
/*02f0*/ @!P0 STS [R4.X4+0x118], R17 ; /* 0x0001181104008388 */
/* 0x000fe80000004800 */
/*0300*/ @P6 STS [R4.X4+0x114], R21 ; /* 0x0001141504006388 */
/* 0x000fe80000004800 */
/*0310*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0320*/ LDS R6, [R4.X4] ; /* 0x0000000004067984 */
/* 0x000fe80000004800 */
/*0330*/ LDS R7, [R4.X4+0x8] ; /* 0x0000080004077984 */
/* 0x000e280000004800 */
/*0340*/ LDS R9, [R4.X4+0x4] ; /* 0x0000040004097984 */
/* 0x000e680000004800 */
/*0350*/ LDS R8, [R4.X4+0x90] ; /* 0x0000900004087984 */
/* 0x000ea80000004800 */
/*0360*/ LDS R10, [R4.X4+0x88] ; /* 0x00008800040a7984 */
/* 0x000ee80000004800 */
/*0370*/ LDS R14, [R4.X4+0x110] ; /* 0x00011000040e7984 */
/* 0x000f280000004800 */
/*0380*/ LDS R12, [R4.X4+0x118] ; /* 0x00011800040c7984 */
/* 0x000f680000004800 */
/*0390*/ LDS R16, [R4.X4+0x114] ; /* 0x0001140004107984 */
/* 0x000f680000004800 */
/*03a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*03b0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f05270 */
/*03c0*/ IMAD.IADD R5, R7, 0x1, -R6 ; /* 0x0000000107057824 */
/* 0x001fc400078e0a06 */
/*03d0*/ IMAD R9, R9, 0x2, R6 ; /* 0x0000000209097824 */
/* 0x002fe400078e0206 */
/*03e0*/ IMAD R5, R8, 0x2, R5 ; /* 0x0000000208057824 */
/* 0x004fc800078e0205 */
/*03f0*/ IMAD R5, R10, -0x2, R5 ; /* 0xfffffffe0a057824 */
/* 0x008fe200078e0205 */
/*0400*/ IADD3 R9, -R14.reuse, R9, R7 ; /* 0x000000090e097210 */
/* 0x050fe40007ffe107 */
/*0410*/ ISETP.NE.AND P0, PT, R3.reuse, RZ, P0 ; /* 0x000000ff0300720c */
/* 0x040fe40000705270 */
/*0420*/ IADD3 R5, -R14, R5, R12 ; /* 0x000000050e057210 */
/* 0x020fe20007ffe10c */
/*0430*/ IMAD R9, R16, -0x2, R9 ; /* 0xfffffffe10097824 */
/* 0x000fe200078e0209 */
/*0440*/ ISETP.NE.AND P0, PT, R3, UR4, P0 ; /* 0x0000000403007c0c */
/* 0x000fc60008705270 */
/*0450*/ IMAD R4, R5, R5, RZ ; /* 0x0000000505047224 */
/* 0x000fe400078e02ff */
/*0460*/ IMAD.IADD R9, R9, 0x1, -R12 ; /* 0x0000000109097824 */
/* 0x000fe200078e0a0c */
/*0470*/ ISETP.NE.AND P0, PT, R0, UR5, P0 ; /* 0x0000000500007c0c */
/* 0x000fc60008705270 */
/*0480*/ IMAD R9, R9, R9, R4 ; /* 0x0000000909097224 */
/* 0x000fe200078e0204 */
/*0490*/ SHF.R.S32.HI R3, RZ, 0x1f, R2 ; /* 0x0000001fff037819 */
/* 0x000fe40000011402 */
/*04a0*/ LEA R4, P1, R2.reuse, c[0x0][0x160], 0x2 ; /* 0x0000580002047a11 */
/* 0x040fe400078210ff */
/*04b0*/ ISETP.GT.AND P0, PT, R9, c[0x0][0x178], P0 ; /* 0x00005e0009007a0c */
/* 0x000fe40000704270 */
/*04c0*/ LEA.HI.X R5, R2, c[0x0][0x164], R3, 0x2, P1 ; /* 0x0000590002057a11 */
/* 0x000fe400008f1403 */
/*04d0*/ SEL R3, RZ, 0xff, !P0 ; /* 0x000000ffff037807 */
/* 0x000fca0004000000 */
/*04e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x000fe2000c101908 */
/*04f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0500*/ BRA 0x500; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z8d_sobel1PiPjiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fc400078e0203 */
/*0060*/ IMAD R3, R0, c[0x0][0x4], R5 ; /* 0x0000010000037a24 */
/* 0x002fc600078e0205 */
/*0070*/ IADD3 R0, R2, -0x1, RZ ; /* 0xffffffff02007810 */
/* 0x000fe40007ffe0ff */
/*0080*/ IADD3 R4, R3, -0x1, RZ ; /* 0xffffffff03047810 */
/* 0x000fe40007ffe0ff */
/*0090*/ ISETP.GT.U32.AND P0, PT, R0, 0x1dd, PT ; /* 0x000001dd0000780c */
/* 0x000fc80003f04070 */
/*00a0*/ ISETP.GT.U32.OR P0, PT, R4, 0x17d, P0 ; /* 0x0000017d0400780c */
/* 0x000fda0000704470 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fe20000000f00 */
/*00d0*/ HFMA2.MMA R0, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff007435 */
/* 0x000fe200000001ff */
/*00e0*/ IMAD R9, R2.reuse, c[0x0][0x170], R3 ; /* 0x00005c0002097a24 */
/* 0x040fe200078e0203 */
/*00f0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0100*/ IMAD R4, R2, R7, -c[0x0][0x170] ; /* 0x80005c0002047624 */
/* 0x000fca00078e0207 */
/*0110*/ IADD3 R5, R3, R4, RZ ; /* 0x0000000403057210 */
/* 0x000fca0007ffe0ff */
/*0120*/ IMAD.WIDE R2, R5, R0, c[0x0][0x168] ; /* 0x00005a0005027625 */
/* 0x000fc800078e0200 */
/*0130*/ IMAD R7, R7, 0x2, R5 ; /* 0x0000000207077824 */
/* 0x000fe200078e0205 */
/*0140*/ LDG.E.CONSTANT R8, [R2.64+-0x4] ; /* 0xfffffc0402087981 */
/* 0x000ea2000c1e9900 */
/*0150*/ IMAD.WIDE R4, R9, R0, c[0x0][0x168] ; /* 0x00005a0009047625 */
/* 0x000fc600078e0200 */
/*0160*/ LDG.E.CONSTANT R11, [R2.64+0x4] ; /* 0x00000404020b7981 */
/* 0x000ea2000c1e9900 */
/*0170*/ IMAD.WIDE R6, R7, R0, c[0x0][0x168] ; /* 0x00005a0007067625 */
/* 0x000fc600078e0200 */
/*0180*/ LDG.E.CONSTANT R19, [R2.64] ; /* 0x0000000402137981 */
/* 0x000ee8000c1e9900 */
/*0190*/ LDG.E.CONSTANT R13, [R4.64+0x4] ; /* 0x00000404040d7981 */
/* 0x000f28000c1e9900 */
/*01a0*/ LDG.E.CONSTANT R12, [R6.64+-0x4] ; /* 0xfffffc04060c7981 */
/* 0x000f68000c1e9900 */
/*01b0*/ LDG.E.CONSTANT R15, [R4.64+-0x4] ; /* 0xfffffc04040f7981 */
/* 0x000f68000c1e9900 */
/*01c0*/ LDG.E.CONSTANT R14, [R6.64] ; /* 0x00000004060e7981 */
/* 0x000f68000c1e9900 */
/*01d0*/ LDG.E.CONSTANT R17, [R6.64+0x4] ; /* 0x0000040406117981 */
/* 0x000f62000c1e9900 */
/*01e0*/ IADD3 R10, -R8, R11, RZ ; /* 0x0000000b080a7210 */
/* 0x004fc40007ffe1ff */
/*01f0*/ LEA R19, R19, R8, 0x1 ; /* 0x0000000813137211 */
/* 0x008fc600078e08ff */
/*0200*/ IMAD R10, R13, 0x2, R10 ; /* 0x000000020d0a7824 */
/* 0x010fe200078e020a */
/*0210*/ IADD3 R19, -R12, R19, R11 ; /* 0x000000130c137210 */
/* 0x020fc60007ffe10b */
/*0220*/ IMAD R10, R15, -0x2, R10 ; /* 0xfffffffe0f0a7824 */
/* 0x000fe400078e020a */
/*0230*/ IMAD R14, R14, -0x2, R19 ; /* 0xfffffffe0e0e7824 */
/* 0x000fc600078e0213 */
/*0240*/ IADD3 R10, -R12, R10, R17 ; /* 0x0000000a0c0a7210 */
/* 0x000fe40007ffe111 */
/*0250*/ IADD3 R14, -R17, R14, RZ ; /* 0x0000000e110e7210 */
/* 0x000fc60007ffe1ff */
/*0260*/ IMAD R3, R10, R10, RZ ; /* 0x0000000a0a037224 */
/* 0x000fc800078e02ff */
/*0270*/ IMAD R3, R14, R14, R3 ; /* 0x0000000e0e037224 */
/* 0x000fca00078e0203 */
/*0280*/ ISETP.GT.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f04270 */
/*0290*/ IMAD.WIDE R2, R9, R0, c[0x0][0x160] ; /* 0x0000580009027625 */
/* 0x000fd800078e0200 */
/*02a0*/ @!P0 STG.E [R2.64], RZ ; /* 0x000000ff02008986 */
/* 0x0001e2000c101904 */
/*02b0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*02c0*/ MOV R5, 0xff ; /* 0x000000ff00057802 */
/* 0x000fca0000000f00 */
/*02d0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*02e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02f0*/ BRA 0x2f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Dan Rolfe
#define BLOCKSIZE 32
// general structure for version 1 requested by prof
// section 5.3 of the cuda pogramming guide
/**
* first load from device mem to shared mem
* sync threads after read
* do the processing from shared mem
* sync threads after processing
* write the results back to device mem
**/
// gpu version 1 of the sobel code
// Naive Sobel edge-detection kernel: one thread per pixel, reading its 3x3
// neighbourhood directly from global memory (no shared-memory tiling).
// Writes 255 to result[index] when the squared gradient magnitude exceeds
// thresh, 0 otherwise; pixels outside the guarded interior are left untouched.
// NOTE(review): bounds are hard-coded (i<479, j<383) and the ysize parameter
// is unused, so this assumes a fixed image size — TODO confirm against the
// host-side launch code. i (x-derived) indexes rows with stride xsize below;
// verify that matches the image layout.
__global__ void d_sobel1(int* __restrict__ result, unsigned int* __restrict__ pic, int xsize, int ysize, int thresh)
{
int sum1, sum2, magnitude;
// Global 2D coordinates of this thread's pixel.
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
//printf("xindex = %d, yindex = %d \n", i, j);
// Interior-only guard so the 3x3 window below stays inside the assumed image.
if (i>0 && j>0 && i<479 &&j<383)
{
//if (j > 470)
// printf("xindex = %d, yindex = %d, result length = %d, pic length = %d \n", i, j, sizeof(result), sizeof(pic));
int index = i * xsize + j;
// Horizontal Sobel response: column (j+1) minus column (j-1), middle row x2.
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
// Vertical Sobel response: row (i-1) minus row (i+1), middle column x2.
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
// Squared magnitude — avoids sqrt; thresh must be in squared units.
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
// if (i >=383 && j >=479 )
// printf("result value at i = %d, j = %d, is %d ... index is %d magnitude = %d, thresh = %d\n", i, j, result[index], index, magnitude, thresh);
}
}
// Shared-memory (tiled) Sobel kernel: each block stages a (BLOCKSIZE+2)^2
// tile — its BLOCKSIZE^2 interior plus a 1-pixel halo — into pic_s,
// synchronizes, then evaluates the 3x3 Sobel operator from shared memory.
// NOTE(review): halo loading assumes blockDim.x == blockDim.y == BLOCKSIZE
// (32) — TODO confirm at the launch site. Border threads load pic at
// x-1 / x+1 / y-1 / y+1 with no bounds guard (out of range for edge blocks);
// presumably tolerated because the final border check forces those outputs
// to 0 — verify.
__global__ void d_sobel2(int* __restrict__ result, unsigned int* __restrict__ pic, int width, int height, int thresh)
{
int sum1, sum2, magnitude;
// Global pixel coordinates; x is used as the row index with stride 'width'.
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int index = x * width +y ;
//printf("x = %d, y = %d \n", x, y);
//if (x>0 && y>0 && x<height -1 &&y<width -1)
// Tile plus 1-pixel halo on every side; uninitialized until the loads below.
__shared__ unsigned int pic_s[BLOCKSIZE+2][BLOCKSIZE+2];
int threadX = threadIdx.x;
int threadY = threadIdx.y;
// Every thread loads its own pixel into the tile interior (+1 skips the halo).
pic_s[threadX+1][threadY+1] = pic[index];
// top left corner
if(threadX <1 && threadY <1)
pic_s[threadX][threadY] = pic[(x-1)*width + y-1];
// top right corner
if(threadX <1 && threadY > BLOCKSIZE -2)
pic_s[threadX][threadY+2] = pic[(x-1)*width + y+1];
// bottom left corner
if(threadX > BLOCKSIZE -2 && threadY <1)
pic_s[threadX+2][threadY] = pic[(x+1)*width + y-1];
// bottom right corner
if(threadX > BLOCKSIZE -2 && threadY > BLOCKSIZE -2)
pic_s[threadX+2][threadY+2] = pic[(x+1)*width + y+1];
// top edge
if (threadX < 1)
pic_s[threadX][threadY+1] = pic[(x-1)*width + y];
// bottom edge
if (threadX > BLOCKSIZE -2)
pic_s[threadX+2][threadY+1] = pic[(x+1)*width + y];
// left edge
if (threadY < 1)
pic_s[threadX+1][threadY] = pic[(x)*width + y-1];
// right edge
if (threadY > BLOCKSIZE -2)
pic_s[threadX+1][threadY+2] = pic[(x)*width + y+1];
//printf("after pics \n");
// Barrier: all tile + halo writes must complete before any thread reads
// pic_s below. Reached unconditionally by every thread in the block.
__syncthreads();
// 3x3 Sobel from shared memory; thread (threadX,threadY) is centered at
// pic_s[threadX+1][threadY+1].
sum1 = pic_s[threadX][threadY+2] - pic_s[threadX][threadY]
+ 2 * pic_s[threadX+1][threadY+2] - 2 * pic_s[threadX+1][threadY]
+ pic_s[threadX+2][threadY+2] - pic_s[threadX+2][threadY];
sum2 = pic_s[threadX][threadY] + 2 * pic_s[threadX][threadY+1] + pic_s[threadX][threadY+2]
- pic_s[threadX+2][threadY] - 2 * pic_s[threadX+2][threadY+1] - pic_s[threadX+2][threadY+2];
// Squared magnitude (no sqrt); thresh is compared in squared units.
magnitude = sum1*sum1 + sum2*sum2;
__syncthreads();
//printf(" index = %d, sum1 = %d, sum2 = %d, magnitude = %d \n", index, sum1, sum2, magnitude);
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
// Border pixels are forced to 0 regardless of the computed magnitude.
if (x ==0 || y ==0 || x==height-1 || y == width-1)
result[index] = 0;
} | .file "tmpxft_000571ff_00000000-6_cudaSobel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii
.type _Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii, @function
_Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8d_sobel1PiPjiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii, .-_Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii
.globl _Z8d_sobel1PiPjiii
.type _Z8d_sobel1PiPjiii, @function
_Z8d_sobel1PiPjiii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8d_sobel1PiPjiii, .-_Z8d_sobel1PiPjiii
.globl _Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii
.type _Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii, @function
_Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8d_sobel2PiPjiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii, .-_Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii
.globl _Z8d_sobel2PiPjiii
.type _Z8d_sobel2PiPjiii, @function
_Z8d_sobel2PiPjiii:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z8d_sobel2PiPjiii, .-_Z8d_sobel2PiPjiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8d_sobel2PiPjiii"
.LC1:
.string "_Z8d_sobel1PiPjiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8d_sobel2PiPjiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z8d_sobel1PiPjiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Dan Rolfe
#define BLOCKSIZE 32
// general structure for version 1 requested by prof
// section 5.3 of the cuda pogramming guide
/**
* first load from device mem to shared mem
* sync threads after read
* do the processing from shared mem
* sync threads after processing
* write the results back to device mem
**/
// gpu version 1 of the sobel code
__global__ void d_sobel1(int* __restrict__ result, unsigned int* __restrict__ pic, int xsize, int ysize, int thresh)
{
int sum1, sum2, magnitude;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
//printf("xindex = %d, yindex = %d \n", i, j);
if (i>0 && j>0 && i<479 &&j<383)
{
//if (j > 470)
// printf("xindex = %d, yindex = %d, result length = %d, pic length = %d \n", i, j, sizeof(result), sizeof(pic));
int index = i * xsize + j;
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
// if (i >=383 && j >=479 )
// printf("result value at i = %d, j = %d, is %d ... index is %d magnitude = %d, thresh = %d\n", i, j, result[index], index, magnitude, thresh);
}
}
__global__ void d_sobel2(int* __restrict__ result, unsigned int* __restrict__ pic, int width, int height, int thresh)
{
int sum1, sum2, magnitude;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int index = x * width +y ;
//printf("x = %d, y = %d \n", x, y);
//if (x>0 && y>0 && x<height -1 &&y<width -1)
__shared__ unsigned int pic_s[BLOCKSIZE+2][BLOCKSIZE+2];
int threadX = threadIdx.x;
int threadY = threadIdx.y;
pic_s[threadX+1][threadY+1] = pic[index];
// top left corner
if(threadX <1 && threadY <1)
pic_s[threadX][threadY] = pic[(x-1)*width + y-1];
// top right corner
if(threadX <1 && threadY > BLOCKSIZE -2)
pic_s[threadX][threadY+2] = pic[(x-1)*width + y+1];
// bottom left corner
if(threadX > BLOCKSIZE -2 && threadY <1)
pic_s[threadX+2][threadY] = pic[(x+1)*width + y-1];
// bottom right corner
if(threadX > BLOCKSIZE -2 && threadY > BLOCKSIZE -2)
pic_s[threadX+2][threadY+2] = pic[(x+1)*width + y+1];
// top edge
if (threadX < 1)
pic_s[threadX][threadY+1] = pic[(x-1)*width + y];
// bottom edge
if (threadX > BLOCKSIZE -2)
pic_s[threadX+2][threadY+1] = pic[(x+1)*width + y];
// left edge
if (threadY < 1)
pic_s[threadX+1][threadY] = pic[(x)*width + y-1];
// right edge
if (threadY > BLOCKSIZE -2)
pic_s[threadX+1][threadY+2] = pic[(x)*width + y+1];
//printf("after pics \n");
__syncthreads();
sum1 = pic_s[threadX][threadY+2] - pic_s[threadX][threadY]
+ 2 * pic_s[threadX+1][threadY+2] - 2 * pic_s[threadX+1][threadY]
+ pic_s[threadX+2][threadY+2] - pic_s[threadX+2][threadY];
sum2 = pic_s[threadX][threadY] + 2 * pic_s[threadX][threadY+1] + pic_s[threadX][threadY+2]
- pic_s[threadX+2][threadY] - 2 * pic_s[threadX+2][threadY+1] - pic_s[threadX+2][threadY+2];
magnitude = sum1*sum1 + sum2*sum2;
__syncthreads();
//printf(" index = %d, sum1 = %d, sum2 = %d, magnitude = %d \n", index, sum1, sum2, magnitude);
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
if (x ==0 || y ==0 || x==height-1 || y == width-1)
result[index] = 0;
} | #include <hip/hip_runtime.h>
// Dan Rolfe
#define BLOCKSIZE 32
// general structure for version 1 requested by prof
// section 5.3 of the cuda pogramming guide
/**
* first load from device mem to shared mem
* sync threads after read
* do the processing from shared mem
* sync threads after processing
* write the results back to device mem
**/
// gpu version 1 of the sobel code
__global__ void d_sobel1(int* __restrict__ result, unsigned int* __restrict__ pic, int xsize, int ysize, int thresh)
{
int sum1, sum2, magnitude;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
//printf("xindex = %d, yindex = %d \n", i, j);
if (i>0 && j>0 && i<479 &&j<383)
{
//if (j > 470)
// printf("xindex = %d, yindex = %d, result length = %d, pic length = %d \n", i, j, sizeof(result), sizeof(pic));
int index = i * xsize + j;
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
// if (i >=383 && j >=479 )
// printf("result value at i = %d, j = %d, is %d ... index is %d magnitude = %d, thresh = %d\n", i, j, result[index], index, magnitude, thresh);
}
}
__global__ void d_sobel2(int* __restrict__ result, unsigned int* __restrict__ pic, int width, int height, int thresh)
{
int sum1, sum2, magnitude;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int index = x * width +y ;
//printf("x = %d, y = %d \n", x, y);
//if (x>0 && y>0 && x<height -1 &&y<width -1)
__shared__ unsigned int pic_s[BLOCKSIZE+2][BLOCKSIZE+2];
int threadX = threadIdx.x;
int threadY = threadIdx.y;
pic_s[threadX+1][threadY+1] = pic[index];
// top left corner
if(threadX <1 && threadY <1)
pic_s[threadX][threadY] = pic[(x-1)*width + y-1];
// top right corner
if(threadX <1 && threadY > BLOCKSIZE -2)
pic_s[threadX][threadY+2] = pic[(x-1)*width + y+1];
// bottom left corner
if(threadX > BLOCKSIZE -2 && threadY <1)
pic_s[threadX+2][threadY] = pic[(x+1)*width + y-1];
// bottom right corner
if(threadX > BLOCKSIZE -2 && threadY > BLOCKSIZE -2)
pic_s[threadX+2][threadY+2] = pic[(x+1)*width + y+1];
// top edge
if (threadX < 1)
pic_s[threadX][threadY+1] = pic[(x-1)*width + y];
// bottom edge
if (threadX > BLOCKSIZE -2)
pic_s[threadX+2][threadY+1] = pic[(x+1)*width + y];
// left edge
if (threadY < 1)
pic_s[threadX+1][threadY] = pic[(x)*width + y-1];
// right edge
if (threadY > BLOCKSIZE -2)
pic_s[threadX+1][threadY+2] = pic[(x)*width + y+1];
//printf("after pics \n");
__syncthreads();
sum1 = pic_s[threadX][threadY+2] - pic_s[threadX][threadY]
+ 2 * pic_s[threadX+1][threadY+2] - 2 * pic_s[threadX+1][threadY]
+ pic_s[threadX+2][threadY+2] - pic_s[threadX+2][threadY];
sum2 = pic_s[threadX][threadY] + 2 * pic_s[threadX][threadY+1] + pic_s[threadX][threadY+2]
- pic_s[threadX+2][threadY] - 2 * pic_s[threadX+2][threadY+1] - pic_s[threadX+2][threadY+2];
magnitude = sum1*sum1 + sum2*sum2;
__syncthreads();
//printf(" index = %d, sum1 = %d, sum2 = %d, magnitude = %d \n", index, sum1, sum2, magnitude);
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
if (x ==0 || y ==0 || x==height-1 || y == width-1)
result[index] = 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// Dan Rolfe
#define BLOCKSIZE 32
// general structure for version 1 requested by prof
// section 5.3 of the cuda pogramming guide
/**
* first load from device mem to shared mem
* sync threads after read
* do the processing from shared mem
* sync threads after processing
* write the results back to device mem
**/
// gpu version 1 of the sobel code
__global__ void d_sobel1(int* __restrict__ result, unsigned int* __restrict__ pic, int xsize, int ysize, int thresh)
{
int sum1, sum2, magnitude;
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
//printf("xindex = %d, yindex = %d \n", i, j);
if (i>0 && j>0 && i<479 &&j<383)
{
//if (j > 470)
// printf("xindex = %d, yindex = %d, result length = %d, pic length = %d \n", i, j, sizeof(result), sizeof(pic));
int index = i * xsize + j;
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
// if (i >=383 && j >=479 )
// printf("result value at i = %d, j = %d, is %d ... index is %d magnitude = %d, thresh = %d\n", i, j, result[index], index, magnitude, thresh);
}
}
__global__ void d_sobel2(int* __restrict__ result, unsigned int* __restrict__ pic, int width, int height, int thresh)
{
int sum1, sum2, magnitude;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int index = x * width +y ;
//printf("x = %d, y = %d \n", x, y);
//if (x>0 && y>0 && x<height -1 &&y<width -1)
__shared__ unsigned int pic_s[BLOCKSIZE+2][BLOCKSIZE+2];
int threadX = threadIdx.x;
int threadY = threadIdx.y;
pic_s[threadX+1][threadY+1] = pic[index];
// top left corner
if(threadX <1 && threadY <1)
pic_s[threadX][threadY] = pic[(x-1)*width + y-1];
// top right corner
if(threadX <1 && threadY > BLOCKSIZE -2)
pic_s[threadX][threadY+2] = pic[(x-1)*width + y+1];
// bottom left corner
if(threadX > BLOCKSIZE -2 && threadY <1)
pic_s[threadX+2][threadY] = pic[(x+1)*width + y-1];
// bottom right corner
if(threadX > BLOCKSIZE -2 && threadY > BLOCKSIZE -2)
pic_s[threadX+2][threadY+2] = pic[(x+1)*width + y+1];
// top edge
if (threadX < 1)
pic_s[threadX][threadY+1] = pic[(x-1)*width + y];
// bottom edge
if (threadX > BLOCKSIZE -2)
pic_s[threadX+2][threadY+1] = pic[(x+1)*width + y];
// left edge
if (threadY < 1)
pic_s[threadX+1][threadY] = pic[(x)*width + y-1];
// right edge
if (threadY > BLOCKSIZE -2)
pic_s[threadX+1][threadY+2] = pic[(x)*width + y+1];
//printf("after pics \n");
__syncthreads();
sum1 = pic_s[threadX][threadY+2] - pic_s[threadX][threadY]
+ 2 * pic_s[threadX+1][threadY+2] - 2 * pic_s[threadX+1][threadY]
+ pic_s[threadX+2][threadY+2] - pic_s[threadX+2][threadY];
sum2 = pic_s[threadX][threadY] + 2 * pic_s[threadX][threadY+1] + pic_s[threadX][threadY+2]
- pic_s[threadX+2][threadY] - 2 * pic_s[threadX+2][threadY+1] - pic_s[threadX+2][threadY+2];
magnitude = sum1*sum1 + sum2*sum2;
__syncthreads();
//printf(" index = %d, sum1 = %d, sum2 = %d, magnitude = %d \n", index, sum1, sum2, magnitude);
if (magnitude > thresh)
result[index] = 255;
else
result[index] = 0;
if (x ==0 || y ==0 || x==height-1 || y == width-1)
result[index] = 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8d_sobel1PiPjiii
.globl _Z8d_sobel1PiPjiii
.p2align 8
.type _Z8d_sobel1PiPjiii,@function
_Z8d_sobel1PiPjiii:
s_load_b32 s2, s[0:1], 0x2c
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_add_nc_u32_e32 v2, -1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, -1, v1
v_cmp_gt_u32_e32 vcc_lo, 0x1de, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u32_e64 s2, 0x17e, v3
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s2, s[0:1], 0x10
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[3:4], null, v0, s2, v[1:2]
v_mad_u64_u32 v[5:6], null, v2, s2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v0, s2, v3
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[2:3], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[4:5], 2, v[5:6]
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
s_clause 0x5
global_load_b32 v10, v[0:1], off offset:-4
global_load_b32 v11, v[6:7], off offset:-4
global_load_b64 v[0:1], v[0:1], off
global_load_b64 v[8:9], v[4:5], off
global_load_b32 v4, v[4:5], off offset:-4
global_load_b32 v5, v[6:7], off offset:4
s_waitcnt vmcnt(2)
v_add_nc_u32_e32 v6, v9, v1
s_waitcnt vmcnt(1)
v_add_nc_u32_e32 v7, v4, v10
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v5, v5, v11
v_add_nc_u32_e32 v4, v4, v9
v_add_nc_u32_e32 v1, v1, v10
v_sub_nc_u32_e32 v6, v6, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v1, v4, v1
v_lshl_add_u32 v5, v5, 1, v6
v_sub_nc_u32_e32 v6, v8, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v0, v5, v5
v_lshl_add_u32 v1, v6, 1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[4:5], null, v1, v1, v[0:1]
v_add_co_u32 v0, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v3, vcc_lo
v_cmp_lt_i32_e32 vcc_lo, s0, v4
v_cndmask_b32_e64 v2, 0, 0xff, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8d_sobel1PiPjiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8d_sobel1PiPjiii, .Lfunc_end0-_Z8d_sobel1PiPjiii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8d_sobel2PiPjiii
.globl _Z8d_sobel2PiPjiii
.p2align 8
.type _Z8d_sobel2PiPjiii,@function
_Z8d_sobel2PiPjiii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x10
v_and_b32_e32 v4, 0x3ff, v0
v_bfe_u32 v5, v0, 10, 10
s_load_b64 s[6:7], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v9, 1, v4
v_add_nc_u32_e32 v8, 1, v5
v_or_b32_e32 v12, v4, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v11, 2, v8
v_mad_u32_u24 v11, v9, 0x88, v11
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s14, s3, v[4:5]
v_mad_u64_u32 v[1:2], null, s15, s2, v[5:6]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s8, v[1:2]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[2:3]
v_add_co_u32 v6, vcc_lo, s6, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_load_b32 v10, v[6:7], off
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10
v_cmpx_eq_u32_e32 0, v12
s_cbranch_execz .LBB1_2
v_add_nc_u32_e32 v12, -1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[10:11], null, v12, s8, v[1:2]
v_ashrrev_i32_e32 v11, 31, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], 2, v[10:11]
v_add_co_u32 v10, vcc_lo, s6, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v11, vcc_lo
global_load_b32 v10, v[10:11], off offset:-4
v_lshlrev_b32_e32 v11, 2, v5
v_mad_u32_u24 v11, v4, 0x88, v11
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s2
v_cmp_eq_u32_e64 s3, 0, v4
v_cmp_lt_u32_e32 vcc_lo, 30, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, vcc_lo
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB1_4
v_add_nc_u32_e32 v12, -1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[10:11], null, v12, s8, v[1:2]
v_ashrrev_i32_e32 v11, 31, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], 2, v[10:11]
v_add_co_u32 v10, s2, s6, v10
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
global_load_b32 v10, v[10:11], off offset:4
v_lshlrev_b32_e32 v11, 2, v5
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10 offset:8
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s4
v_mul_lo_u32 v10, s8, v0
v_cmp_eq_u32_e64 s2, 0, v5
v_cmp_lt_u32_e64 s4, 30, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s5, s4, s2
s_and_saveexec_b32 s9, s5
s_cbranch_execz .LBB1_6
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v11, v10, s8, v1
v_ashrrev_i32_e32 v12, 31, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[11:12], 2, v[11:12]
v_add_co_u32 v11, s5, s6, v11
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, s5, s7, v12, s5
global_load_b32 v11, v[11:12], off offset:-4
v_mul_u32_u24_e32 v12, 0x88, v4
s_waitcnt vmcnt(0)
ds_store_b32 v12, v11 offset:272
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s9
s_and_b32 s5, s4, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s9, s5
s_cbranch_execz .LBB1_8
v_add3_u32 v11, v10, s8, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v12, 31, v11
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s5, s6, v11
v_add_co_ci_u32_e64 v12, s5, s7, v12, s5
global_load_b32 v11, v[11:12], off offset:4
v_lshlrev_b32_e32 v12, 2, v5
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v12, v4, 0x88, v12
s_waitcnt vmcnt(0)
ds_store_b32 v12, v11 offset:280
.LBB1_8:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s5, s3
s_cbranch_execz .LBB1_10
v_add_nc_u32_e32 v13, -1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[11:12], null, v13, s8, v[1:2]
v_ashrrev_i32_e32 v12, 31, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[11:12], 2, v[11:12]
v_add_co_u32 v11, s3, s6, v11
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, s3, s7, v12, s3
global_load_b32 v11, v[11:12], off
v_lshlrev_b32_e32 v12, 2, v8
s_waitcnt vmcnt(0)
ds_store_b32 v12, v11
.LBB1_10:
s_or_b32 exec_lo, exec_lo, s5
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB1_12
v_add3_u32 v10, v10, s8, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v11, 31, v10
v_lshlrev_b64 v[10:11], 2, v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v10, s3, s6, v10
v_add_co_ci_u32_e64 v11, s3, s7, v11, s3
global_load_b32 v10, v[10:11], off
v_lshlrev_b32_e32 v11, 2, v8
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v11, v4, 0x88, v11
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10 offset:272
.LBB1_12:
s_or_b32 exec_lo, exec_lo, s5
s_load_b64 s[4:5], s[0:1], 0x0
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_14
global_load_b32 v10, v[6:7], off offset:-4
v_mul_u32_u24_e32 v11, 0x88, v9
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10
.LBB1_14:
s_or_b32 exec_lo, exec_lo, s3
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB1_16
global_load_b32 v6, v[6:7], off offset:4
v_mul_u32_u24_e32 v7, 0x88, v9
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v7, v5, 2, v7
s_waitcnt vmcnt(0)
ds_store_b32 v7, v6 offset:8
.LBB1_16:
s_or_b32 exec_lo, exec_lo, s2
v_lshlrev_b32_e32 v5, 2, v5
s_movk_i32 s2, 0x110
s_waitcnt lgkmcnt(0)
v_mad_u32_u24 v6, v4, 0x88, s2
s_barrier
v_add_nc_u32_e32 v7, 8, v5
v_mad_u32_u24 v10, v4, 0x88, v5
buffer_gl0_inv
v_add_nc_u32_e32 v12, v6, v5
v_mad_u32_u24 v5, v9, 0x88, v5
v_add_nc_u32_e32 v11, v6, v7
v_mad_u32_u24 v13, v4, 0x88, v7
v_mad_u32_u24 v7, v9, 0x88, v7
v_lshlrev_b32_e32 v8, 2, v8
s_load_b64 s[6:7], s[0:1], 0x14
ds_load_b32 v9, v11
ds_load_b32 v11, v12
ds_load_b32 v12, v13
ds_load_b32 v10, v10
ds_load_b32 v7, v7
ds_load_b32 v5, v5
v_mad_u32_u24 v4, v4, 0x88, v8
v_add_nc_u32_e32 v6, v6, v8
ds_load_b32 v4, v4
ds_load_b32 v6, v6
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_cmp_eq_u32_e32 vcc_lo, 0, v0
v_cmp_eq_u32_e64 s0, 0, v1
s_add_i32 s8, s8, -1
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e64 s1, s8, v1
s_barrier
s_or_b32 s0, vcc_lo, s0
buffer_gl0_inv
v_add_nc_u32_e32 v8, v12, v9
v_add_nc_u32_e32 v13, v10, v11
v_add_nc_u32_e32 v9, v9, v11
v_sub_nc_u32_e32 v5, v7, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v7, v8, v13
v_add_nc_u32_e32 v8, v10, v12
v_sub_nc_u32_e32 v6, v4, v6
v_lshl_add_u32 v5, v5, 1, v7
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v7, v8, v9
v_mul_lo_u32 v4, v5, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v7, v6, 1, v7
v_mad_u64_u32 v[5:6], null, v7, v7, v[4:5]
v_add_co_u32 v6, s2, s4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_add_co_ci_u32_e64 v7, s2, s5, v3, s2
s_add_i32 s2, s6, -1
v_cmp_eq_u32_e64 s2, s2, v0
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_lt_i32_e64 s3, s7, v5
s_or_b32 s0, s0, s2
s_delay_alu instid0(VALU_DEP_1)
v_cndmask_b32_e64 v0, 0, 0xff, s3
s_or_b32 s0, s0, s1
global_store_b32 v[6:7], v0, off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB1_18
v_add_co_u32 v0, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v3, vcc_lo
v_mov_b32_e32 v2, 0
global_store_b32 v[0:1], v2, off
.LBB1_18:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8d_sobel2PiPjiii
.amdhsa_group_segment_fixed_size 4624
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z8d_sobel2PiPjiii, .Lfunc_end1-_Z8d_sobel2PiPjiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .actual_access: write_only
.address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8d_sobel1PiPjiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8d_sobel1PiPjiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .actual_access: write_only
.address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4624
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8d_sobel2PiPjiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8d_sobel2PiPjiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// Dan Rolfe
#define BLOCKSIZE 32
// general structure for version 1 requested by prof
// section 5.3 of the cuda pogramming guide
/**
* first load from device mem to shared mem
* sync threads after read
* do the processing from shared mem
* sync threads after processing
* write the results back to device mem
**/
// gpu version 1 of the sobel code
// GPU Sobel, version 1: every thread reads its 3x3 neighborhood directly from
// global memory, computes the squared gradient magnitude, and thresholds it.
// Expects a 2D launch where thread (x,y) maps to pixel (i,j); one int written
// per interior pixel (255 if above thresh, else 0).
// NOTE(review): the interior test uses hard-coded bounds 479/383 instead of
// the xsize/ysize parameters, so this is only correct for one fixed image
// size -- confirm against the caller before generalizing.
__global__ void d_sobel1(int* __restrict__ result, unsigned int* __restrict__ pic, int xsize, int ysize, int thresh)
{
	int sum1, sum2, magnitude;
	// Global pixel coordinates for this thread.
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	//printf("xindex = %d, yindex = %d \n", i, j);
	// Interior pixels only: the 3x3 stencil below would read out of bounds
	// on the image border.
	if (i>0 && j>0 && i<479 &&j<383)
	{
		//if (j > 470)
		// printf("xindex = %d, yindex = %d, result length = %d, pic length = %d \n", i, j, sizeof(result), sizeof(pic));
		int index = i * xsize + j;
		// Horizontal gradient (Sobel Gx): right column minus left column,
		// center row weighted by 2.
		sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
		+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
		+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
		// Vertical gradient (Sobel Gy): top row minus bottom row,
		// center column weighted by 2.
		sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
		- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
		// Squared magnitude avoids a sqrt; thresh is compared against the
		// squared value.
		magnitude = sum1*sum1 + sum2*sum2;
		if (magnitude > thresh)
			result[index] = 255;
		else
			result[index] = 0;
		// if (i >=383 && j >=479 )
		// printf("result value at i = %d, j = %d, is %d ... index is %d magnitude = %d, thresh = %d\n", i, j, result[index], index, magnitude, thresh);
	}
}
// GPU Sobel, version 2: stages a (BLOCKSIZE+2)^2 tile of the image -- the
// block's pixels plus a one-pixel halo -- into shared memory, then computes
// the stencil entirely from shared memory (structure per CUDA programming
// guide sec. 5.3: load, sync, compute, sync, store).
// Expects a 2D launch with blockDim == (BLOCKSIZE, BLOCKSIZE).
// NOTE(review): the halo loads index pic at x-1 / x+1 / y-1 / y+1 without
// clamping, so blocks touching the image border read out of range even though
// the final border write is forced to 0 -- confirm the launch tolerates this.
__global__ void d_sobel2(int* __restrict__ result, unsigned int* __restrict__ pic, int width, int height, int thresh)
{
	int sum1, sum2, magnitude;
	// Global pixel coordinates and flattened output index.
	int x = threadIdx.x + blockIdx.x * blockDim.x;
	int y = threadIdx.y + blockIdx.y * blockDim.y;
	int index = x * width +y ;
	//printf("x = %d, y = %d \n", x, y);
	//if (x>0 && y>0 && x<height -1 &&y<width -1)
	// Block tile plus a one-pixel halo on every side.
	__shared__ unsigned int pic_s[BLOCKSIZE+2][BLOCKSIZE+2];
	int threadX = threadIdx.x;
	int threadY = threadIdx.y;
	// Each thread loads its own pixel into the tile interior (offset by +1
	// to leave room for the halo).
	pic_s[threadX+1][threadY+1] = pic[index];
	// Boundary threads additionally fetch the halo cells next to them.
	// top left corner
	if(threadX <1 && threadY <1)
		pic_s[threadX][threadY] = pic[(x-1)*width + y-1];
	// top right corner
	if(threadX <1 && threadY > BLOCKSIZE -2)
		pic_s[threadX][threadY+2] = pic[(x-1)*width + y+1];
	// bottom left corner
	if(threadX > BLOCKSIZE -2 && threadY <1)
		pic_s[threadX+2][threadY] = pic[(x+1)*width + y-1];
	// bottom right corner
	if(threadX > BLOCKSIZE -2 && threadY > BLOCKSIZE -2)
		pic_s[threadX+2][threadY+2] = pic[(x+1)*width + y+1];
	// top edge
	if (threadX < 1)
		pic_s[threadX][threadY+1] = pic[(x-1)*width + y];
	// bottom edge
	if (threadX > BLOCKSIZE -2)
		pic_s[threadX+2][threadY+1] = pic[(x+1)*width + y];
	// left edge
	if (threadY < 1)
		pic_s[threadX+1][threadY] = pic[(x)*width + y-1];
	// right edge
	if (threadY > BLOCKSIZE -2)
		pic_s[threadX+1][threadY+2] = pic[(x)*width + y+1];
	//printf("after pics \n");
	// Barrier: the whole tile (interior + halo) must be written before any
	// thread reads shared memory below.
	__syncthreads();
	// Sobel Gx from shared memory: column threadY+2 minus column threadY.
	sum1 = pic_s[threadX][threadY+2] - pic_s[threadX][threadY]
	+ 2 * pic_s[threadX+1][threadY+2] - 2 * pic_s[threadX+1][threadY]
	+ pic_s[threadX+2][threadY+2] - pic_s[threadX+2][threadY];
	// Sobel Gy from shared memory: row threadX minus row threadX+2.
	sum2 = pic_s[threadX][threadY] + 2 * pic_s[threadX][threadY+1] + pic_s[threadX][threadY+2]
	- pic_s[threadX+2][threadY] - 2 * pic_s[threadX+2][threadY+1] - pic_s[threadX+2][threadY+2];
	// Squared magnitude; thresh is compared against the squared value.
	magnitude = sum1*sum1 + sum2*sum2;
	__syncthreads();
	//printf(" index = %d, sum1 = %d, sum2 = %d, magnitude = %d \n", index, sum1, sum2, magnitude);
	if (magnitude > thresh)
		result[index] = 255;
	else
		result[index] = 0;
	// Border pixels are forced to 0 regardless of the stencil result.
	if (x ==0 || y ==0 || x==height-1 || y == width-1)
		result[index] = 0;
} | .text
.file "cudaSobel.hip"
	.globl	_Z23__device_stub__d_sobel1PiPjiii # -- Begin function _Z23__device_stub__d_sobel1PiPjiii
	.p2align	4, 0x90
	.type	_Z23__device_stub__d_sobel1PiPjiii,@function
# Host-side launch stub for kernel d_sobel1 (_Z8d_sobel1PiPjiii).
# Spills the five kernel arguments to the stack, builds the argument-pointer
# array hipLaunchKernel expects, pops the grid/block dims, shared-memory size
# and stream recorded at the <<<...>>> call site, and calls hipLaunchKernel.
_Z23__device_stub__d_sobel1PiPjiii:     # @_Z23__device_stub__d_sobel1PiPjiii
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	# Spill kernel args (result, pic, xsize, ysize, thresh) so their
	# addresses can be taken.
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	movl	%ecx, 8(%rsp)
	movl	%r8d, 4(%rsp)
	# Argument-pointer array at 80(%rsp), one slot per kernel parameter.
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 112(%rsp)
	# Retrieve the launch configuration pushed at the <<<...>>> site:
	# grid dim -> 48(%rsp), block dim -> 32(%rsp), shmem -> 24, stream -> 16.
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi          # grid dim x,y (packed)
	movl	56(%rsp), %edx          # grid dim z
	movq	32(%rsp), %rcx          # block dim x,y (packed)
	movl	40(%rsp), %r8d          # block dim z
	leaq	80(%rsp), %r9           # kernel argument array
	movl	$_Z8d_sobel1PiPjiii, %edi # host-side kernel handle
	pushq	16(%rsp)                # stack arg 2: stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                # stack arg 1: shared-memory bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end0:
	.size	_Z23__device_stub__d_sobel1PiPjiii, .Lfunc_end0-_Z23__device_stub__d_sobel1PiPjiii
	.cfi_endproc
# -- End function
	.globl	_Z23__device_stub__d_sobel2PiPjiii # -- Begin function _Z23__device_stub__d_sobel2PiPjiii
	.p2align	4, 0x90
	.type	_Z23__device_stub__d_sobel2PiPjiii,@function
# Host-side launch stub for kernel d_sobel2 (_Z8d_sobel2PiPjiii).
# Identical marshalling to the d_sobel1 stub: spill the five kernel args,
# build the argument-pointer array, pop the recorded launch configuration,
# and forward to hipLaunchKernel.
_Z23__device_stub__d_sobel2PiPjiii:     # @_Z23__device_stub__d_sobel2PiPjiii
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	# Spill kernel args (result, pic, width, height, thresh).
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movl	%edx, 12(%rsp)
	movl	%ecx, 8(%rsp)
	movl	%r8d, 4(%rsp)
	# Argument-pointer array at 80(%rsp).
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	4(%rsp), %rax
	movq	%rax, 112(%rsp)
	# Pop the launch configuration recorded at the <<<...>>> site.
	leaq	48(%rsp), %rdi
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi          # grid dim x,y (packed)
	movl	56(%rsp), %edx          # grid dim z
	movq	32(%rsp), %rcx          # block dim x,y (packed)
	movl	40(%rsp), %r8d          # block dim z
	leaq	80(%rsp), %r9           # kernel argument array
	movl	$_Z8d_sobel2PiPjiii, %edi # host-side kernel handle
	pushq	16(%rsp)                # stack arg 2: stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                # stack arg 1: shared-memory bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end1:
	.size	_Z23__device_stub__d_sobel2PiPjiii, .Lfunc_end1-_Z23__device_stub__d_sobel2PiPjiii
	.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
# Module constructor (run from .init_array): registers the embedded device
# fat binary once (guarded by __hip_gpubin_handle), registers both kernels
# with the HIP runtime under their mangled names, and installs
# __hip_module_dtor via atexit.
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	# Register the fat binary only on the first call.
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB2_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rbx
	# Register d_sobel1: handle, stub address, name strings, -1 thread limit,
	# and five zeroed optional pointer args (on stack).
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z8d_sobel1PiPjiii, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	# Register d_sobel2 the same way.
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z8d_sobel2PiPjiii, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	# Schedule the module destructor at process exit.
	movl	$__hip_module_dtor, %edi
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
# Module destructor (installed via atexit): unregisters the fat binary if it
# was registered and clears the cached handle so the teardown is idempotent.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB3_2                         # nothing registered; done
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
# -- End function
.type _Z8d_sobel1PiPjiii,@object # @_Z8d_sobel1PiPjiii
.section .rodata,"a",@progbits
.globl _Z8d_sobel1PiPjiii
.p2align 3, 0x0
_Z8d_sobel1PiPjiii:
.quad _Z23__device_stub__d_sobel1PiPjiii
.size _Z8d_sobel1PiPjiii, 8
.type _Z8d_sobel2PiPjiii,@object # @_Z8d_sobel2PiPjiii
.globl _Z8d_sobel2PiPjiii
.p2align 3, 0x0
_Z8d_sobel2PiPjiii:
.quad _Z23__device_stub__d_sobel2PiPjiii
.size _Z8d_sobel2PiPjiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8d_sobel1PiPjiii"
.size .L__unnamed_1, 19
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z8d_sobel2PiPjiii"
.size .L__unnamed_2, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__d_sobel1PiPjiii
.addrsig_sym _Z23__device_stub__d_sobel2PiPjiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8d_sobel1PiPjiii
.addrsig_sym _Z8d_sobel2PiPjiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8d_sobel2PiPjiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0050*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002200 */
/*0060*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000ea20000002600 */
/*0070*/ UMOV UR5, 0x1 ; /* 0x0000000100057882 */
/* 0x000fc40000000000 */
/*0080*/ ULDC.64 UR6, c[0x0][0x170] ; /* 0x00005c0000067ab9 */
/* 0x000fe20000000a00 */
/*0090*/ IMAD R3, R3, c[0x0][0x0], R6 ; /* 0x0000000003037a24 */
/* 0x001fe200078e0206 */
/*00a0*/ ISETP.GT.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x002fc80003f24270 */
/*00b0*/ IADD3 R5, R3, -0x1, RZ ; /* 0xffffffff03057810 */
/* 0x000fe20007ffe0ff */
/*00c0*/ IMAD R0, R0, c[0x0][0x4], R7 ; /* 0x0000010000007a24 */
/* 0x004fe200078e0207 */
/*00d0*/ ISETP.GT.OR P5, PT, R6, RZ, P1 ; /* 0x000000ff0600720c */
/* 0x000fc60000fa4670 */
/*00e0*/ IMAD R5, R5, c[0x0][0x170], R0 ; /* 0x00005c0005057a24 */
/* 0x000fc800078e0200 */
/*00f0*/ IMAD.WIDE R8, R5, R11, c[0x0][0x168] ; /* 0x00005a0005087625 */
/* 0x000fcc00078e020b */
/*0100*/ @!P5 LDG.E.CONSTANT R5, [R8.64+-0x4] ; /* 0xfffffc080805d981 */
/* 0x000ea2000c1e9900 */
/*0110*/ IMAD R4, R6.reuse, 0x22, R7 ; /* 0x0000002206047824 */
/* 0x040fe200078e0207 */
/*0120*/ ISETP.GE.AND P0, PT, R7, 0x1f, PT ; /* 0x0000001f0700780c */
/* 0x000fe20003f06270 */
/*0130*/ IMAD R2, R3, c[0x0][0x170], R0 ; /* 0x00005c0003027a24 */
/* 0x000fe200078e0200 */
/*0140*/ ISETP.GT.AND P2, PT, R7, 0x1e, PT ; /* 0x0000001e0700780c */
/* 0x000fe40003f44270 */
/*0150*/ ISETP.GT.OR P3, PT, R6.reuse, RZ, !P0 ; /* 0x000000ff0600720c */
/* 0x040fe40004764670 */
/*0160*/ IADD3 R7, R3, 0x1, RZ ; /* 0x0000000103077810 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.GT.AND P4, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fc40003f84270 */
/*0180*/ ISETP.LT.OR P0, PT, R6.reuse, 0x1f, !P0 ; /* 0x0000001f0600780c */
/* 0x040fe20004701670 */
/*0190*/ IMAD R10, R7, c[0x0][0x170], R0 ; /* 0x00005c00070a7a24 */
/* 0x000fe200078e0200 */
/*01a0*/ ISETP.GE.AND P6, PT, R6, 0x1f, PT ; /* 0x0000001f0600780c */
/* 0x000fca0003fc6270 */
/*01b0*/ @!P3 LDG.E.CONSTANT R13, [R8.64+0x4] ; /* 0x00000408080db981 */
/* 0x000ee8000c1e9900 */
/*01c0*/ @!P4 LDG.E.CONSTANT R19, [R8.64] ; /* 0x000000080813c981 */
/* 0x000f28000c1e9900 */
/*01d0*/ @!P5 STS [R4.X4], R5 ; /* 0x000000050400d388 */
/* 0x0041e20000004800 */
/*01e0*/ ISETP.LT.OR P5, PT, R6, 0x1f, P1 ; /* 0x0000001f0600780c */
/* 0x000fe20000fa1670 */
/*01f0*/ IMAD.WIDE R6, R2, R11, c[0x0][0x168] ; /* 0x00005a0002067625 */
/* 0x000fc800078e020b */
/*0200*/ IMAD.WIDE R10, R10, R11, c[0x0][0x168] ; /* 0x00005a000a0a7625 */
/* 0x000fe200078e020b */
/*0210*/ @!P1 LDG.E.CONSTANT R23, [R6.64+-0x4] ; /* 0xfffffc0806179981 */
/* 0x000ea8000c1e9900 */
/*0220*/ LDG.E.CONSTANT R5, [R6.64] ; /* 0x0000000806057981 */
/* 0x001f68000c1e9900 */
/*0230*/ @P2 LDG.E.CONSTANT R25, [R6.64+0x4] ; /* 0x0000040806192981 */
/* 0x000f68000c1e9900 */
/*0240*/ @!P5 LDG.E.CONSTANT R15, [R10.64+-0x4] ; /* 0xfffffc080a0fd981 */
/* 0x000f68000c1e9900 */
/*0250*/ @!P0 LDG.E.CONSTANT R17, [R10.64+0x4] ; /* 0x000004080a118981 */
/* 0x000f68000c1e9900 */
/*0260*/ @P6 LDG.E.CONSTANT R21, [R10.64] ; /* 0x000000080a156981 */
/* 0x000f68000c1e9900 */
/*0270*/ @!P3 STS [R4.X4+0x8], R13 ; /* 0x0000080d0400b388 */
/* 0x008fe80000004800 */
/*0280*/ @!P4 STS [R4.X4+0x4], R19 ; /* 0x000004130400c388 */
/* 0x010fe20000004800 */
/*0290*/ UIADD3 UR4, -UR5, UR7, URZ ; /* 0x0000000705047290 */
/* 0x000fc4000fffe13f */
/*02a0*/ UIADD3 UR5, -UR5, UR6, URZ ; /* 0x0000000605057290 */
/* 0x000fe2000fffe13f */
/*02b0*/ @!P1 STS [R4.X4+0x88], R23 ; /* 0x0000881704009388 */
/* 0x004fe80000004800 */
/*02c0*/ STS [R4.X4+0x8c], R5 ; /* 0x00008c0504007388 */
/* 0x020fe80000004800 */
/*02d0*/ @P2 STS [R4.X4+0x90], R25 ; /* 0x0000901904002388 */
/* 0x000fe80000004800 */
/*02e0*/ @!P5 STS [R4.X4+0x110], R15 ; /* 0x0001100f0400d388 */
/* 0x000fe80000004800 */
/*02f0*/ @!P0 STS [R4.X4+0x118], R17 ; /* 0x0001181104008388 */
/* 0x000fe80000004800 */
/*0300*/ @P6 STS [R4.X4+0x114], R21 ; /* 0x0001141504006388 */
/* 0x000fe80000004800 */
/*0310*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0320*/ LDS R6, [R4.X4] ; /* 0x0000000004067984 */
/* 0x000fe80000004800 */
/*0330*/ LDS R7, [R4.X4+0x8] ; /* 0x0000080004077984 */
/* 0x000e280000004800 */
/*0340*/ LDS R9, [R4.X4+0x4] ; /* 0x0000040004097984 */
/* 0x000e680000004800 */
/*0350*/ LDS R8, [R4.X4+0x90] ; /* 0x0000900004087984 */
/* 0x000ea80000004800 */
/*0360*/ LDS R10, [R4.X4+0x88] ; /* 0x00008800040a7984 */
/* 0x000ee80000004800 */
/*0370*/ LDS R14, [R4.X4+0x110] ; /* 0x00011000040e7984 */
/* 0x000f280000004800 */
/*0380*/ LDS R12, [R4.X4+0x118] ; /* 0x00011800040c7984 */
/* 0x000f680000004800 */
/*0390*/ LDS R16, [R4.X4+0x114] ; /* 0x0001140004107984 */
/* 0x000f680000004800 */
/*03a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*03b0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f05270 */
/*03c0*/ IMAD.IADD R5, R7, 0x1, -R6 ; /* 0x0000000107057824 */
/* 0x001fc400078e0a06 */
/*03d0*/ IMAD R9, R9, 0x2, R6 ; /* 0x0000000209097824 */
/* 0x002fe400078e0206 */
/*03e0*/ IMAD R5, R8, 0x2, R5 ; /* 0x0000000208057824 */
/* 0x004fc800078e0205 */
/*03f0*/ IMAD R5, R10, -0x2, R5 ; /* 0xfffffffe0a057824 */
/* 0x008fe200078e0205 */
/*0400*/ IADD3 R9, -R14.reuse, R9, R7 ; /* 0x000000090e097210 */
/* 0x050fe40007ffe107 */
/*0410*/ ISETP.NE.AND P0, PT, R3.reuse, RZ, P0 ; /* 0x000000ff0300720c */
/* 0x040fe40000705270 */
/*0420*/ IADD3 R5, -R14, R5, R12 ; /* 0x000000050e057210 */
/* 0x020fe20007ffe10c */
/*0430*/ IMAD R9, R16, -0x2, R9 ; /* 0xfffffffe10097824 */
/* 0x000fe200078e0209 */
/*0440*/ ISETP.NE.AND P0, PT, R3, UR4, P0 ; /* 0x0000000403007c0c */
/* 0x000fc60008705270 */
/*0450*/ IMAD R4, R5, R5, RZ ; /* 0x0000000505047224 */
/* 0x000fe400078e02ff */
/*0460*/ IMAD.IADD R9, R9, 0x1, -R12 ; /* 0x0000000109097824 */
/* 0x000fe200078e0a0c */
/*0470*/ ISETP.NE.AND P0, PT, R0, UR5, P0 ; /* 0x0000000500007c0c */
/* 0x000fc60008705270 */
/*0480*/ IMAD R9, R9, R9, R4 ; /* 0x0000000909097224 */
/* 0x000fe200078e0204 */
/*0490*/ SHF.R.S32.HI R3, RZ, 0x1f, R2 ; /* 0x0000001fff037819 */
/* 0x000fe40000011402 */
/*04a0*/ LEA R4, P1, R2.reuse, c[0x0][0x160], 0x2 ; /* 0x0000580002047a11 */
/* 0x040fe400078210ff */
/*04b0*/ ISETP.GT.AND P0, PT, R9, c[0x0][0x178], P0 ; /* 0x00005e0009007a0c */
/* 0x000fe40000704270 */
/*04c0*/ LEA.HI.X R5, R2, c[0x0][0x164], R3, 0x2, P1 ; /* 0x0000590002057a11 */
/* 0x000fe400008f1403 */
/*04d0*/ SEL R3, RZ, 0xff, !P0 ; /* 0x000000ffff037807 */
/* 0x000fca0004000000 */
/*04e0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x000fe2000c101908 */
/*04f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0500*/ BRA 0x500; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z8d_sobel1PiPjiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fc400078e0203 */
/*0060*/ IMAD R3, R0, c[0x0][0x4], R5 ; /* 0x0000010000037a24 */
/* 0x002fc600078e0205 */
/*0070*/ IADD3 R0, R2, -0x1, RZ ; /* 0xffffffff02007810 */
/* 0x000fe40007ffe0ff */
/*0080*/ IADD3 R4, R3, -0x1, RZ ; /* 0xffffffff03047810 */
/* 0x000fe40007ffe0ff */
/*0090*/ ISETP.GT.U32.AND P0, PT, R0, 0x1dd, PT ; /* 0x000001dd0000780c */
/* 0x000fc80003f04070 */
/*00a0*/ ISETP.GT.U32.OR P0, PT, R4, 0x17d, P0 ; /* 0x0000017d0400780c */
/* 0x000fda0000704470 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fe20000000f00 */
/*00d0*/ HFMA2.MMA R0, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff007435 */
/* 0x000fe200000001ff */
/*00e0*/ IMAD R9, R2.reuse, c[0x0][0x170], R3 ; /* 0x00005c0002097a24 */
/* 0x040fe200078e0203 */
/*00f0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0100*/ IMAD R4, R2, R7, -c[0x0][0x170] ; /* 0x80005c0002047624 */
/* 0x000fca00078e0207 */
/*0110*/ IADD3 R5, R3, R4, RZ ; /* 0x0000000403057210 */
/* 0x000fca0007ffe0ff */
/*0120*/ IMAD.WIDE R2, R5, R0, c[0x0][0x168] ; /* 0x00005a0005027625 */
/* 0x000fc800078e0200 */
/*0130*/ IMAD R7, R7, 0x2, R5 ; /* 0x0000000207077824 */
/* 0x000fe200078e0205 */
/*0140*/ LDG.E.CONSTANT R8, [R2.64+-0x4] ; /* 0xfffffc0402087981 */
/* 0x000ea2000c1e9900 */
/*0150*/ IMAD.WIDE R4, R9, R0, c[0x0][0x168] ; /* 0x00005a0009047625 */
/* 0x000fc600078e0200 */
/*0160*/ LDG.E.CONSTANT R11, [R2.64+0x4] ; /* 0x00000404020b7981 */
/* 0x000ea2000c1e9900 */
/*0170*/ IMAD.WIDE R6, R7, R0, c[0x0][0x168] ; /* 0x00005a0007067625 */
/* 0x000fc600078e0200 */
/*0180*/ LDG.E.CONSTANT R19, [R2.64] ; /* 0x0000000402137981 */
/* 0x000ee8000c1e9900 */
/*0190*/ LDG.E.CONSTANT R13, [R4.64+0x4] ; /* 0x00000404040d7981 */
/* 0x000f28000c1e9900 */
/*01a0*/ LDG.E.CONSTANT R12, [R6.64+-0x4] ; /* 0xfffffc04060c7981 */
/* 0x000f68000c1e9900 */
/*01b0*/ LDG.E.CONSTANT R15, [R4.64+-0x4] ; /* 0xfffffc04040f7981 */
/* 0x000f68000c1e9900 */
/*01c0*/ LDG.E.CONSTANT R14, [R6.64] ; /* 0x00000004060e7981 */
/* 0x000f68000c1e9900 */
/*01d0*/ LDG.E.CONSTANT R17, [R6.64+0x4] ; /* 0x0000040406117981 */
/* 0x000f62000c1e9900 */
/*01e0*/ IADD3 R10, -R8, R11, RZ ; /* 0x0000000b080a7210 */
/* 0x004fc40007ffe1ff */
/*01f0*/ LEA R19, R19, R8, 0x1 ; /* 0x0000000813137211 */
/* 0x008fc600078e08ff */
/*0200*/ IMAD R10, R13, 0x2, R10 ; /* 0x000000020d0a7824 */
/* 0x010fe200078e020a */
/*0210*/ IADD3 R19, -R12, R19, R11 ; /* 0x000000130c137210 */
/* 0x020fc60007ffe10b */
/*0220*/ IMAD R10, R15, -0x2, R10 ; /* 0xfffffffe0f0a7824 */
/* 0x000fe400078e020a */
/*0230*/ IMAD R14, R14, -0x2, R19 ; /* 0xfffffffe0e0e7824 */
/* 0x000fc600078e0213 */
/*0240*/ IADD3 R10, -R12, R10, R17 ; /* 0x0000000a0c0a7210 */
/* 0x000fe40007ffe111 */
/*0250*/ IADD3 R14, -R17, R14, RZ ; /* 0x0000000e110e7210 */
/* 0x000fc60007ffe1ff */
/*0260*/ IMAD R3, R10, R10, RZ ; /* 0x0000000a0a037224 */
/* 0x000fc800078e02ff */
/*0270*/ IMAD R3, R14, R14, R3 ; /* 0x0000000e0e037224 */
/* 0x000fca00078e0203 */
/*0280*/ ISETP.GT.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f04270 */
/*0290*/ IMAD.WIDE R2, R9, R0, c[0x0][0x160] ; /* 0x0000580009027625 */
/* 0x000fd800078e0200 */
/*02a0*/ @!P0 STG.E [R2.64], RZ ; /* 0x000000ff02008986 */
/* 0x0001e2000c101904 */
/*02b0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*02c0*/ MOV R5, 0xff ; /* 0x000000ff00057802 */
/* 0x000fca0000000f00 */
/*02d0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*02e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02f0*/ BRA 0x2f0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8d_sobel1PiPjiii
.globl _Z8d_sobel1PiPjiii
.p2align 8
.type _Z8d_sobel1PiPjiii,@function
_Z8d_sobel1PiPjiii:
s_load_b32 s2, s[0:1], 0x2c
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_add_nc_u32_e32 v2, -1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, -1, v1
v_cmp_gt_u32_e32 vcc_lo, 0x1de, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u32_e64 s2, 0x17e, v3
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s2, s[0:1], 0x10
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[3:4], null, v0, s2, v[1:2]
v_mad_u64_u32 v[5:6], null, v2, s2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v0, s2, v3
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[2:3], 2, v[3:4]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[4:5], 2, v[5:6]
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
s_clause 0x5
global_load_b32 v10, v[0:1], off offset:-4
global_load_b32 v11, v[6:7], off offset:-4
global_load_b64 v[0:1], v[0:1], off
global_load_b64 v[8:9], v[4:5], off
global_load_b32 v4, v[4:5], off offset:-4
global_load_b32 v5, v[6:7], off offset:4
s_waitcnt vmcnt(2)
v_add_nc_u32_e32 v6, v9, v1
s_waitcnt vmcnt(1)
v_add_nc_u32_e32 v7, v4, v10
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v5, v5, v11
v_add_nc_u32_e32 v4, v4, v9
v_add_nc_u32_e32 v1, v1, v10
v_sub_nc_u32_e32 v6, v6, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v1, v4, v1
v_lshl_add_u32 v5, v5, 1, v6
v_sub_nc_u32_e32 v6, v8, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v0, v5, v5
v_lshl_add_u32 v1, v6, 1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[4:5], null, v1, v1, v[0:1]
v_add_co_u32 v0, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v3, vcc_lo
v_cmp_lt_i32_e32 vcc_lo, s0, v4
v_cndmask_b32_e64 v2, 0, 0xff, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8d_sobel1PiPjiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8d_sobel1PiPjiii, .Lfunc_end0-_Z8d_sobel1PiPjiii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z8d_sobel2PiPjiii
.globl _Z8d_sobel2PiPjiii
.p2align 8
.type _Z8d_sobel2PiPjiii,@function
_Z8d_sobel2PiPjiii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x10
v_and_b32_e32 v4, 0x3ff, v0
v_bfe_u32 v5, v0, 10, 10
s_load_b64 s[6:7], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v9, 1, v4
v_add_nc_u32_e32 v8, 1, v5
v_or_b32_e32 v12, v4, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v11, 2, v8
v_mad_u32_u24 v11, v9, 0x88, v11
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s14, s3, v[4:5]
v_mad_u64_u32 v[1:2], null, s15, s2, v[5:6]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s8, v[1:2]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[2:3]
v_add_co_u32 v6, vcc_lo, s6, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
global_load_b32 v10, v[6:7], off
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10
v_cmpx_eq_u32_e32 0, v12
s_cbranch_execz .LBB1_2
v_add_nc_u32_e32 v12, -1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[10:11], null, v12, s8, v[1:2]
v_ashrrev_i32_e32 v11, 31, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], 2, v[10:11]
v_add_co_u32 v10, vcc_lo, s6, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v11, vcc_lo, s7, v11, vcc_lo
global_load_b32 v10, v[10:11], off offset:-4
v_lshlrev_b32_e32 v11, 2, v5
v_mad_u32_u24 v11, v4, 0x88, v11
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s2
v_cmp_eq_u32_e64 s3, 0, v4
v_cmp_lt_u32_e32 vcc_lo, 30, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, vcc_lo
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB1_4
v_add_nc_u32_e32 v12, -1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[10:11], null, v12, s8, v[1:2]
v_ashrrev_i32_e32 v11, 31, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], 2, v[10:11]
v_add_co_u32 v10, s2, s6, v10
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v11, s2, s7, v11, s2
global_load_b32 v10, v[10:11], off offset:4
v_lshlrev_b32_e32 v11, 2, v5
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10 offset:8
.LBB1_4:
s_or_b32 exec_lo, exec_lo, s4
v_mul_lo_u32 v10, s8, v0
v_cmp_eq_u32_e64 s2, 0, v5
v_cmp_lt_u32_e64 s4, 30, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s5, s4, s2
s_and_saveexec_b32 s9, s5
s_cbranch_execz .LBB1_6
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v11, v10, s8, v1
v_ashrrev_i32_e32 v12, 31, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[11:12], 2, v[11:12]
v_add_co_u32 v11, s5, s6, v11
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, s5, s7, v12, s5
global_load_b32 v11, v[11:12], off offset:-4
v_mul_u32_u24_e32 v12, 0x88, v4
s_waitcnt vmcnt(0)
ds_store_b32 v12, v11 offset:272
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s9
s_and_b32 s5, s4, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s9, s5
s_cbranch_execz .LBB1_8
v_add3_u32 v11, v10, s8, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v12, 31, v11
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v11, s5, s6, v11
v_add_co_ci_u32_e64 v12, s5, s7, v12, s5
global_load_b32 v11, v[11:12], off offset:4
v_lshlrev_b32_e32 v12, 2, v5
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v12, v4, 0x88, v12
s_waitcnt vmcnt(0)
ds_store_b32 v12, v11 offset:280
.LBB1_8:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s5, s3
s_cbranch_execz .LBB1_10
v_add_nc_u32_e32 v13, -1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[11:12], null, v13, s8, v[1:2]
v_ashrrev_i32_e32 v12, 31, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[11:12], 2, v[11:12]
v_add_co_u32 v11, s3, s6, v11
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, s3, s7, v12, s3
global_load_b32 v11, v[11:12], off
v_lshlrev_b32_e32 v12, 2, v8
s_waitcnt vmcnt(0)
ds_store_b32 v12, v11
.LBB1_10:
s_or_b32 exec_lo, exec_lo, s5
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB1_12
v_add3_u32 v10, v10, s8, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v11, 31, v10
v_lshlrev_b64 v[10:11], 2, v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v10, s3, s6, v10
v_add_co_ci_u32_e64 v11, s3, s7, v11, s3
global_load_b32 v10, v[10:11], off
v_lshlrev_b32_e32 v11, 2, v8
s_delay_alu instid0(VALU_DEP_1)
v_mad_u32_u24 v11, v4, 0x88, v11
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10 offset:272
.LBB1_12:
s_or_b32 exec_lo, exec_lo, s5
s_load_b64 s[4:5], s[0:1], 0x0
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_14
global_load_b32 v10, v[6:7], off offset:-4
v_mul_u32_u24_e32 v11, 0x88, v9
s_waitcnt vmcnt(0)
ds_store_b32 v11, v10
.LBB1_14:
s_or_b32 exec_lo, exec_lo, s3
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB1_16
global_load_b32 v6, v[6:7], off offset:4
v_mul_u32_u24_e32 v7, 0x88, v9
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v7, v5, 2, v7
s_waitcnt vmcnt(0)
ds_store_b32 v7, v6 offset:8
.LBB1_16:
s_or_b32 exec_lo, exec_lo, s2
v_lshlrev_b32_e32 v5, 2, v5
s_movk_i32 s2, 0x110
s_waitcnt lgkmcnt(0)
v_mad_u32_u24 v6, v4, 0x88, s2
s_barrier
v_add_nc_u32_e32 v7, 8, v5
v_mad_u32_u24 v10, v4, 0x88, v5
buffer_gl0_inv
v_add_nc_u32_e32 v12, v6, v5
v_mad_u32_u24 v5, v9, 0x88, v5
v_add_nc_u32_e32 v11, v6, v7
v_mad_u32_u24 v13, v4, 0x88, v7
v_mad_u32_u24 v7, v9, 0x88, v7
v_lshlrev_b32_e32 v8, 2, v8
s_load_b64 s[6:7], s[0:1], 0x14
ds_load_b32 v9, v11
ds_load_b32 v11, v12
ds_load_b32 v12, v13
ds_load_b32 v10, v10
ds_load_b32 v7, v7
ds_load_b32 v5, v5
v_mad_u32_u24 v4, v4, 0x88, v8
v_add_nc_u32_e32 v6, v6, v8
ds_load_b32 v4, v4
ds_load_b32 v6, v6
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_cmp_eq_u32_e32 vcc_lo, 0, v0
v_cmp_eq_u32_e64 s0, 0, v1
s_add_i32 s8, s8, -1
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e64 s1, s8, v1
s_barrier
s_or_b32 s0, vcc_lo, s0
buffer_gl0_inv
v_add_nc_u32_e32 v8, v12, v9
v_add_nc_u32_e32 v13, v10, v11
v_add_nc_u32_e32 v9, v9, v11
v_sub_nc_u32_e32 v5, v7, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v7, v8, v13
v_add_nc_u32_e32 v8, v10, v12
v_sub_nc_u32_e32 v6, v4, v6
v_lshl_add_u32 v5, v5, 1, v7
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v7, v8, v9
v_mul_lo_u32 v4, v5, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshl_add_u32 v7, v6, 1, v7
v_mad_u64_u32 v[5:6], null, v7, v7, v[4:5]
v_add_co_u32 v6, s2, s4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_add_co_ci_u32_e64 v7, s2, s5, v3, s2
s_add_i32 s2, s6, -1
v_cmp_eq_u32_e64 s2, s2, v0
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_lt_i32_e64 s3, s7, v5
s_or_b32 s0, s0, s2
s_delay_alu instid0(VALU_DEP_1)
v_cndmask_b32_e64 v0, 0, 0xff, s3
s_or_b32 s0, s0, s1
global_store_b32 v[6:7], v0, off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB1_18
v_add_co_u32 v0, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v3, vcc_lo
v_mov_b32_e32 v2, 0
global_store_b32 v[0:1], v2, off
.LBB1_18:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8d_sobel2PiPjiii
.amdhsa_group_segment_fixed_size 4624
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z8d_sobel2PiPjiii, .Lfunc_end1-_Z8d_sobel2PiPjiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .actual_access: write_only
.address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8d_sobel1PiPjiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8d_sobel1PiPjiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .actual_access: write_only
.address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4624
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8d_sobel2PiPjiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8d_sobel2PiPjiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000571ff_00000000-6_cudaSobel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii
.type _Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii, @function
_Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8d_sobel1PiPjiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii, .-_Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii
.globl _Z8d_sobel1PiPjiii
.type _Z8d_sobel1PiPjiii, @function
_Z8d_sobel1PiPjiii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z8d_sobel1PiPjiiiPiPjiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8d_sobel1PiPjiii, .-_Z8d_sobel1PiPjiii
.globl _Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii
.type _Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii, @function
_Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii:
.LFB2053:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
movq %rsi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8d_sobel2PiPjiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii, .-_Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii
.globl _Z8d_sobel2PiPjiii
.type _Z8d_sobel2PiPjiii, @function
_Z8d_sobel2PiPjiii:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z8d_sobel2PiPjiiiPiPjiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z8d_sobel2PiPjiii, .-_Z8d_sobel2PiPjiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8d_sobel2PiPjiii"
.LC1:
.string "_Z8d_sobel1PiPjiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8d_sobel2PiPjiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z8d_sobel1PiPjiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudaSobel.hip"
.globl _Z23__device_stub__d_sobel1PiPjiii # -- Begin function _Z23__device_stub__d_sobel1PiPjiii
.p2align 4, 0x90
.type _Z23__device_stub__d_sobel1PiPjiii,@function
_Z23__device_stub__d_sobel1PiPjiii: # @_Z23__device_stub__d_sobel1PiPjiii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8d_sobel1PiPjiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z23__device_stub__d_sobel1PiPjiii, .Lfunc_end0-_Z23__device_stub__d_sobel1PiPjiii
.cfi_endproc
# -- End function
.globl _Z23__device_stub__d_sobel2PiPjiii # -- Begin function _Z23__device_stub__d_sobel2PiPjiii
.p2align 4, 0x90
.type _Z23__device_stub__d_sobel2PiPjiii,@function
_Z23__device_stub__d_sobel2PiPjiii: # @_Z23__device_stub__d_sobel2PiPjiii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8d_sobel2PiPjiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z23__device_stub__d_sobel2PiPjiii, .Lfunc_end1-_Z23__device_stub__d_sobel2PiPjiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8d_sobel1PiPjiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8d_sobel2PiPjiii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8d_sobel1PiPjiii,@object # @_Z8d_sobel1PiPjiii
.section .rodata,"a",@progbits
.globl _Z8d_sobel1PiPjiii
.p2align 3, 0x0
_Z8d_sobel1PiPjiii:
.quad _Z23__device_stub__d_sobel1PiPjiii
.size _Z8d_sobel1PiPjiii, 8
.type _Z8d_sobel2PiPjiii,@object # @_Z8d_sobel2PiPjiii
.globl _Z8d_sobel2PiPjiii
.p2align 3, 0x0
_Z8d_sobel2PiPjiii:
.quad _Z23__device_stub__d_sobel2PiPjiii
.size _Z8d_sobel2PiPjiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8d_sobel1PiPjiii"
.size .L__unnamed_1, 19
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z8d_sobel2PiPjiii"
.size .L__unnamed_2, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__d_sobel1PiPjiii
.addrsig_sym _Z23__device_stub__d_sobel2PiPjiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8d_sobel1PiPjiii
.addrsig_sym _Z8d_sobel2PiPjiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
The shared memory is allocated using the __shared__ memory space specifier.
Shared memory is expected to be much faster than global memory.
The following code sample is a straightforward implementation of matrix multiplication
that does not take advantage of shared memory. Each thread reads one row of A and one
column of B and computes the corresponding element of C. A is therefore read B.columns
times from global memory and B is read A.rows times.
Example adapted from the nVIDIA CUDA 9.1 samples
*/
#include <iostream>
#include <memory>
#include <algorithm>
struct Matrix{
int num_rows;
int num_columns;
float* elements;
};
__global__
void matrixMult(const Matrix a, const Matrix b, Matrix c){
float accumulate = 0.f;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int column = blockDim.x * blockIdx.x + threadIdx.x;
printf("\nthreadIdx(%d) threadIdy(%d)\n",threadIdx.x,threadIdx.y);
for(int i = 0; i != a.num_rows; ++i){
accumulate += a.elements[row * a.num_columns + i] * b.elements[i * c.num_columns + column];
}
c.elements[row * c.num_columns + column] = accumulate;
}
int main(){
    size_t dimension = 3;
    size_t dimension_matrix = dimension * dimension;
    // Abort with a readable message on any CUDA runtime failure.  Kernel
    // launches return no status themselves, so cudaGetLastError() is
    // checked explicitly right after the launch below.
    auto cuda_check = [](cudaError_t status){
        if(status != cudaSuccess){
            std::cerr << "CUDA error: " << cudaGetErrorString(status) << '\n';
            exit(EXIT_FAILURE);
        }
    };
    // Host matrices, all dimension x dimension, row-major.
    Matrix h_A, h_B, h_C;
    h_A.num_rows = h_B.num_rows = h_C.num_rows = dimension;
    h_A.num_columns = h_B.num_columns = h_C.num_columns = dimension;
    h_A.elements = (float*)malloc(dimension_matrix * sizeof(float));
    h_B.elements = (float*)malloc(dimension_matrix * sizeof(float));
    h_C.elements = (float*)malloc(dimension_matrix * sizeof(float));
    // Stateful generator shared across both fills: A gets 2..10, B gets 11..19.
    auto generate_element = [n = 1.f]() mutable {return (float)++n;};
    std::generate(h_A.elements, h_A.elements + (dimension_matrix), generate_element);
    std::generate(h_B.elements, h_B.elements + (dimension_matrix), generate_element);
    // Device mirrors of the three matrices.
    Matrix d_A, d_B, d_C;
    size_t size_bytes = dimension_matrix * sizeof(float);
    d_A.num_rows = d_B.num_rows = d_C.num_rows = dimension;
    d_A.num_columns = d_B.num_columns = d_C.num_columns = dimension;
    cuda_check(cudaMalloc(&d_A.elements, size_bytes));
    cuda_check(cudaMalloc(&d_B.elements, size_bytes));
    cuda_check(cudaMalloc(&d_C.elements, size_bytes));
    cuda_check(cudaMemcpy(d_A.elements, h_A.elements, size_bytes, cudaMemcpyHostToDevice));
    cuda_check(cudaMemcpy(d_B.elements, h_B.elements, size_bytes, cudaMemcpyHostToDevice));
    // Launching kernel: one thread per output element.
    size_t threads_per_block = dimension;
    dim3 dimBlock(threads_per_block,threads_per_block);
    dim3 dimGrid(h_B.num_columns / dimBlock.x, h_A.num_rows / dimBlock.y);
    std::cout << "\nLaunching CUDA kernel matrixMult<<<" << dimGrid.x
              << ", " << dimBlock.x << ">>>" << '\n';
    matrixMult<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cuda_check(cudaGetLastError());   // catches bad launch configurations
    // The blocking device-to-host copy also synchronizes with the kernel,
    // so h_C is fully written before the check below runs.
    cuda_check(cudaMemcpy(h_C.elements, d_C.elements, size_bytes, cudaMemcpyDeviceToHost));
    cuda_check(cudaFree(d_A.elements));
    cuda_check(cudaFree(d_B.elements));
    cuda_check(cudaFree(d_C.elements));
    // Check results against a CPU reference.
    for(size_t i = 0; i != dimension; ++i){
        for(size_t j = 0; j != dimension; ++j){
            // Accumulate in float (the original used int): truncating every
            // partial sum to int only matched the kernel's float arithmetic
            // because the inputs here happen to be exact small integers.
            float accumulator = 0.f;
            for(size_t k = 0; k != dimension; ++k)
                accumulator += h_A.elements[i * h_A.num_columns + k] * h_B.elements[k * h_B.num_columns + j];
            if(accumulator != h_C.elements[i * h_A.num_columns + j]){
                std::cerr << "Mismatch found in position " << i <<", " << j
                          << ": Expected = " << accumulator
                          << " Obtained = " << h_C.elements[i * h_A.num_columns + j] << '\n';
                free(h_A.elements);
                free(h_B.elements);
                free(h_C.elements);
                exit(EXIT_FAILURE);
            }
        }
    }
    free(h_A.elements);
    free(h_B.elements);
    free(h_C.elements);
    std::cout << "\nSUCCESSFULLY EXECUTED!\n" << std::endl;
    return 0;
}
Function : _Z10matrixMult6MatrixS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R11, SR_TID.Y ; /* 0x00000000000b7919 */
/* 0x000e220000002200 */
/*0020*/ MOV R8, 0x0 ; /* 0x0000000000087802 */
/* 0x000fe40000000f00 */
/*0030*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fe20007ffe0ff */
/*0040*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */
/* 0x000e220000002100 */
/*0050*/ MOV R4, c[0x4][0x8] ; /* 0x0100020000047a02 */
/* 0x000fe40000000f00 */
/*0060*/ LDC.64 R8, c[0x4][R8] ; /* 0x0100000008087b82 */
/* 0x000e620000000a00 */
/*0070*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000ea20000002600 */
/*0080*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe40007f1e0ff */
/*0090*/ MOV R5, c[0x4][0xc] ; /* 0x0100030000057a02 */
/* 0x000fe20000000f00 */
/*00a0*/ S2R R17, SR_CTAID.X ; /* 0x0000000000117919 */
/* 0x000ee20000002500 */
/*00b0*/ IADD3.X R7, RZ, c[0x0][0x24], RZ, P0, !PT ; /* 0x00000900ff077a10 */
/* 0x000fc600007fe4ff */
/*00c0*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */
/* 0x0011e20000100a00 */
/*00d0*/ IMAD R2, R2, c[0x0][0x4], R11 ; /* 0x0000010002027a24 */
/* 0x004fe400078e020b */
/*00e0*/ IMAD R17, R17, c[0x0][0x0], R10 ; /* 0x0000000011117a24 */
/* 0x008fe400078e020a */
/*00f0*/ LEPC R10 ; /* 0x00000000000a734e */
/* 0x003fe20000000000 */
/*0100*/ MOV R3, 0x170 ; /* 0x0000017000037802 */
/* 0x000fe40000000f00 */
/*0110*/ MOV R20, 0xf0 ; /* 0x000000f000147802 */
/* 0x000fe40000000f00 */
/*0120*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0130*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fc40000000f00 */
/*0140*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */
/* 0x000fc8000791e10a */
/*0150*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e250b */
/*0160*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x000fea0003c00000 */
/*0170*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x160], PT ; /* 0x00005800ff007a0c */
/* 0x000fe20003f05270 */
/*0180*/ HFMA2.MMA R27, -RZ, RZ, 0, 0 ; /* 0x00000000ff1b7435 */
/* 0x000fd800000001ff */
/*0190*/ @!P0 BRA 0xda0 ; /* 0x00000c0000008947 */
/* 0x000fea0003800000 */
/*01a0*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */
/* 0x000fe40000000f00 */
/*01b0*/ MOV R27, RZ ; /* 0x000000ff001b7202 */
/* 0x000fe40000000f00 */
/*01c0*/ IADD3 R0, R4.reuse, -0x1, RZ ; /* 0xffffffff04007810 */
/* 0x040fe40007ffe0ff */
/*01d0*/ LOP3.LUT R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fe400078ec0ff */
/*01e0*/ ISETP.GE.U32.AND P0, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f06070 */
/*01f0*/ MOV R0, RZ ; /* 0x000000ff00007202 */
/* 0x000fd60000000f00 */
/*0200*/ @!P0 BRA 0xc60 ; /* 0x00000a5000008947 */
/* 0x000fea0003800000 */
/*0210*/ IADD3 R5, -R4, c[0x0][0x160], RZ ; /* 0x0000580004057a10 */
/* 0x000fe20007ffe1ff */
/*0220*/ BSSY B0, 0xc60 ; /* 0x00000a3000007945 */
/* 0x000fe20003800000 */
/*0230*/ MOV R22, 0x4 ; /* 0x0000000400167802 */
/* 0x000fe20000000f00 */
/*0240*/ HFMA2.MMA R27, -RZ, RZ, 0, 0 ; /* 0x00000000ff1b7435 */
/* 0x000fe200000001ff */
/*0250*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f04270 */
/*0260*/ IMAD R6, R2, c[0x0][0x164], RZ ; /* 0x0000590002067a24 */
/* 0x000fe200078e02ff */
/*0270*/ MOV R0, RZ ; /* 0x000000ff00007202 */
/* 0x000fe20000000f00 */
/*0280*/ IMAD.WIDE R22, R17, R22, c[0x0][0x178] ; /* 0x00005e0011167625 */
/* 0x000fe200078e0216 */
/*0290*/ MOV R8, c[0x0][0x168] ; /* 0x00005a0000087a02 */
/* 0x000fe40000000f00 */
/*02a0*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */
/* 0x000fce0000000f00 */
/*02b0*/ @!P0 BRA 0xab0 ; /* 0x000007f000008947 */
/* 0x000fea0003800000 */
/*02c0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fe40003f24270 */
/*02d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*02e0*/ @!P1 BRA 0x7c0 ; /* 0x000004d000009947 */
/* 0x000fea0003800000 */
/*02f0*/ BSSY B1, 0x7c0 ; /* 0x000004c000017945 */
/* 0x000fe20003800000 */
/*0300*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0310*/ MOV R9, R3 ; /* 0x0000000300097202 */
/* 0x000fe20000000f00 */
/*0320*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0330*/ MOV R7, c[0x0][0x184] ; /* 0x0000610000077a02 */
/* 0x000fe20000000f00 */
/*0340*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x0000a4000c1e1900 */
/*0350*/ IMAD.WIDE R12, R6, 0x4, R8 ; /* 0x00000004060c7825 */
/* 0x000fc800078e0208 */
/*0360*/ IMAD.WIDE R18, R7.reuse, 0x4, R22 ; /* 0x0000000407127825 */
/* 0x040fe200078e0216 */
/*0370*/ LDG.E R28, [R12.64] ; /* 0x000000040c1c7981 */
/* 0x000ea8000c1e1900 */
/*0380*/ LDG.E R20, [R18.64] ; /* 0x0000000412147981 */
/* 0x0002e2000c1e1900 */
/*0390*/ IMAD.WIDE R14, R7, 0x4, R18 ; /* 0x00000004070e7825 */
/* 0x000fc600078e0212 */
/*03a0*/ LDG.E R21, [R12.64+0x4] ; /* 0x000004040c157981 */
/* 0x000ee8000c1e1900 */
/*03b0*/ LDG.E R9, [R14.64] ; /* 0x000000040e097981 */
/* 0x000968000c1e1900 */
/*03c0*/ LDG.E R10, [R12.64+0x8] ; /* 0x000008040c0a7981 */
/* 0x000f68000c1e1900 */
/*03d0*/ LDG.E R16, [R12.64+0xc] ; /* 0x00000c040c107981 */
/* 0x000f62000c1e1900 */
/*03e0*/ IMAD.WIDE R14, R7, 0x4, R14 ; /* 0x00000004070e7825 */
/* 0x010fc600078e020e */
/*03f0*/ LDG.E R19, [R12.64+0x10] ; /* 0x000010040c137981 */
/* 0x002f28000c1e1900 */
/*0400*/ LDG.E R11, [R14.64] ; /* 0x000000040e0b7981 */
/* 0x000322000c1e1900 */
/*0410*/ IMAD.WIDE R22, R7, 0x4, R14 ; /* 0x0000000407167825 */
/* 0x001fca00078e020e */
/*0420*/ LDG.E R18, [R22.64] ; /* 0x0000000416127981 */
/* 0x000522000c1e1900 */
/*0430*/ IMAD.WIDE R24, R7, 0x4, R22 ; /* 0x0000000407187825 */
/* 0x000fc800078e0216 */
/*0440*/ FFMA R22, R26, R28, R27 ; /* 0x0000001c1a167223 */
/* 0x004fe4000000001b */
/*0450*/ LDG.E R26, [R24.64] ; /* 0x00000004181a7981 */
/* 0x000aa2000c1e1900 */
/*0460*/ IMAD.WIDE R28, R7, 0x4, R24 ; /* 0x00000004071c7825 */
/* 0x000fc600078e0218 */
/*0470*/ LDG.E R27, [R12.64+0x14] ; /* 0x000014040c1b7981 */
/* 0x000ea2000c1e1900 */
/*0480*/ FFMA R22, R20, R21, R22 ; /* 0x0000001514167223 */
/* 0x008fc60000000016 */
/*0490*/ LDG.E R21, [R28.64] ; /* 0x000000041c157981 */
/* 0x0000e2000c1e1900 */
/*04a0*/ IMAD.WIDE R14, R7, 0x4, R28 ; /* 0x00000004070e7825 */
/* 0x002fc600078e021c */
/*04b0*/ LDG.E R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000ee2000c1e1900 */
/*04c0*/ FFMA R24, R9, R10, R22 ; /* 0x0000000a09187223 */
/* 0x020fc60000000016 */
/*04d0*/ LDG.E R9, [R12.64+0x1c] ; /* 0x00001c040c097981 */
/* 0x000f62000c1e1900 */
/*04e0*/ IMAD.WIDE R22, R7, 0x4, R14 ; /* 0x0000000407167825 */
/* 0x000fc600078e020e */
/*04f0*/ LDG.E R10, [R14.64] ; /* 0x000000040e0a7981 */
/* 0x000968000c1e1900 */
/*0500*/ LDG.E R28, [R12.64+0x24] ; /* 0x000024040c1c7981 */
/* 0x001f62000c1e1900 */
/*0510*/ FFMA R14, R11, R16, R24 ; /* 0x000000100b0e7223 */
/* 0x010fc60000000018 */
/*0520*/ LDG.E R16, [R12.64+0x20] ; /* 0x000020040c107981 */
/* 0x000f22000c1e1900 */
/*0530*/ IMAD.WIDE R24, R7, 0x4, R22 ; /* 0x0000000407187825 */
/* 0x000fc600078e0216 */
/*0540*/ LDG.E R11, [R22.64] ; /* 0x00000004160b7981 */
/* 0x000128000c1e1900 */
/*0550*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x000322000c1e1900 */
/*0560*/ FFMA R14, R18, R19, R14 ; /* 0x00000013120e7223 */
/* 0x000fe4000000000e */
/*0570*/ IMAD.WIDE R18, R7, 0x4, R24 ; /* 0x0000000407127825 */
/* 0x000fe400078e0218 */
/*0580*/ LDG.E R25, [R12.64+0x30] ; /* 0x000030040c197981 */
/* 0x002f24000c1e1900 */
/*0590*/ FFMA R26, R26, R27, R14 ; /* 0x0000001b1a1a7223 */
/* 0x004fc4000000000e */
/*05a0*/ IMAD.WIDE R14, R7, 0x4, R18 ; /* 0x00000004070e7825 */
/* 0x000fe200078e0212 */
/*05b0*/ LDG.E R27, [R12.64+0x28] ; /* 0x000028040c1b7981 */
/* 0x000ea8000c1e1900 */
/*05c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0006a4000c1e1900 */
/*05d0*/ FFMA R19, R21, R20, R26 ; /* 0x0000001415137223 */
/* 0x008fe4000000001a */
/*05e0*/ IMAD.WIDE R20, R7, 0x4, R14 ; /* 0x0000000407147825 */
/* 0x000fe200078e020e */
/*05f0*/ LDG.E R26, [R14.64] ; /* 0x000000040e1a7981 */
/* 0x0002e6000c1e1900 */
/*0600*/ FFMA R10, R10, R9, R19 ; /* 0x000000090a0a7223 */
/* 0x020fc40000000013 */
/*0610*/ IMAD.WIDE R22, R7, 0x4, R20 ; /* 0x0000000407167825 */
/* 0x001fe200078e0214 */
/*0620*/ LDG.E R9, [R12.64+0x2c] ; /* 0x00002c040c097981 */
/* 0x000ee8000c1e1900 */
/*0630*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000162000c1e1900 */
/*0640*/ FFMA R24, R11, R16, R10 ; /* 0x000000100b187223 */
/* 0x010fe4000000000a */
/*0650*/ IMAD.WIDE R10, R7.reuse, 0x4, R22 ; /* 0x00000004070a7825 */
/* 0x040fe200078e0216 */
/*0660*/ LDG.E R16, [R22.64] ; /* 0x0000000416107981 */
/* 0x000968000c1e1900 */
/*0670*/ LDG.E R19, [R12.64+0x34] ; /* 0x000034040c137981 */
/* 0x000f62000c1e1900 */
/*0680*/ IMAD.WIDE R14, R7, 0x4, R10 ; /* 0x00000004070e7825 */
/* 0x002fc800078e020a */
/*0690*/ FFMA R22, R29, R28, R24 ; /* 0x0000001c1d167223 */
/* 0x010fe20000000018 */
/*06a0*/ LDG.E R21, [R14.64] ; /* 0x000000040e157981 */
/* 0x001f28000c1e1900 */
/*06b0*/ LDG.E R29, [R12.64+0x38] ; /* 0x000038040c1d7981 */
/* 0x000f28000c1e1900 */
/*06c0*/ LDG.E R24, [R10.64] ; /* 0x000000040a187981 */
/* 0x000f28000c1e1900 */
/*06d0*/ LDG.E R28, [R12.64+0x3c] ; /* 0x00003c040c1c7981 */
/* 0x000f22000c1e1900 */
/*06e0*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */
/* 0x000fc80007ffe0ff */
/*06f0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fe40003f24270 */
/*0700*/ IADD3 R8, P2, R8, 0x40, RZ ; /* 0x0000004008087810 */
/* 0x000fe40007f5e0ff */
/*0710*/ IADD3 R0, R0, 0x10, RZ ; /* 0x0000001000007810 */
/* 0x000fe40007ffe0ff */
/*0720*/ IADD3.X R3, RZ, R3, RZ, P2, !PT ; /* 0x00000003ff037210 */
/* 0x000fe200017fe4ff */
/*0730*/ FFMA R18, R18, R27, R22 ; /* 0x0000001b12127223 */
/* 0x004fe40000000016 */
/*0740*/ IMAD.WIDE R22, R7, 0x4, R14 ; /* 0x0000000407167825 */
/* 0x000fc800078e020e */
/*0750*/ FFMA R9, R26, R9, R18 ; /* 0x000000091a097223 */
/* 0x008fc80000000012 */
/*0760*/ FFMA R9, R20, R25, R9 ; /* 0x0000001914097223 */
/* 0x020fc80000000009 */
/*0770*/ FFMA R9, R16, R19, R9 ; /* 0x0000001310097223 */
/* 0x000fc80000000009 */
/*0780*/ FFMA R9, R24, R29, R9 ; /* 0x0000001d18097223 */
/* 0x010fc80000000009 */
/*0790*/ FFMA R27, R21, R28, R9 ; /* 0x0000001c151b7223 */
/* 0x000fe20000000009 */
/*07a0*/ @P1 BRA 0x310 ; /* 0xfffffb6000001947 */
/* 0x000fea000383ffff */
/*07b0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*07c0*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */
/* 0x000fe20003f24270 */
/*07d0*/ BSSY B1, 0xa90 ; /* 0x000002b000017945 */
/* 0x000fd80003800000 */
/*07e0*/ @!P1 BRA 0xa80 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*07f0*/ MOV R9, R3 ; /* 0x0000000300097202 */
/* 0x000fe20000000f00 */
/*0800*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0810*/ MOV R7, c[0x0][0x184] ; /* 0x0000610000077a02 */
/* 0x000fe20000000f00 */
/*0820*/ LDG.E R16, [R22.64] ; /* 0x0000000416107981 */
/* 0x0000a4000c1e1900 */
/*0830*/ IMAD.WIDE R10, R6, 0x4, R8 ; /* 0x00000004060a7825 */
/* 0x000fc800078e0208 */
/*0840*/ IMAD.WIDE R24, R7.reuse, 0x4, R22 ; /* 0x0000000407187825 */
/* 0x040fe200078e0216 */
/*0850*/ LDG.E R29, [R10.64] ; /* 0x000000040a1d7981 */
/* 0x000ea8000c1e1900 */
/*0860*/ LDG.E R9, [R10.64+0x4] ; /* 0x000004040a097981 */
/* 0x000ee8000c1e1900 */
/*0870*/ LDG.E R26, [R24.64] ; /* 0x00000004181a7981 */
/* 0x0002e2000c1e1900 */
/*0880*/ IMAD.WIDE R14, R7, 0x4, R24 ; /* 0x00000004070e7825 */
/* 0x000fc600078e0218 */
/*0890*/ LDG.E R28, [R10.64+0x8] ; /* 0x000008040a1c7981 */
/* 0x000f26000c1e1900 */
/*08a0*/ IMAD.WIDE R18, R7.reuse, 0x4, R14 ; /* 0x0000000407127825 */
/* 0x040fe400078e020e */
/*08b0*/ LDG.E R15, [R14.64] ; /* 0x000000040e0f7981 */
/* 0x000b28000c1e1900 */
/*08c0*/ IMAD.WIDE R12, R7.reuse, 0x4, R18 ; /* 0x00000004070c7825 */
/* 0x040fe200078e0212 */
/*08d0*/ LDG.E R25, [R18.64] ; /* 0x0000000412197981 */
/* 0x00232a000c1e1900 */
/*08e0*/ IMAD.WIDE R20, R7.reuse, 0x4, R12 ; /* 0x0000000407147825 */
/* 0x040fe200078e020c */
/*08f0*/ LDG.E R14, [R10.64+0xc] ; /* 0x00000c040a0e7981 */
/* 0x020f68000c1e1900 */
/*0900*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000362000c1e1900 */
/*0910*/ IMAD.WIDE R22, R7, 0x4, R20 ; /* 0x0000000407167825 */
/* 0x001fc600078e0214 */
/*0920*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000f68000c1e1900 */
/*0930*/ LDG.E R13, [R10.64+0x10] ; /* 0x000010040a0d7981 */
/* 0x002f62000c1e1900 */
/*0940*/ IMAD.WIDE R18, R7, 0x4, R22 ; /* 0x0000000407127825 */
/* 0x000fca00078e0216 */
/*0950*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000f62000c1e1900 */
/*0960*/ FFMA R16, R16, R29, R27 ; /* 0x0000001d10107223 */
/* 0x004fc6000000001b */
/*0970*/ LDG.E R27, [R10.64+0x14] ; /* 0x000014040a1b7981 */
/* 0x000ea8000c1e1900 */
/*0980*/ LDG.E R29, [R10.64+0x1c] ; /* 0x00001c040a1d7981 */
/* 0x000ea2000c1e1900 */
/*0990*/ FFMA R26, R26, R9, R16 ; /* 0x000000091a1a7223 */
/* 0x008fc60000000010 */
/*09a0*/ LDG.E R16, [R10.64+0x18] ; /* 0x000018040a107981 */
/* 0x000ee8000c1e1900 */
/*09b0*/ LDG.E R9, [R22.64] ; /* 0x0000000416097981 */
/* 0x0000e2000c1e1900 */
/*09c0*/ FFMA R15, R15, R28, R26 ; /* 0x0000001c0f0f7223 */
/* 0x010fc8000000001a */
/*09d0*/ FFMA R14, R25, R14, R15 ; /* 0x0000000e190e7223 */
/* 0x020fe2000000000f */
/*09e0*/ IADD3 R8, P1, R8, 0x20, RZ ; /* 0x0000002008087810 */
/* 0x000fe20007f3e0ff */
/*09f0*/ IMAD.WIDE R22, R7, 0x4, R18 ; /* 0x0000000407167825 */
/* 0x001fc800078e0212 */
/*0a00*/ FFMA R12, R12, R13, R14 ; /* 0x0000000d0c0c7223 */
/* 0x000fe2000000000e */
/*0a10*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0a20*/ IADD3 R0, R0, 0x8, RZ ; /* 0x0000000800007810 */
/* 0x000fe40007ffe0ff */
/*0a30*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */
/* 0x000fe40007ffe0ff */
/*0a40*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */
/* 0x000fe20000ffe4ff */
/*0a50*/ FFMA R12, R20, R27, R12 ; /* 0x0000001b140c7223 */
/* 0x004fc8000000000c */
/*0a60*/ FFMA R9, R9, R16, R12 ; /* 0x0000001009097223 */
/* 0x008fc8000000000c */
/*0a70*/ FFMA R27, R24, R29, R9 ; /* 0x0000001d181b7223 */
/* 0x000fe40000000009 */
/*0a80*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0a90*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */
/* 0x000fda0000705670 */
/*0aa0*/ @!P0 BRA 0xc50 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0ab0*/ MOV R7, c[0x0][0x184] ; /* 0x0000610000077a02 */
/* 0x000fe20000000f00 */
/*0ac0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0ad0*/ MOV R9, R3 ; /* 0x0000000300097202 */
/* 0x000fc60000000f00 */
/*0ae0*/ IMAD.WIDE R12, R7.reuse, 0x4, R22 ; /* 0x00000004070c7825 */
/* 0x040fe400078e0216 */
/*0af0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x000ea4000c1e1900 */
/*0b00*/ IMAD.WIDE R10, R6, 0x4, R8 ; /* 0x00000004060a7825 */
/* 0x000fe400078e0208 */
/*0b10*/ LDG.E R16, [R12.64] ; /* 0x000000040c107981 */
/* 0x000ee4000c1e1900 */
/*0b20*/ IMAD.WIDE R18, R7.reuse, 0x4, R12 ; /* 0x0000000407127825 */
/* 0x040fe400078e020c */
/*0b30*/ LDG.E R9, [R10.64] ; /* 0x000000040a097981 */
/* 0x000ea8000c1e1900 */
/*0b40*/ LDG.E R20, [R10.64+0x4] ; /* 0x000004040a147981 */
/* 0x000ee2000c1e1900 */
/*0b50*/ IMAD.WIDE R14, R7, 0x4, R18 ; /* 0x00000004070e7825 */
/* 0x000fc600078e0212 */
/*0b60*/ LDG.E R21, [R10.64+0x8] ; /* 0x000008040a157981 */
/* 0x000f28000c1e1900 */
/*0b70*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000f28000c1e1900 */
/*0b80*/ LDG.E R25, [R10.64+0xc] ; /* 0x00000c040a197981 */
/* 0x000f68000c1e1900 */
/*0b90*/ LDG.E R26, [R14.64] ; /* 0x000000040e1a7981 */
/* 0x000f62000c1e1900 */
/*0ba0*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fc80007ffe0ff */
/*0bb0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe40003f05270 */
/*0bc0*/ IADD3 R8, P1, R8, 0x10, RZ ; /* 0x0000001008087810 */
/* 0x000fe40007f3e0ff */
/*0bd0*/ IADD3 R0, R0, 0x4, RZ ; /* 0x0000000400007810 */
/* 0x000fe40007ffe0ff */
/*0be0*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */
/* 0x000fe20000ffe4ff */
/*0bf0*/ FFMA R9, R22, R9, R27 ; /* 0x0000000916097223 */
/* 0x004fc8000000001b */
/*0c00*/ FFMA R9, R16, R20, R9 ; /* 0x0000001410097223 */
/* 0x008fe40000000009 */
/*0c10*/ IMAD.WIDE R22, R7, 0x4, R14 ; /* 0x0000000407167825 */
/* 0x000fc800078e020e */
/*0c20*/ FFMA R9, R24, R21, R9 ; /* 0x0000001518097223 */
/* 0x010fc80000000009 */
/*0c30*/ FFMA R27, R26, R25, R9 ; /* 0x000000191a1b7223 */
/* 0x020fe20000000009 */
/*0c40*/ @P0 BRA 0xab0 ; /* 0xfffffe6000000947 */
/* 0x000fea000383ffff */
/*0c50*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0c60*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f05270 */
/*0c70*/ BSSY B0, 0xda0 ; /* 0x0000012000007945 */
/* 0x000fd80003800000 */
/*0c80*/ @!P0 BRA 0xd90 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0c90*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0ca0*/ IMAD R6, R2, c[0x0][0x164], R0 ; /* 0x0000590002067a24 */
/* 0x000fe400078e0200 */
/*0cb0*/ IMAD R0, R0, c[0x0][0x184], R17 ; /* 0x0000610000007a24 */
/* 0x000fce00078e0211 */
/*0cc0*/ IMAD.WIDE R6, R6, R9, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fc800078e0209 */
/*0cd0*/ IMAD.WIDE R8, R0, R9, c[0x0][0x178] ; /* 0x00005e0000087625 */
/* 0x000fc800078e0209 */
/*0ce0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0cf0*/ LDG.E R3, [R6.64] ; /* 0x0000000406037981 */
/* 0x0000a8000c1e1900 */
/*0d00*/ LDG.E R0, [R8.64] ; /* 0x0000000408007981 */
/* 0x0002a2000c1e1900 */
/*0d10*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fe40007ffe0ff */
/*0d20*/ MOV R5, c[0x0][0x184] ; /* 0x0000610000057a02 */
/* 0x000fe40000000f00 */
/*0d30*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc40003f05270 */
/*0d40*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x001fe20007f3e0ff */
/*0d50*/ IMAD.WIDE R8, R5, 0x4, R8 ; /* 0x0000000405087825 */
/* 0x002fc600078e0208 */
/*0d60*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0d70*/ FFMA R27, R0, R3, R27 ; /* 0x00000003001b7223 */
/* 0x004fcc000000001b */
/*0d80*/ @P0 BRA 0xce0 ; /* 0xffffff5000000947 */
/* 0x000fea000383ffff */
/*0d90*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0da0*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fe20000000f00 */
/*0db0*/ IMAD R2, R2, c[0x0][0x184], R17 ; /* 0x0000610002027a24 */
/* 0x000fe200078e0211 */
/*0dc0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0dd0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x188] ; /* 0x0000620002027625 */
/* 0x000fca00078e0203 */
/*0de0*/ STG.E [R2.64], R27 ; /* 0x0000001b02007986 */
/* 0x000fe2000c101904 */
/*0df0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0e00*/ BRA 0xe00; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0e10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ea0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0eb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ec0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ed0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ee0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ef0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
The shared memory is allocated using the __shared__ memory space specifier.
Shared memory is expected to be much faster than global memory.
The following code sample is a straightforward implementation of matrix multiplication
that does not take advantage of shared memory. Each thread reads one row of A and one
column of B and computes the corresponding element of C. A is therefore read B.columns
times from global memory and B is read A.rows times.
Example adapted from the nVIDIA CUDA 9.1 samples
*/
#include <iostream>
#include <memory>
#include <algorithm>
// Dense row-major matrix: element (r, c) lives at elements[r * num_columns + c]
// (the indexing used by matrixMult below).  `elements` is a raw pointer into
// caller-owned storage — host memory (malloc) or device memory (cudaMalloc)
// depending on which copy of the struct this is.
struct Matrix{
    int num_rows;
    int num_columns;
    float* elements;
};
// Computes C = A * B, one thread per output element.
// Expects a 2D launch where thread (x, y) produces C[row][column] with
// row taken from the y dimension and column from the x dimension.
// Preconditions: A.num_columns == B.num_rows, C is A.num_rows x B.num_columns,
// and all element pointers reference device memory.
__global__
void matrixMult(const Matrix a, const Matrix b, Matrix c){
    float accumulate = 0.f;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int column = blockDim.x * blockIdx.x + threadIdx.x;
    // Device printf is debugging-only output; it serializes threads and is slow.
    printf("\nthreadIdx(%d) threadIdy(%d)\n",threadIdx.x,threadIdx.y);
    // Guard the grid tail: if the launch grid does not divide the output
    // exactly, out-of-range threads must not read or write.
    if(row >= c.num_rows || column >= c.num_columns) return;
    // Bug fix: the dot-product length is the inner dimension a.num_columns
    // (not a.num_rows), and B's row stride is b.num_columns (not
    // c.num_columns).  The original indexing only worked because every
    // matrix in this example is square.
    for(int i = 0; i != a.num_columns; ++i){
        accumulate += a.elements[row * a.num_columns + i] * b.elements[i * b.num_columns + column];
    }
    c.elements[row * c.num_columns + column] = accumulate;
}
int main(){
    size_t dimension = 3;
    size_t dimension_matrix = dimension * dimension;
    // Abort with a readable message on any CUDA runtime failure.  Kernel
    // launches return no status themselves, so cudaGetLastError() is
    // checked explicitly right after the launch below.
    auto cuda_check = [](cudaError_t status){
        if(status != cudaSuccess){
            std::cerr << "CUDA error: " << cudaGetErrorString(status) << '\n';
            exit(EXIT_FAILURE);
        }
    };
    // Host matrices, all dimension x dimension, row-major.
    Matrix h_A, h_B, h_C;
    h_A.num_rows = h_B.num_rows = h_C.num_rows = dimension;
    h_A.num_columns = h_B.num_columns = h_C.num_columns = dimension;
    h_A.elements = (float*)malloc(dimension_matrix * sizeof(float));
    h_B.elements = (float*)malloc(dimension_matrix * sizeof(float));
    h_C.elements = (float*)malloc(dimension_matrix * sizeof(float));
    // Stateful generator shared across both fills: A gets 2..10, B gets 11..19.
    auto generate_element = [n = 1.f]() mutable {return (float)++n;};
    std::generate(h_A.elements, h_A.elements + (dimension_matrix), generate_element);
    std::generate(h_B.elements, h_B.elements + (dimension_matrix), generate_element);
    // Device mirrors of the three matrices.
    Matrix d_A, d_B, d_C;
    size_t size_bytes = dimension_matrix * sizeof(float);
    d_A.num_rows = d_B.num_rows = d_C.num_rows = dimension;
    d_A.num_columns = d_B.num_columns = d_C.num_columns = dimension;
    cuda_check(cudaMalloc(&d_A.elements, size_bytes));
    cuda_check(cudaMalloc(&d_B.elements, size_bytes));
    cuda_check(cudaMalloc(&d_C.elements, size_bytes));
    cuda_check(cudaMemcpy(d_A.elements, h_A.elements, size_bytes, cudaMemcpyHostToDevice));
    cuda_check(cudaMemcpy(d_B.elements, h_B.elements, size_bytes, cudaMemcpyHostToDevice));
    // Launching kernel: one thread per output element.
    size_t threads_per_block = dimension;
    dim3 dimBlock(threads_per_block,threads_per_block);
    dim3 dimGrid(h_B.num_columns / dimBlock.x, h_A.num_rows / dimBlock.y);
    std::cout << "\nLaunching CUDA kernel matrixMult<<<" << dimGrid.x
              << ", " << dimBlock.x << ">>>" << '\n';
    matrixMult<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cuda_check(cudaGetLastError());   // catches bad launch configurations
    // The blocking device-to-host copy also synchronizes with the kernel,
    // so h_C is fully written before the check below runs.
    cuda_check(cudaMemcpy(h_C.elements, d_C.elements, size_bytes, cudaMemcpyDeviceToHost));
    cuda_check(cudaFree(d_A.elements));
    cuda_check(cudaFree(d_B.elements));
    cuda_check(cudaFree(d_C.elements));
    // Check results against a CPU reference.
    for(size_t i = 0; i != dimension; ++i){
        for(size_t j = 0; j != dimension; ++j){
            // Accumulate in float (the original used int): truncating every
            // partial sum to int only matched the kernel's float arithmetic
            // because the inputs here happen to be exact small integers.
            float accumulator = 0.f;
            for(size_t k = 0; k != dimension; ++k)
                accumulator += h_A.elements[i * h_A.num_columns + k] * h_B.elements[k * h_B.num_columns + j];
            if(accumulator != h_C.elements[i * h_A.num_columns + j]){
                std::cerr << "Mismatch found in position " << i <<", " << j
                          << ": Expected = " << accumulator
                          << " Obtained = " << h_C.elements[i * h_A.num_columns + j] << '\n';
                free(h_A.elements);
                free(h_B.elements);
                free(h_C.elements);
                exit(EXIT_FAILURE);
            }
        }
    }
    free(h_A.elements);
    free(h_B.elements);
    free(h_C.elements);
    std::cout << "\nSUCCESSFULLY EXECUTED!\n" << std::endl;
    return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4547:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4547:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_
.type _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_, @function
_Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_:
.LFB4569:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movq %rdi, 64(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 80(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 120
pushq 8(%rsp)
.cfi_def_cfa_offset 128
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10matrixMult6MatrixS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4569:
.size _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_, .-_Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_
.globl _Z10matrixMult6MatrixS_S_
.type _Z10matrixMult6MatrixS_S_, @function
_Z10matrixMult6MatrixS_S_:
.LFB4570:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %rdi, 32(%rsp)
movq %rsi, 40(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq %rsp, %rdx
leaq 16(%rsp), %rsi
leaq 32(%rsp), %rdi
call _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4570:
.size _Z10matrixMult6MatrixS_S_, .-_Z10matrixMult6MatrixS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "\nLaunching CUDA kernel matrixMult<<<"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string ", "
.LC3:
.string ">>>"
.LC4:
.string "Mismatch found in position "
.LC5:
.string ": Expected = "
.LC6:
.string " Obtained = "
.LC7:
.string "\nSUCCESSFULLY EXECUTED!\n"
.text
.globl main
.type main, @function
main:
.LFB4543:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
movl $36, %edi
call malloc@PLT
movq %rax, %r14
movl $36, %edi
call malloc@PLT
movq %rax, %r12
movl $36, %edi
call malloc@PLT
movq %rax, %r15
leaq 36(%r14), %rdx
movq %r14, %rax
movss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
.L12:
addss %xmm1, %xmm0
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rax, %rdx
jne .L12
leaq 36(%r12), %rdx
movq %r12, %rax
movss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
.L13:
addss %xmm1, %xmm0
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rax, %rdx
jne .L13
movl $3, 80(%rsp)
movl $3, 64(%rsp)
movl $3, 48(%rsp)
movl $3, 84(%rsp)
movl $3, 68(%rsp)
movl $3, 52(%rsp)
leaq 56(%rsp), %rdi
movl $36, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $36, %esi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movl $36, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $36, %edx
movq %r14, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $36, %edx
movq %r12, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 32(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $1, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $3, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC3(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $10, %esi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c@PLT
movl $1, 36(%rsp)
movl $3, 24(%rsp)
movl $3, 28(%rsp)
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl 44(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L14:
movl $2, %ecx
movl $36, %edx
movq 88(%rsp), %rsi
movq %r15, %rdi
call cudaMemcpy@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rsi
movq %r15, %r8
movl $0, %r13d
movl $0, %edi
jmp .L15
.L28:
movdqa 48(%rsp), %xmm2
movaps %xmm2, 96(%rsp)
movdqa 64(%rsp), %xmm3
movaps %xmm3, 112(%rsp)
movdqa 80(%rsp), %xmm4
movaps %xmm4, 128(%rsp)
leaq 128(%rsp), %rdx
leaq 112(%rsp), %rsi
leaq 96(%rsp), %rdi
call _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_
jmp .L14
.L23:
leaq .LC4(%rip), %rsi
leaq _ZSt4cerr(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r13, %rsi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbp, %rsi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq 8(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $10, %esi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c@PLT
movq %r14, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movl $1, %edi
call exit@PLT
.L19:
addq $1, %r13
addq $12, %rsi
addq $12, %r8
cmpq $3, %r13
je .L20
.L15:
movq %r8, %rcx
movl $0, %ebp
.L21:
leaq (%r12,%rbp,4), %rdx
movl $0, %eax
movl %edi, %ebx
.L16:
movss (%rsi,%rax,4), %xmm0
mulss (%rdx), %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl %ebx, %xmm1
addss %xmm1, %xmm0
cvttss2sil %xmm0, %ebx
addq $1, %rax
addq $12, %rdx
cmpq $3, %rax
jne .L16
movq %rcx, 8(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl %ebx, %xmm0
ucomiss (%rcx), %xmm0
jp .L23
jne .L23
addq $1, %rbp
addq $4, %rcx
cmpq $3, %rbp
jne .L21
jmp .L19
.L20:
movq %r14, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
leaq .LC7(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L29
movl $0, %eax
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4543:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z10matrixMult6MatrixS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4572:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z10matrixMult6MatrixS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4572:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
The shared memory is allocated using the __shared__ memory space specifier.
Shared memory is expected to be much faster than global memory.
The following code sample is a straightforward implementation of matrix multiplication
that does not take advantage of shared memory. Each thread reads one row of A and one
column of B and computes the corresponding element of C. A is therefore read B.columns
times from global memory and B is read A.rows times.
Example adapted from the nVIDIA CUDA 9.1 samples
*/
#include <iostream>
#include <memory>
#include <algorithm>
struct Matrix{
int num_rows;
int num_columns;
float* elements;
};
__global__
void matrixMult(const Matrix a, const Matrix b, Matrix c){
float accumulate = 0.f;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int column = blockDim.x * blockIdx.x + threadIdx.x;
printf("\nthreadIdx(%d) threadIdy(%d)\n",threadIdx.x,threadIdx.y);
for(int i = 0; i != a.num_rows; ++i){
accumulate += a.elements[row * a.num_columns + i] * b.elements[i * c.num_columns + column];
}
c.elements[row * c.num_columns + column] = accumulate;
}
int main(){
size_t dimension = 3;
size_t dimension_matrix = dimension * dimension;
Matrix h_A, h_B, h_C;
h_A.num_rows = h_B.num_rows = h_C.num_rows = dimension;
h_A.num_columns = h_B.num_columns = h_C.num_columns = dimension;
h_A.elements = (float*)malloc(dimension_matrix * sizeof(float));
h_B.elements = (float*)malloc(dimension_matrix * sizeof(float));
h_C.elements = (float*)malloc(dimension_matrix * sizeof(float));
auto generate_element = [n = 1.f]() mutable {return (float)++n;};
std::generate(h_A.elements, h_A.elements + (dimension_matrix), generate_element);
std::generate(h_B.elements, h_B.elements + (dimension_matrix), generate_element);
Matrix d_A, d_B, d_C;
size_t size_bytes = dimension_matrix * sizeof(float);
d_A.num_rows = d_B.num_rows = d_C.num_rows = dimension;
d_A.num_columns = d_B.num_columns = d_C.num_columns = dimension;
cudaMalloc(&d_A.elements, size_bytes);
cudaMalloc(&d_B.elements, size_bytes);
cudaMalloc(&d_C.elements, size_bytes);
cudaMemcpy(d_A.elements, h_A.elements, size_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B.elements, h_B.elements, size_bytes, cudaMemcpyHostToDevice);
// Launching kernel
size_t threads_per_block = dimension;
dim3 dimBlock(threads_per_block,threads_per_block);
dim3 dimGrid(h_B.num_columns / dimBlock.x, h_A.num_rows / dimBlock.y);
std::cout << "\nLaunching CUDA kernel matrixMult<<<" << dimGrid.x
<< ", " << dimBlock.x << ">>>" << '\n';
matrixMult<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaMemcpy(h_C.elements, d_C.elements, size_bytes, cudaMemcpyDeviceToHost);
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
// Check Results
for(size_t i = 0; i != dimension; ++i){
for(size_t j = 0; j != dimension; ++j){
int accumulator = 0;
for(size_t k = 0; k != dimension; ++k)
accumulator += h_A.elements[i * h_A.num_columns + k] * h_B.elements[k * h_B.num_columns + j];
if(accumulator != h_C.elements[i * h_A.num_columns + j]){
std::cerr << "Mismatch found in position " << i <<", " << j
<< ": Expected = " << accumulator
<< " Obtained = " << h_C.elements[i * h_A.num_columns + j] << '\n';
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
exit(EXIT_FAILURE);
}
}
}
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
std::cout << "\nSUCCESSFULLY EXECUTED!\n" << std::endl;
return 0;
} | /*
The shared memory is allocated using the __shared__ memory space specifier.
Shared memory is expected to be much faster than global memory.
The following code sample is a straightforward implementation of matrix multiplication
that does not take advantage of shared memory. Each thread reads one row of A and one
column of B and computes the corresponding element of C. A is therefore read B.columns
times from global memory and B is read A.rows times.
Example adapted from the nVIDIA CUDA 9.1 samples
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <memory>
#include <algorithm>
struct Matrix{
int num_rows;
int num_columns;
float* elements;
};
__global__
void matrixMult(const Matrix a, const Matrix b, Matrix c){
float accumulate = 0.f;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int column = blockDim.x * blockIdx.x + threadIdx.x;
printf("\nthreadIdx(%d) threadIdy(%d)\n",threadIdx.x,threadIdx.y);
for(int i = 0; i != a.num_rows; ++i){
accumulate += a.elements[row * a.num_columns + i] * b.elements[i * c.num_columns + column];
}
c.elements[row * c.num_columns + column] = accumulate;
}
int main(){
size_t dimension = 3;
size_t dimension_matrix = dimension * dimension;
Matrix h_A, h_B, h_C;
h_A.num_rows = h_B.num_rows = h_C.num_rows = dimension;
h_A.num_columns = h_B.num_columns = h_C.num_columns = dimension;
h_A.elements = (float*)malloc(dimension_matrix * sizeof(float));
h_B.elements = (float*)malloc(dimension_matrix * sizeof(float));
h_C.elements = (float*)malloc(dimension_matrix * sizeof(float));
auto generate_element = [n = 1.f]() mutable {return (float)++n;};
std::generate(h_A.elements, h_A.elements + (dimension_matrix), generate_element);
std::generate(h_B.elements, h_B.elements + (dimension_matrix), generate_element);
Matrix d_A, d_B, d_C;
size_t size_bytes = dimension_matrix * sizeof(float);
d_A.num_rows = d_B.num_rows = d_C.num_rows = dimension;
d_A.num_columns = d_B.num_columns = d_C.num_columns = dimension;
hipMalloc(&d_A.elements, size_bytes);
hipMalloc(&d_B.elements, size_bytes);
hipMalloc(&d_C.elements, size_bytes);
hipMemcpy(d_A.elements, h_A.elements, size_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_B.elements, h_B.elements, size_bytes, hipMemcpyHostToDevice);
// Launching kernel
size_t threads_per_block = dimension;
dim3 dimBlock(threads_per_block,threads_per_block);
dim3 dimGrid(h_B.num_columns / dimBlock.x, h_A.num_rows / dimBlock.y);
std::cout << "\nLaunching CUDA kernel matrixMult<<<" << dimGrid.x
<< ", " << dimBlock.x << ">>>" << '\n';
matrixMult<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
hipMemcpy(h_C.elements, d_C.elements, size_bytes, hipMemcpyDeviceToHost);
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
// Check Results
for(size_t i = 0; i != dimension; ++i){
for(size_t j = 0; j != dimension; ++j){
int accumulator = 0;
for(size_t k = 0; k != dimension; ++k)
accumulator += h_A.elements[i * h_A.num_columns + k] * h_B.elements[k * h_B.num_columns + j];
if(accumulator != h_C.elements[i * h_A.num_columns + j]){
std::cerr << "Mismatch found in position " << i <<", " << j
<< ": Expected = " << accumulator
<< " Obtained = " << h_C.elements[i * h_A.num_columns + j] << '\n';
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
exit(EXIT_FAILURE);
}
}
}
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
std::cout << "\nSUCCESSFULLY EXECUTED!\n" << std::endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
The shared memory is allocated using the __shared__ memory space specifier.
Shared memory is expected to be much faster than global memory.
The following code sample is a straightforward implementation of matrix multiplication
that does not take advantage of shared memory. Each thread reads one row of A and one
column of B and computes the corresponding element of C. A is therefore read B.columns
times from global memory and B is read A.rows times.
Example adapted from the nVIDIA CUDA 9.1 samples
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <memory>
#include <algorithm>
struct Matrix{
int num_rows;
int num_columns;
float* elements;
};
__global__
void matrixMult(const Matrix a, const Matrix b, Matrix c){
float accumulate = 0.f;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int column = blockDim.x * blockIdx.x + threadIdx.x;
printf("\nthreadIdx(%d) threadIdy(%d)\n",threadIdx.x,threadIdx.y);
for(int i = 0; i != a.num_rows; ++i){
accumulate += a.elements[row * a.num_columns + i] * b.elements[i * c.num_columns + column];
}
c.elements[row * c.num_columns + column] = accumulate;
}
int main(){
size_t dimension = 3;
size_t dimension_matrix = dimension * dimension;
Matrix h_A, h_B, h_C;
h_A.num_rows = h_B.num_rows = h_C.num_rows = dimension;
h_A.num_columns = h_B.num_columns = h_C.num_columns = dimension;
h_A.elements = (float*)malloc(dimension_matrix * sizeof(float));
h_B.elements = (float*)malloc(dimension_matrix * sizeof(float));
h_C.elements = (float*)malloc(dimension_matrix * sizeof(float));
auto generate_element = [n = 1.f]() mutable {return (float)++n;};
std::generate(h_A.elements, h_A.elements + (dimension_matrix), generate_element);
std::generate(h_B.elements, h_B.elements + (dimension_matrix), generate_element);
Matrix d_A, d_B, d_C;
size_t size_bytes = dimension_matrix * sizeof(float);
d_A.num_rows = d_B.num_rows = d_C.num_rows = dimension;
d_A.num_columns = d_B.num_columns = d_C.num_columns = dimension;
hipMalloc(&d_A.elements, size_bytes);
hipMalloc(&d_B.elements, size_bytes);
hipMalloc(&d_C.elements, size_bytes);
hipMemcpy(d_A.elements, h_A.elements, size_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_B.elements, h_B.elements, size_bytes, hipMemcpyHostToDevice);
// Launching kernel
size_t threads_per_block = dimension;
dim3 dimBlock(threads_per_block,threads_per_block);
dim3 dimGrid(h_B.num_columns / dimBlock.x, h_A.num_rows / dimBlock.y);
std::cout << "\nLaunching CUDA kernel matrixMult<<<" << dimGrid.x
<< ", " << dimBlock.x << ">>>" << '\n';
matrixMult<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
hipMemcpy(h_C.elements, d_C.elements, size_bytes, hipMemcpyDeviceToHost);
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
// Check Results
for(size_t i = 0; i != dimension; ++i){
for(size_t j = 0; j != dimension; ++j){
int accumulator = 0;
for(size_t k = 0; k != dimension; ++k)
accumulator += h_A.elements[i * h_A.num_columns + k] * h_B.elements[k * h_B.num_columns + j];
if(accumulator != h_C.elements[i * h_A.num_columns + j]){
std::cerr << "Mismatch found in position " << i <<", " << j
<< ": Expected = " << accumulator
<< " Obtained = " << h_C.elements[i * h_A.num_columns + j] << '\n';
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
exit(EXIT_FAILURE);
}
}
}
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
std::cout << "\nSUCCESSFULLY EXECUTED!\n" << std::endl;
return 0;
} | .text
.file "matrixMul.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z25__device_stub__matrixMult6MatrixS_S_ # -- Begin function _Z25__device_stub__matrixMult6MatrixS_S_
.p2align 4, 0x90
.type _Z25__device_stub__matrixMult6MatrixS_S_,@function
_Z25__device_stub__matrixMult6MatrixS_S_: # @_Z25__device_stub__matrixMult6MatrixS_S_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 80(%rsp)
movq %rsi, 88(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10matrixMult6MatrixS_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__matrixMult6MatrixS_S_, .Lfunc_end0-_Z25__device_stub__matrixMult6MatrixS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x3f800000 # float 1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $36, %edi
callq malloc
movq %rax, %r15
movl $36, %edi
callq malloc
movq %rax, %r14
movl $36, %edi
callq malloc
movq %rax, %rbx
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
movaps %xmm0, %xmm1
.p2align 4, 0x90
.LBB1_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addss %xmm0, %xmm1
movss %xmm1, (%r15,%rcx)
addq $4, %rcx
cmpq $36, %rcx
jne .LBB1_1
# %bb.2: # %.lr.ph.i87.preheader
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %eax, %eax
movaps %xmm0, %xmm1
.p2align 4, 0x90
.LBB1_3: # %.lr.ph.i87
# =>This Inner Loop Header: Depth=1
addss %xmm0, %xmm1
movss %xmm1, (%r14,%rax)
addq $4, %rax
cmpq $36, %rax
jne .LBB1_3
# %bb.4: # %_ZSt8generateIPfZ4mainEUlvE_EvT_S2_T0_.exit91
movabsq $12884901891, %rax # imm = 0x300000003
movq %rax, (%rsp)
movq %rax, 16(%rsp)
movq %rax, 32(%rsp)
leaq 40(%rsp), %rdi
movl $36, %esi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $36, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $36, %esi
callq hipMalloc
movq 40(%rsp), %rdi
movl $36, %edx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $36, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $36, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $1, %esi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r12
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $3, %esi
movq %r12, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r12
movl $.L.str.2, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movb $10, 48(%rsp)
movq (%r12), %rax
movq -24(%rax), %rax
cmpq $0, 16(%r12,%rax)
je .LBB1_6
# %bb.5:
leaq 48(%rsp), %rsi
movl $1, %edx
movq %r12, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB1_7
.LBB1_6:
movq %r12, %rdi
movl $10, %esi
callq _ZNSo3putEc
.LBB1_7: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c.exit
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $12884901891, %rdx # imm = 0x300000003
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_9
# %bb.8:
movups 32(%rsp), %xmm0
movups 16(%rsp), %xmm1
movups (%rsp), %xmm2
movups %xmm0, 168(%rsp)
movups %xmm1, 152(%rsp)
movups %xmm2, 136(%rsp)
leaq 168(%rsp), %rax
movq %rax, 48(%rsp)
leaq 152(%rsp), %rax
movq %rax, 56(%rsp)
leaq 136(%rsp), %rax
movq %rax, 64(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10matrixMult6MatrixS_S_, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_9:
movq 8(%rsp), %rsi
movl $36, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
xorl %r13d, %r13d
movq %r15, %rax
.p2align 4, 0x90
.LBB1_10: # %.preheader98
# =>This Loop Header: Depth=1
# Child Loop BB1_11 Depth 2
# Child Loop BB1_12 Depth 3
leaq (,%r13,2), %rcx
addq %r13, %rcx
leaq (%rdi,%rcx,4), %rbx
movq %r14, %rcx
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_11: # %.preheader
# Parent Loop BB1_10 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_12 Depth 3
xorl %edx, %edx
movq %rcx, %rsi
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_12: # Parent Loop BB1_10 Depth=1
# Parent Loop BB1_11 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rax,%rdx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
mulss (%rsi), %xmm0
xorps %xmm1, %xmm1
cvtsi2ss %ebp, %xmm1
addss %xmm0, %xmm1
cvttss2si %xmm1, %ebp
incq %rdx
addq $12, %rsi
cmpq $3, %rdx
jne .LBB1_12
# %bb.13: # in Loop: Header=BB1_11 Depth=2
xorps %xmm0, %xmm0
cvtsi2ss %ebp, %xmm0
movss (%rbx,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
jne .LBB1_22
jp .LBB1_22
# %bb.14: # in Loop: Header=BB1_11 Depth=2
incq %r12
addq $4, %rcx
cmpq $3, %r12
jne .LBB1_11
# %bb.15: # in Loop: Header=BB1_10 Depth=1
incq %r13
addq $12, %rax
cmpq $3, %r13
jne .LBB1_10
# %bb.16:
movq %rdi, %rbx
movq %r15, %rdi
callq free
movq %r14, %rdi
callq free
movq %rbx, %rdi
callq free
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $24, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB1_21
# %bb.17: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_19
# %bb.18:
movzbl 67(%rbx), %eax
jmp .LBB1_20
.LBB1_19:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_20: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_22:
.cfi_def_cfa_offset 240
movq %rdi, 80(%rsp) # 8-byte Spill
movl $_ZSt4cerr, %edi
movl $.L.str.3, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movq %r13, %rsi
callq _ZNSolsEm
movl $.L.str.1, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movq %r12, %rsi
callq _ZNSolsEm
movl $.L.str.4, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movl %ebp, %esi
callq _ZNSolsEi
movl $.L.str.5, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq %rax, %rdi
callq _ZNSolsEf
movq %rax, %rdi
movl $10, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c
movq %r15, %rdi
callq free
movq %r14, %rdi
callq free
movq 80(%rsp), %rdi # 8-byte Reload
callq free
movl $1, %edi
callq exit
.LBB1_21:
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10matrixMult6MatrixS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10matrixMult6MatrixS_S_,@object # @_Z10matrixMult6MatrixS_S_
.section .rodata,"a",@progbits
.globl _Z10matrixMult6MatrixS_S_
.p2align 3, 0x0
_Z10matrixMult6MatrixS_S_:
.quad _Z25__device_stub__matrixMult6MatrixS_S_
.size _Z10matrixMult6MatrixS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\nLaunching CUDA kernel matrixMult<<<"
.size .L.str, 37
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ", "
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz ">>>"
.size .L.str.2, 4
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Mismatch found in position "
.size .L.str.3, 28
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz ": Expected = "
.size .L.str.4, 14
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz " Obtained = "
.size .L.str.5, 14
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nSUCCESSFULLY EXECUTED!\n"
.size .L.str.6, 25
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10matrixMult6MatrixS_S_"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__matrixMult6MatrixS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10matrixMult6MatrixS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt4cerr
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001b79b9_00000000-6_matrixMul.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4547:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4547:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_
.type _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_, @function
_Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_:
.LFB4569:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movq %rdi, 64(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 80(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 120
pushq 8(%rsp)
.cfi_def_cfa_offset 128
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10matrixMult6MatrixS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4569:
.size _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_, .-_Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_
.globl _Z10matrixMult6MatrixS_S_
.type _Z10matrixMult6MatrixS_S_, @function
_Z10matrixMult6MatrixS_S_:
.LFB4570:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %rdi, 32(%rsp)
movq %rsi, 40(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq %rsp, %rdx
leaq 16(%rsp), %rsi
leaq 32(%rsp), %rdi
call _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4570:
.size _Z10matrixMult6MatrixS_S_, .-_Z10matrixMult6MatrixS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "\nLaunching CUDA kernel matrixMult<<<"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string ", "
.LC3:
.string ">>>"
.LC4:
.string "Mismatch found in position "
.LC5:
.string ": Expected = "
.LC6:
.string " Obtained = "
.LC7:
.string "\nSUCCESSFULLY EXECUTED!\n"
.text
.globl main
.type main, @function
main:
.LFB4543:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
movl $36, %edi
call malloc@PLT
movq %rax, %r14
movl $36, %edi
call malloc@PLT
movq %rax, %r12
movl $36, %edi
call malloc@PLT
movq %rax, %r15
leaq 36(%r14), %rdx
movq %r14, %rax
movss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
.L12:
addss %xmm1, %xmm0
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rax, %rdx
jne .L12
leaq 36(%r12), %rdx
movq %r12, %rax
movss .LC0(%rip), %xmm0
movaps %xmm0, %xmm1
.L13:
addss %xmm1, %xmm0
movss %xmm0, (%rax)
addq $4, %rax
cmpq %rax, %rdx
jne .L13
movl $3, 80(%rsp)
movl $3, 64(%rsp)
movl $3, 48(%rsp)
movl $3, 84(%rsp)
movl $3, 68(%rsp)
movl $3, 52(%rsp)
leaq 56(%rsp), %rdi
movl $36, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $36, %esi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movl $36, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $36, %edx
movq %r14, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $36, %edx
movq %r12, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 32(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $1, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $3, %esi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC3(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $10, %esi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c@PLT
movl $1, 36(%rsp)
movl $3, 24(%rsp)
movl $3, 28(%rsp)
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl 44(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L14:
movl $2, %ecx
movl $36, %edx
movq 88(%rsp), %rsi
movq %r15, %rdi
call cudaMemcpy@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rsi
movq %r15, %r8
movl $0, %r13d
movl $0, %edi
jmp .L15
.L28:
movdqa 48(%rsp), %xmm2
movaps %xmm2, 96(%rsp)
movdqa 64(%rsp), %xmm3
movaps %xmm3, 112(%rsp)
movdqa 80(%rsp), %xmm4
movaps %xmm4, 128(%rsp)
leaq 128(%rsp), %rdx
leaq 112(%rsp), %rsi
leaq 96(%rsp), %rdi
call _Z39__device_stub__Z10matrixMult6MatrixS_S_RK6MatrixS1_RS_
jmp .L14
.L23:
leaq .LC4(%rip), %rsi
leaq _ZSt4cerr(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r13, %rsi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbp, %rsi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC6(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq 8(%rsp), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $10, %esi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c@PLT
movq %r14, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movl $1, %edi
call exit@PLT
.L19:
addq $1, %r13
addq $12, %rsi
addq $12, %r8
cmpq $3, %r13
je .L20
.L15:
movq %r8, %rcx
movl $0, %ebp
.L21:
leaq (%r12,%rbp,4), %rdx
movl $0, %eax
movl %edi, %ebx
.L16:
movss (%rsi,%rax,4), %xmm0
mulss (%rdx), %xmm0
pxor %xmm1, %xmm1
cvtsi2ssl %ebx, %xmm1
addss %xmm1, %xmm0
cvttss2sil %xmm0, %ebx
addq $1, %rax
addq $12, %rdx
cmpq $3, %rax
jne .L16
movq %rcx, 8(%rsp)
pxor %xmm0, %xmm0
cvtsi2ssl %ebx, %xmm0
ucomiss (%rcx), %xmm0
jp .L23
jne .L23
addq $1, %rbp
addq $4, %rcx
cmpq $3, %rbp
jne .L21
jmp .L19
.L20:
movq %r14, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
leaq .LC7(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L29
movl $0, %eax
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4543:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z10matrixMult6MatrixS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4572:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z10matrixMult6MatrixS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4572:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matrixMul.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z25__device_stub__matrixMult6MatrixS_S_ # -- Begin function _Z25__device_stub__matrixMult6MatrixS_S_
.p2align 4, 0x90
.type _Z25__device_stub__matrixMult6MatrixS_S_,@function
_Z25__device_stub__matrixMult6MatrixS_S_: # @_Z25__device_stub__matrixMult6MatrixS_S_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 80(%rsp)
movq %rsi, 88(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10matrixMult6MatrixS_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z25__device_stub__matrixMult6MatrixS_S_, .Lfunc_end0-_Z25__device_stub__matrixMult6MatrixS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x3f800000 # float 1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $36, %edi
callq malloc
movq %rax, %r15
movl $36, %edi
callq malloc
movq %rax, %r14
movl $36, %edi
callq malloc
movq %rax, %rbx
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %ecx, %ecx
movaps %xmm0, %xmm1
.p2align 4, 0x90
.LBB1_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addss %xmm0, %xmm1
movss %xmm1, (%r15,%rcx)
addq $4, %rcx
cmpq $36, %rcx
jne .LBB1_1
# %bb.2: # %.lr.ph.i87.preheader
movss .LCPI1_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %eax, %eax
movaps %xmm0, %xmm1
.p2align 4, 0x90
.LBB1_3: # %.lr.ph.i87
# =>This Inner Loop Header: Depth=1
addss %xmm0, %xmm1
movss %xmm1, (%r14,%rax)
addq $4, %rax
cmpq $36, %rax
jne .LBB1_3
# %bb.4: # %_ZSt8generateIPfZ4mainEUlvE_EvT_S2_T0_.exit91
movabsq $12884901891, %rax # imm = 0x300000003
movq %rax, (%rsp)
movq %rax, 16(%rsp)
movq %rax, 32(%rsp)
leaq 40(%rsp), %rdi
movl $36, %esi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $36, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $36, %esi
callq hipMalloc
movq 40(%rsp), %rdi
movl $36, %edx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $36, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $36, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl $1, %esi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r12
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $3, %esi
movq %r12, %rdi
callq _ZNSo9_M_insertImEERSoT_
movq %rax, %r12
movl $.L.str.2, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movb $10, 48(%rsp)
movq (%r12), %rax
movq -24(%rax), %rax
cmpq $0, 16(%r12,%rax)
je .LBB1_6
# %bb.5:
leaq 48(%rsp), %rsi
movl $1, %edx
movq %r12, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB1_7
.LBB1_6:
movq %r12, %rdi
movl $10, %esi
callq _ZNSo3putEc
.LBB1_7: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c.exit
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $12884901891, %rdx # imm = 0x300000003
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_9
# %bb.8:
movups 32(%rsp), %xmm0
movups 16(%rsp), %xmm1
movups (%rsp), %xmm2
movups %xmm0, 168(%rsp)
movups %xmm1, 152(%rsp)
movups %xmm2, 136(%rsp)
leaq 168(%rsp), %rax
movq %rax, 48(%rsp)
leaq 152(%rsp), %rax
movq %rax, 56(%rsp)
leaq 136(%rsp), %rax
movq %rax, 64(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10matrixMult6MatrixS_S_, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_9:
movq 8(%rsp), %rsi
movl $36, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
xorl %r13d, %r13d
movq %r15, %rax
.p2align 4, 0x90
.LBB1_10: # %.preheader98
# =>This Loop Header: Depth=1
# Child Loop BB1_11 Depth 2
# Child Loop BB1_12 Depth 3
leaq (,%r13,2), %rcx
addq %r13, %rcx
leaq (%rdi,%rcx,4), %rbx
movq %r14, %rcx
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_11: # %.preheader
# Parent Loop BB1_10 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_12 Depth 3
xorl %edx, %edx
movq %rcx, %rsi
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_12: # Parent Loop BB1_10 Depth=1
# Parent Loop BB1_11 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rax,%rdx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
mulss (%rsi), %xmm0
xorps %xmm1, %xmm1
cvtsi2ss %ebp, %xmm1
addss %xmm0, %xmm1
cvttss2si %xmm1, %ebp
incq %rdx
addq $12, %rsi
cmpq $3, %rdx
jne .LBB1_12
# %bb.13: # in Loop: Header=BB1_11 Depth=2
xorps %xmm0, %xmm0
cvtsi2ss %ebp, %xmm0
movss (%rbx,%r12,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
jne .LBB1_22
jp .LBB1_22
# %bb.14: # in Loop: Header=BB1_11 Depth=2
incq %r12
addq $4, %rcx
cmpq $3, %r12
jne .LBB1_11
# %bb.15: # in Loop: Header=BB1_10 Depth=1
incq %r13
addq $12, %rax
cmpq $3, %r13
jne .LBB1_10
# %bb.16:
movq %rdi, %rbx
movq %r15, %rdi
callq free
movq %r14, %rdi
callq free
movq %rbx, %rdi
callq free
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $24, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB1_21
# %bb.17: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_19
# %bb.18:
movzbl 67(%rbx), %eax
jmp .LBB1_20
.LBB1_19:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_20: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_22:
.cfi_def_cfa_offset 240
movq %rdi, 80(%rsp) # 8-byte Spill
movl $_ZSt4cerr, %edi
movl $.L.str.3, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movq %r13, %rsi
callq _ZNSolsEm
movl $.L.str.1, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movq %r12, %rsi
callq _ZNSolsEm
movl $.L.str.4, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movl %ebp, %esi
callq _ZNSolsEi
movl $.L.str.5, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq %rax, %rdi
callq _ZNSolsEf
movq %rax, %rdi
movl $10, %esi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c
movq %r15, %rdi
callq free
movq %r14, %rdi
callq free
movq 80(%rsp), %rdi # 8-byte Reload
callq free
movl $1, %edi
callq exit
.LBB1_21:
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10matrixMult6MatrixS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10matrixMult6MatrixS_S_,@object # @_Z10matrixMult6MatrixS_S_
.section .rodata,"a",@progbits
.globl _Z10matrixMult6MatrixS_S_
.p2align 3, 0x0
_Z10matrixMult6MatrixS_S_:
.quad _Z25__device_stub__matrixMult6MatrixS_S_
.size _Z10matrixMult6MatrixS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\nLaunching CUDA kernel matrixMult<<<"
.size .L.str, 37
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ", "
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz ">>>"
.size .L.str.2, 4
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Mismatch found in position "
.size .L.str.3, 28
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz ": Expected = "
.size .L.str.4, 14
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz " Obtained = "
.size .L.str.5, 14
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\nSUCCESSFULLY EXECUTED!\n"
.size .L.str.6, 25
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10matrixMult6MatrixS_S_"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__matrixMult6MatrixS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10matrixMult6MatrixS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt4cerr
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_abs(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)fabs(A[index]);
}
} | code for sm_80
Function : matrix_abs
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe200078e0205 */
/*00b0*/ DADD R6, -RZ, |R2| ; /* 0x00000000ff067229 */
/* 0x004e0e0000000502 */
/*00c0*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */
/* 0x001fe2000c101b04 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_abs(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)fabs(A[index]);
}
} | .file "tmpxft_0013e1e9_00000000-6_matrix_abs.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z10matrix_absPdS_jPdS_j
.type _Z33__device_stub__Z10matrix_absPdS_jPdS_j, @function
_Z33__device_stub__Z10matrix_absPdS_jPdS_j:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq matrix_abs(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z10matrix_absPdS_jPdS_j, .-_Z33__device_stub__Z10matrix_absPdS_jPdS_j
.globl matrix_abs
.type matrix_abs, @function
matrix_abs:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z10matrix_absPdS_jPdS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size matrix_abs, .-matrix_abs
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "matrix_abs"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq matrix_abs(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_abs(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)fabs(A[index]);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_abs(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)fabs(A[index]);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_abs(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)fabs(A[index]);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected matrix_abs
.globl matrix_abs
.p2align 8
.type matrix_abs,@function
matrix_abs:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_and_b32_e32 v3, 0x7fffffff, v3
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel matrix_abs
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size matrix_abs, .Lfunc_end0-matrix_abs
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: matrix_abs
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: matrix_abs.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_abs(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = (double)fabs(A[index]);
}
} | .text
.file "matrix_abs.hip"
.globl __device_stub__matrix_abs # -- Begin function __device_stub__matrix_abs
.p2align 4, 0x90
.type __device_stub__matrix_abs,@function
__device_stub__matrix_abs: # @__device_stub__matrix_abs
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $matrix_abs, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size __device_stub__matrix_abs, .Lfunc_end0-__device_stub__matrix_abs
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $matrix_abs, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type matrix_abs,@object # @matrix_abs
.section .rodata,"a",@progbits
.globl matrix_abs
.p2align 3, 0x0
matrix_abs:
.quad __device_stub__matrix_abs
.size matrix_abs, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "matrix_abs"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__matrix_abs
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym matrix_abs
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : matrix_abs
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe200078e0205 */
/*00b0*/ DADD R6, -RZ, |R2| ; /* 0x00000000ff067229 */
/* 0x004e0e0000000502 */
/*00c0*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */
/* 0x001fe2000c101b04 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected matrix_abs
.globl matrix_abs
.p2align 8
.type matrix_abs,@function
matrix_abs:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_and_b32_e32 v3, 0x7fffffff, v3
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel matrix_abs
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size matrix_abs, .Lfunc_end0-matrix_abs
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: matrix_abs
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: matrix_abs.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0013e1e9_00000000-6_matrix_abs.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z10matrix_absPdS_jPdS_j
.type _Z33__device_stub__Z10matrix_absPdS_jPdS_j, @function
_Z33__device_stub__Z10matrix_absPdS_jPdS_j:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq matrix_abs(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z10matrix_absPdS_jPdS_j, .-_Z33__device_stub__Z10matrix_absPdS_jPdS_j
.globl matrix_abs
.type matrix_abs, @function
matrix_abs:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z10matrix_absPdS_jPdS_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size matrix_abs, .-matrix_abs
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "matrix_abs"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq matrix_abs(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matrix_abs.hip"
.globl __device_stub__matrix_abs # -- Begin function __device_stub__matrix_abs
.p2align 4, 0x90
.type __device_stub__matrix_abs,@function
__device_stub__matrix_abs: # @__device_stub__matrix_abs
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $matrix_abs, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size __device_stub__matrix_abs, .Lfunc_end0-__device_stub__matrix_abs
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $matrix_abs, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type matrix_abs,@object # @matrix_abs
.section .rodata,"a",@progbits
.globl matrix_abs
.p2align 3, 0x0
matrix_abs:
.quad __device_stub__matrix_abs
.size matrix_abs, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "matrix_abs"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__matrix_abs
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym matrix_abs
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void helloFromGPU() {
if (threadIdx.x < 20 && blockIdx.x < 20)
printf("Hello World from GPU! %d %d\n", threadIdx.x, blockIdx.x);
}
int main(int argc, char**argv) {
printf("Hello World from CPU!\n");
// 2 milhões blocos de 1024 threads
long long int blocks = 2 * 1e6;
long long int threads = 1024; // Numero maximo suportada pela GPU que rodamos
helloFromGPU<<<blocks, threads>>>();
cudaDeviceReset();
return 0;
} | code for sm_80
Function : _Z12helloFromGPUv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc800078e00ff */
/*0010*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e220000002500 */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fc60007ffe0ff */
/*0030*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */
/* 0x000e620000002100 */
/*0040*/ IADD3 R6, P1, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe40007f3e0ff */
/*0050*/ ISETP.GT.U32.AND P0, PT, R9, 0x13, PT ; /* 0x000000130900780c */
/* 0x001fc80003f04070 */
/*0060*/ ISETP.GT.U32.OR P0, PT, R8, 0x13, P0 ; /* 0x000000130800780c */
/* 0x002fda0000704470 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0090*/ STL.64 [R1], R8 ; /* 0x0000000801007387 */
/* 0x0001e20000100a00 */
/*00a0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P1 ; /* 0x00000900ff077624 */
/* 0x000fe400008e06ff */
/*00b0*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x0000620000000a00 */
/*00c0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe400078e00ff */
/*00d0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fca00078e00ff */
/*00e0*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x001fe40000000000 */
/*00f0*/ MOV R11, 0x160 ; /* 0x00000160000b7802 */
/* 0x000fe40000000f00 */
/*0100*/ MOV R20, 0xe0 ; /* 0x000000e000147802 */
/* 0x000fe40000000f00 */
/*0110*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fc40000000f00 */
/*0120*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0130*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*0140*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*0150*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void helloFromGPU() {
if (threadIdx.x < 20 && blockIdx.x < 20)
printf("Hello World from GPU! %d %d\n", threadIdx.x, blockIdx.x);
}
int main(int argc, char**argv) {
printf("Hello World from CPU!\n");
// 2 milhões blocos de 1024 threads
long long int blocks = 2 * 1e6;
long long int threads = 1024; // Numero maximo suportada pela GPU que rodamos
helloFromGPU<<<blocks, threads>>>();
cudaDeviceReset();
return 0;
} | .file "tmpxft_00053dfe_00000000-6_ex3.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z12helloFromGPUvv
.type _Z31__device_stub__Z12helloFromGPUvv, @function
_Z31__device_stub__Z12helloFromGPUvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z12helloFromGPUv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z31__device_stub__Z12helloFromGPUvv, .-_Z31__device_stub__Z12helloFromGPUvv
.globl _Z12helloFromGPUv
.type _Z12helloFromGPUv, @function
_Z12helloFromGPUv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z12helloFromGPUvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z12helloFromGPUv, .-_Z12helloFromGPUv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World from CPU!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
movl $2000000, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceReset@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z31__device_stub__Z12helloFromGPUvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z12helloFromGPUv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z12helloFromGPUv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
__global__ void helloFromGPU() {
if (threadIdx.x < 20 && blockIdx.x < 20)
printf("Hello World from GPU! %d %d\n", threadIdx.x, blockIdx.x);
}
int main(int argc, char**argv) {
printf("Hello World from CPU!\n");
// 2 milhões blocos de 1024 threads
long long int blocks = 2 * 1e6;
long long int threads = 1024; // Numero maximo suportada pela GPU que rodamos
helloFromGPU<<<blocks, threads>>>();
cudaDeviceReset();
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void helloFromGPU() {
if (threadIdx.x < 20 && blockIdx.x < 20)
printf("Hello World from GPU! %d %d\n", threadIdx.x, blockIdx.x);
}
int main(int argc, char**argv) {
printf("Hello World from CPU!\n");
// 2 milhões blocos de 1024 threads
long long int blocks = 2 * 1e6;
long long int threads = 1024; // Numero maximo suportada pela GPU que rodamos
helloFromGPU<<<blocks, threads>>>();
hipDeviceReset();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void helloFromGPU() {
if (threadIdx.x < 20 && blockIdx.x < 20)
printf("Hello World from GPU! %d %d\n", threadIdx.x, blockIdx.x);
}
int main(int argc, char**argv) {
printf("Hello World from CPU!\n");
// 2 milhões blocos de 1024 threads
long long int blocks = 2 * 1e6;
long long int threads = 1024; // Numero maximo suportada pela GPU que rodamos
helloFromGPU<<<blocks, threads>>>();
hipDeviceReset();
return 0;
} | .text
.file "ex3.hip"
.globl _Z27__device_stub__helloFromGPUv # -- Begin function _Z27__device_stub__helloFromGPUv
.p2align 4, 0x90
.type _Z27__device_stub__helloFromGPUv,@function
_Z27__device_stub__helloFromGPUv: # @_Z27__device_stub__helloFromGPUv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z27__device_stub__helloFromGPUv, .Lfunc_end0-_Z27__device_stub__helloFromGPUv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
movabsq $4294968320, %rdx # imm = 0x100000400
leaq 1998976(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceReset
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12helloFromGPUv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12helloFromGPUv,@object # @_Z12helloFromGPUv
.section .rodata,"a",@progbits
.globl _Z12helloFromGPUv
.p2align 3, 0x0
_Z12helloFromGPUv:
.quad _Z27__device_stub__helloFromGPUv
.size _Z12helloFromGPUv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12helloFromGPUv"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello World from CPU!"
.size .Lstr, 22
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__helloFromGPUv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12helloFromGPUv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00053dfe_00000000-6_ex3.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z12helloFromGPUvv
.type _Z31__device_stub__Z12helloFromGPUvv, @function
_Z31__device_stub__Z12helloFromGPUvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z12helloFromGPUv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z31__device_stub__Z12helloFromGPUvv, .-_Z31__device_stub__Z12helloFromGPUvv
.globl _Z12helloFromGPUv
.type _Z12helloFromGPUv, @function
_Z12helloFromGPUv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z12helloFromGPUvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z12helloFromGPUv, .-_Z12helloFromGPUv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World from CPU!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1024, 20(%rsp)
movl $1, 24(%rsp)
movl $2000000, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceReset@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z31__device_stub__Z12helloFromGPUvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z12helloFromGPUv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z12helloFromGPUv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "ex3.hip"
.globl _Z27__device_stub__helloFromGPUv # -- Begin function _Z27__device_stub__helloFromGPUv
.p2align 4, 0x90
.type _Z27__device_stub__helloFromGPUv,@function
_Z27__device_stub__helloFromGPUv: # @_Z27__device_stub__helloFromGPUv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z27__device_stub__helloFromGPUv, .Lfunc_end0-_Z27__device_stub__helloFromGPUv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
movabsq $4294968320, %rdx # imm = 0x100000400
leaq 1998976(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z12helloFromGPUv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceReset
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12helloFromGPUv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12helloFromGPUv,@object # @_Z12helloFromGPUv
.section .rodata,"a",@progbits
.globl _Z12helloFromGPUv
.p2align 3, 0x0
_Z12helloFromGPUv:
.quad _Z27__device_stub__helloFromGPUv
.size _Z12helloFromGPUv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12helloFromGPUv"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello World from CPU!"
.size .Lstr, 22
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__helloFromGPUv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12helloFromGPUv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <iostream>
#include <math.h>
#include "cuda.h"
#include <time.h>
#define BLOCK_DIM 16
__global__ void computeDistance(float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
// They will contain, for each thread, the current coordinates of A and B - block_dim in each step
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x; // Local query point index
int ty = threadIdx.y; // Local ref point index
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y; // Each block has its own start on ref points
begin_B = BLOCK_DIM * blockIdx.x; // Each block has its own start on query points
step_A = BLOCK_DIM * pA; // next step = next row of the big matrix
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA; // Each submatrix treated by given block has BLOCK_DIM columns and dim rows
// Conditions
int cond0 = (begin_A + tx < wA); // current column is out of A
int cond1 = (begin_B + tx < wB); // current column is out of B
int cond2 = (begin_A + ty < wA); // ty is column number in A
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
// ty corresponds to row, tx to column in the resulting matrix, as well as ref and query points in input,
// but when copying to local memory, they work just as numbers for indeces (tx is column number in both cases)
// a/pA + ty is the row number in A corresponding to this thread in this block
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if(cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
// Selection sort
__global__ void sort(float *dist, int *ind, int width, int pitch, int ind_pitch, int height, int k){
// Variables
int l, i, min_index;
float *p_dist;
int *p_ind;
float min_value, tmp;
// xIndex is column in the sorted matrix
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
p_dist = dist+xIndex;
p_ind = ind+xIndex;
min_value = *p_dist;
for (l = 0; l < k; l++) {
min_index = l;
min_value = *(p_dist+l*pitch);
for (i=l+1; i < height; i++) {
if (*(p_dist+i*pitch) < min_value) {
min_index = i;
min_value = *(p_dist+i*pitch);
}
}
if (min_index != l) {
tmp = *(p_dist+min_index*pitch);
*(p_dist+min_index*pitch) = *(p_dist+l*pitch);
*(p_dist+l*pitch) = tmp;
}
p_ind[l*ind_pitch] = min_index;
}
}
}
__global__ void parallelSqrt(float *dist, int width, int pitch, int k) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
// Compute the mean of the first k elements
__global__ void mean(float *dist, int width, int pitch, float *res, int k) {
float sum;
float *p;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
sum = 0;
p = dist + xIndex;
for (int l = 0; l < k*pitch; l += pitch) sum += *(p+l);
res[xIndex] = sum/k;
}
}
void printErrorMessage(cudaError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error));
printf("Wished allocated memory : %d\n", memorySize);
printf("==================================================\n");
}
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, float* res_host, int *ind_host) {
// Initialize variables
float *ref_dev;
float *query_dev;
float *dist_dev;
int *ind_dev;
float *res_dev;
size_t ref_pitch_in_bytes;
size_t query_pitch_in_bytes;
size_t res_pitch_in_bytes;
size_t ind_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t ref_pitch;
size_t query_pitch;
// size_t res_pitch;
size_t ind_pitch;
cudaError_t result;
// Allocate device memory
result = cudaMallocPitch((void **) &ref_dev, &ref_pitch_in_bytes, ref_width * sizeof(float), height);
if (result){
cudaFree(ref_dev);
printErrorMessage(result, ref_width*sizeof(float)*height);
return;
}
result = cudaMallocPitch((void **) &query_dev, &query_pitch_in_bytes, query_width*sizeof(float), height);
if (result){
cudaFree(query_dev);
printErrorMessage(result, query_width*sizeof(float)*k);
return;
}
result = cudaMallocPitch((void **) &dist_dev, &dist_pitch_in_bytes, query_width*sizeof(float), ref_width);
if (result){
cudaFree(dist_dev);
printErrorMessage(result, query_width*sizeof(float)*ref_width);
return;
}
result = cudaMallocPitch((void **) &ind_dev, &ind_pitch_in_bytes, query_width*sizeof(int), k);
if (result){
cudaFree(ind_dev);
printErrorMessage(result, query_width*sizeof(int)*k);
return;
}
result = cudaMallocPitch((void **) &res_dev, &res_pitch_in_bytes, query_width*sizeof(float), 1);
if (result){
cudaFree(res_dev);
printErrorMessage(result, query_width*sizeof(float));
return;
}
// Copy reference and query points to global memory
cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*sizeof(float), ref_width*sizeof(float), height, cudaMemcpyHostToDevice);
cudaMemcpy2D(query_dev, query_pitch_in_bytes, query_host, query_width*sizeof(float), query_width*sizeof(float), height, cudaMemcpyHostToDevice);
// Compute the pitches
ref_pitch = ref_pitch_in_bytes/sizeof(float);
query_pitch = query_pitch_in_bytes/sizeof(float);
// res_pitch = res_pitch_in_bytes/sizeof(float);
ind_pitch = ind_pitch_in_bytes/sizeof(int);
// Set kernel dims
// Each block has 16x16 threads, and processes 1/16 of ref width
// It creates a local 16x16 matrix, which goes down the rows
// The number of blocks depends on nb_ref, threads/block is fixed
dim3 threads_per_block_2D(BLOCK_DIM, BLOCK_DIM, 1);
dim3 threads_per_block_1D(BLOCK_DIM * BLOCK_DIM, 1, 1);
dim3 blocks_2D(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) ref_width/BLOCK_DIM), 1);
dim3 blocks_2D_k(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) k/BLOCK_DIM), 1);
dim3 blocks_1D(std::ceil((float) query_width/(BLOCK_DIM*BLOCK_DIM)), 1, 1);
// Start kernels
computeDistance<<<blocks_2D, threads_per_block_2D>>>(ref_dev, ref_width, ref_pitch, query_dev, query_width, query_pitch, height, dist_dev);
sort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, ind_dev, query_width, query_pitch, ind_pitch, ref_width, k);
// insertionSort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_pitch, ind_dev, ind_pitch, query_width, ref_width, k);
parallelSqrt<<<blocks_2D_k, threads_per_block_2D>>>(dist_dev, query_width, query_pitch, k);
mean<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_width, query_pitch, res_dev, k);
// Copy memory from device to host
cudaMemcpy2D(res_host, query_width*sizeof(float), res_dev, query_pitch_in_bytes, query_width*sizeof(float), 1, cudaMemcpyDeviceToHost);
cudaMemcpy2D(dist_host, query_width*sizeof(float), dist_dev, dist_pitch_in_bytes, query_width*sizeof(float),k , cudaMemcpyDeviceToHost);
cudaMemcpy2D(ind_host, query_width*sizeof(int) , ind_dev, ind_pitch_in_bytes, query_width*sizeof(int) , k, cudaMemcpyDeviceToHost);
cudaFree(ref_dev); cudaFree(query_dev); cudaFree(res_dev); cudaFree(ind_dev);
}
int main() {
// Initialize variables
float *ref;
float *query;
float *dist;
float *res;
int *ind;
int ref_nb = 4096;
int query_nb = 4096;
int dim = 32;
int k = 20;
// Allocate host memory
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(query_nb * k * sizeof(float));
res = (float *) malloc(query_nb * 1 * sizeof(float)); // Mean of the first k distances in the sorted matrix
ind = (int *) malloc(query_nb * k * sizeof(int));
// Generate random data
srand(time(NULL));
for (int i = 0; i<ref_nb * dim; i++) ref[i] = (float) (rand() % 100);
for (int i = 0; i<query_nb * dim; i++) query[i] = (float) (rand() % 100);
knn(ref, ref_nb, query, query_nb, dim, k, dist, res, ind);
for (int j = 0; j < 10; j++) {
std::cout << "( ";
for (int i = 0; i < dim; i++) std::cout << query[i*query_nb+j] << " ";
std::cout << ")" << std::endl;
std::cout << res[j] << std::endl;
for (int i = 0; i < k; i++) std::cout << ind[i*query_nb+j] << " ";
std::cout << std::endl << std::endl;
}
for (int i = 0; i < k; i++) {
for (int j = 0; j < 10; j++) {
std::cout << dist[i*query_nb + j] << " ";
}
std::cout << std::endl;
}
free(ref); free(query); free(dist); free(ind);
return 0;
} | .file "tmpxft_0000fe7b_00000000-6_knn_cuda.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "==================================================\n"
.align 8
.LC1:
.string "MEMORY ALLOCATION ERROR : %s\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Wished allocated memory : %d\n"
.text
.globl _Z17printErrorMessage9cudaErrori
.type _Z17printErrorMessage9cudaErrori, @function
_Z17printErrorMessage9cudaErrori:
.LFB3669:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %edi, %ebp
movl %esi, %ebx
leaq .LC0(%rip), %r12
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebp, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %ebx, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z17printErrorMessage9cudaErrori, .-_Z17printErrorMessage9cudaErrori
.globl _Z44__device_stub__Z15computeDistancePfiiS_iiiS_PfiiS_iiiS_
.type _Z44__device_stub__Z15computeDistancePfiiS_iiiS_PfiiS_iiiS_, @function
_Z44__device_stub__Z15computeDistancePfiiS_iiiS_PfiiS_iiiS_:
.LFB3696:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 40(%rsp)
movl %esi, 36(%rsp)
movl %edx, 32(%rsp)
movq %rcx, 24(%rsp)
movl %r8d, 20(%rsp)
movl %r9d, 16(%rsp)
movq 216(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 8(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z15computeDistancePfiiS_iiiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z44__device_stub__Z15computeDistancePfiiS_iiiS_PfiiS_iiiS_, .-_Z44__device_stub__Z15computeDistancePfiiS_iiiS_PfiiS_iiiS_
.globl _Z15computeDistancePfiiS_iiiS_
.type _Z15computeDistancePfiiS_iiiS_, @function
_Z15computeDistancePfiiS_iiiS_:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z44__device_stub__Z15computeDistancePfiiS_iiiS_PfiiS_iiiS_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z15computeDistancePfiiS_iiiS_, .-_Z15computeDistancePfiiS_iiiS_
.globl _Z30__device_stub__Z4sortPfPiiiiiiPfPiiiiii
.type _Z30__device_stub__Z4sortPfPiiiiiiPfPiiiiii, @function
_Z30__device_stub__Z4sortPfPiiiiiiPfPiiiiii:
.LFB3698:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4sortPfPiiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3698:
.size _Z30__device_stub__Z4sortPfPiiiiiiPfPiiiiii, .-_Z30__device_stub__Z4sortPfPiiiiiiPfPiiiiii
.globl _Z4sortPfPiiiiii
.type _Z4sortPfPiiiiii, @function
_Z4sortPfPiiiiii:
.LFB3699:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z30__device_stub__Z4sortPfPiiiiiiPfPiiiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _Z4sortPfPiiiiii, .-_Z4sortPfPiiiiii
.globl _Z35__device_stub__Z12parallelSqrtPfiiiPfiii
.type _Z35__device_stub__Z12parallelSqrtPfiiiPfiii, @function
_Z35__device_stub__Z12parallelSqrtPfiiiPfiii:
.LFB3700:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12parallelSqrtPfiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3700:
.size _Z35__device_stub__Z12parallelSqrtPfiiiPfiii, .-_Z35__device_stub__Z12parallelSqrtPfiiiPfiii
.globl _Z12parallelSqrtPfiii
.type _Z12parallelSqrtPfiii, @function
_Z12parallelSqrtPfiii:
.LFB3701:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12parallelSqrtPfiiiPfiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _Z12parallelSqrtPfiii, .-_Z12parallelSqrtPfiii
.globl _Z28__device_stub__Z4meanPfiiS_iPfiiS_i
.type _Z28__device_stub__Z4meanPfiiS_iPfiiS_i, @function
_Z28__device_stub__Z4meanPfiiS_iPfiiS_i:
.LFB3702:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movq %rcx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4meanPfiiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3702:
.size _Z28__device_stub__Z4meanPfiiS_iPfiiS_i, .-_Z28__device_stub__Z4meanPfiiS_iPfiiS_i
.globl _Z4meanPfiiS_i
.type _Z4meanPfiiS_i, @function
_Z4meanPfiiS_i:
.LFB3703:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z4meanPfiiS_iPfiiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3703:
.size _Z4meanPfiiS_i, .-_Z4meanPfiiS_i
.globl _Z3knnPfiS_iiiS_S_Pi
.type _Z3knnPfiS_iiiS_S_Pi, @function
_Z3knnPfiS_iiiS_S_Pi:
.LFB3670:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $232, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %rdi, -240(%rbp)
movl %esi, %ebx
movq %rdx, -248(%rbp)
movl %ecx, -212(%rbp)
movl %r8d, -228(%rbp)
movl %r9d, -216(%rbp)
movq 16(%rbp), %rax
movq %rax, -256(%rbp)
movq 24(%rbp), %rax
movq %rax, -264(%rbp)
movq 32(%rbp), %rax
movq %rax, -272(%rbp)
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
movslq %r8d, %r12
movslq %esi, %r14
leaq 0(,%r14,4), %r15
leaq -160(%rbp), %rsi
leaq -200(%rbp), %rdi
movq %r12, %rcx
movq %r15, %rdx
call cudaMallocPitch@PLT
testl %eax, %eax
jne .L55
movslq -212(%rbp), %rax
salq $2, %rax
movq %rax, -224(%rbp)
leaq -152(%rbp), %rsi
leaq -192(%rbp), %rdi
movq %r12, %rcx
movq %rax, %rdx
call cudaMallocPitch@PLT
movl %eax, %r13d
testl %eax, %eax
jne .L56
leaq -128(%rbp), %rsi
leaq -184(%rbp), %rdi
movq %r14, %rcx
movq -224(%rbp), %rdx
call cudaMallocPitch@PLT
movl %eax, %r13d
testl %eax, %eax
jne .L57
movslq -216(%rbp), %r14
leaq -136(%rbp), %rsi
leaq -176(%rbp), %rdi
movq %r14, %rcx
movq -224(%rbp), %rdx
call cudaMallocPitch@PLT
movl %eax, %r13d
testl %eax, %eax
jne .L58
leaq -144(%rbp), %rsi
leaq -168(%rbp), %rdi
movl $1, %ecx
movq -224(%rbp), %rdx
call cudaMallocPitch@PLT
movl %eax, %r13d
testl %eax, %eax
jne .L59
subq $8, %rsp
pushq $1
movq %r12, %r9
movq %r15, %r8
movq %r15, %rcx
movq -240(%rbp), %rdx
movq -160(%rbp), %rsi
movq -200(%rbp), %rdi
call cudaMemcpy2D@PLT
movl $1, (%rsp)
movq %r12, %r9
movq -224(%rbp), %rcx
movq %rcx, %r8
movq -248(%rbp), %rdx
movq -152(%rbp), %rsi
movq -192(%rbp), %rdi
call cudaMemcpy2D@PLT
movq -160(%rbp), %r12
movq -152(%rbp), %r13
shrq $2, %r13
movq -136(%rbp), %r15
movl $16, -116(%rbp)
movl $16, -112(%rbp)
movl $1, -108(%rbp)
movl $256, -104(%rbp)
movl $1, -100(%rbp)
movl $1, -96(%rbp)
pxor %xmm0, %xmm0
cvtsi2ssl %ebx, %xmm0
mulss .LC3(%rip), %xmm0
movaps %xmm0, %xmm1
movss .LC8(%rip), %xmm3
movaps %xmm0, %xmm2
andps %xmm3, %xmm2
movss .LC4(%rip), %xmm4
ucomiss %xmm2, %xmm4
jbe .L44
cvttss2sil %xmm0, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
cmpnless %xmm2, %xmm1
movss .LC6(%rip), %xmm4
andps %xmm4, %xmm1
addss %xmm2, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
.L44:
pxor %xmm0, %xmm0
cvtsi2ssl -212(%rbp), %xmm0
movaps %xmm0, %xmm2
mulss .LC3(%rip), %xmm2
movaps %xmm2, %xmm5
movss .LC8(%rip), %xmm4
movaps %xmm2, %xmm3
andps %xmm4, %xmm3
movss .LC4(%rip), %xmm6
ucomiss %xmm3, %xmm6
jbe .L45
cvttss2sil %xmm2, %eax
pxor %xmm3, %xmm3
cvtsi2ssl %eax, %xmm3
cmpnless %xmm3, %xmm5
movss .LC6(%rip), %xmm6
andps %xmm6, %xmm5
addss %xmm3, %xmm5
andnps %xmm2, %xmm4
orps %xmm4, %xmm5
.L45:
cvttss2siq %xmm5, %rax
movl %eax, -92(%rbp)
cvttss2siq %xmm1, %rdx
movl %edx, -88(%rbp)
movl $1, -84(%rbp)
pxor %xmm1, %xmm1
cvtsi2ssl -216(%rbp), %xmm1
mulss .LC3(%rip), %xmm1
movaps %xmm1, %xmm4
movss .LC8(%rip), %xmm3
movaps %xmm1, %xmm2
andps %xmm3, %xmm2
movss .LC4(%rip), %xmm5
ucomiss %xmm2, %xmm5
jbe .L46
cvttss2sil %xmm1, %edx
pxor %xmm2, %xmm2
cvtsi2ssl %edx, %xmm2
cmpnless %xmm2, %xmm4
movss .LC6(%rip), %xmm5
andps %xmm5, %xmm4
addss %xmm2, %xmm4
andnps %xmm1, %xmm3
orps %xmm3, %xmm4
.L46:
movl %eax, -80(%rbp)
cvttss2siq %xmm4, %rax
movl %eax, -76(%rbp)
movl $1, -72(%rbp)
mulss .LC7(%rip), %xmm0
movaps %xmm0, %xmm3
movss .LC8(%rip), %xmm2
movaps %xmm0, %xmm1
andps %xmm2, %xmm1
movss .LC4(%rip), %xmm4
ucomiss %xmm1, %xmm4
jbe .L47
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm3
movss .LC6(%rip), %xmm4
andps %xmm4, %xmm3
addss %xmm1, %xmm3
andnps %xmm0, %xmm2
orps %xmm2, %xmm3
.L47:
cvttss2siq %xmm3, %rax
movl %eax, -68(%rbp)
movl $1, -64(%rbp)
movl $1, -60(%rbp)
addq $16, %rsp
movl -108(%rbp), %ecx
movl $0, %r9d
movl $0, %r8d
movq -116(%rbp), %rdx
movq -92(%rbp), %rdi
movl -84(%rbp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L60
.L48:
movl -96(%rbp), %ecx
movl $0, %r9d
movl $0, %r8d
movq -104(%rbp), %rdx
movq -68(%rbp), %rdi
movl -60(%rbp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L61
.L49:
movl -108(%rbp), %ecx
movl $0, %r9d
movl $0, %r8d
movq -116(%rbp), %rdx
movq -80(%rbp), %rdi
movl -72(%rbp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L62
.L50:
movl -96(%rbp), %ecx
movl $0, %r9d
movl $0, %r8d
movq -104(%rbp), %rdx
movq -68(%rbp), %rdi
movl -60(%rbp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L63
.L51:
subq $8, %rsp
pushq $2
movl $1, %r9d
movq -224(%rbp), %rbx
movq %rbx, %r8
movq -152(%rbp), %rcx
movq -168(%rbp), %rdx
movq %rbx, %rsi
movq -264(%rbp), %rdi
call cudaMemcpy2D@PLT
movl $2, (%rsp)
movq %r14, %r9
movq %rbx, %r8
movq -128(%rbp), %rcx
movq -184(%rbp), %rdx
movq %rbx, %rsi
movq -256(%rbp), %rdi
call cudaMemcpy2D@PLT
movl $2, (%rsp)
movq %r14, %r9
movq %rbx, %r8
movq -136(%rbp), %rcx
movq -176(%rbp), %rdx
movq %rbx, %rsi
movq -272(%rbp), %rdi
call cudaMemcpy2D@PLT
addq $16, %rsp
movq -200(%rbp), %rdi
call cudaFree@PLT
movq -192(%rbp), %rdi
call cudaFree@PLT
movq -168(%rbp), %rdi
call cudaFree@PLT
movq -176(%rbp), %rdi
call cudaFree@PLT
jmp .L37
.L55:
movl %eax, %r13d
movq -200(%rbp), %rdi
call cudaFree@PLT
movl -228(%rbp), %eax
imull %eax, %ebx
leal 0(,%rbx,4), %esi
movl %r13d, %edi
call _Z17printErrorMessage9cudaErrori
.L37:
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L64
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L56:
.cfi_restore_state
movq -192(%rbp), %rdi
call cudaFree@PLT
movl -212(%rbp), %esi
movl -216(%rbp), %eax
imull %eax, %esi
sall $2, %esi
movl %r13d, %edi
call _Z17printErrorMessage9cudaErrori
jmp .L37
.L57:
movq -184(%rbp), %rdi
call cudaFree@PLT
movl -212(%rbp), %eax
imull %ebx, %eax
leal 0(,%rax,4), %esi
movl %r13d, %edi
call _Z17printErrorMessage9cudaErrori
jmp .L37
.L58:
movq -176(%rbp), %rdi
call cudaFree@PLT
movl -212(%rbp), %esi
movl -216(%rbp), %eax
imull %eax, %esi
sall $2, %esi
movl %r13d, %edi
call _Z17printErrorMessage9cudaErrori
jmp .L37
.L59:
movq -168(%rbp), %rdi
call cudaFree@PLT
movl -212(%rbp), %esi
sall $2, %esi
movl %r13d, %edi
call _Z17printErrorMessage9cudaErrori
jmp .L37
.L60:
movq %r12, %rdx
shrq $2, %rdx
pushq -184(%rbp)
movl -228(%rbp), %eax
pushq %rax
movl %r13d, %r9d
movl -212(%rbp), %r8d
movq -192(%rbp), %rcx
movl %ebx, %esi
movq -200(%rbp), %rdi
call _Z44__device_stub__Z15computeDistancePfiiS_iiiS_PfiiS_iiiS_
addq $16, %rsp
jmp .L48
.L61:
shrq $2, %r15
movq %r15, %r8
subq $8, %rsp
movl -216(%rbp), %eax
pushq %rax
movl %ebx, %r9d
movl %r13d, %ecx
movl -212(%rbp), %edx
movq -176(%rbp), %rsi
movq -184(%rbp), %rdi
call _Z30__device_stub__Z4sortPfPiiiiiiPfPiiiiii
addq $16, %rsp
jmp .L49
.L62:
movl -216(%rbp), %ecx
movl %r13d, %edx
movl -212(%rbp), %esi
movq -184(%rbp), %rdi
call _Z35__device_stub__Z12parallelSqrtPfiiiPfiii
jmp .L50
.L63:
movl -216(%rbp), %r8d
movq -168(%rbp), %rcx
movl %r13d, %edx
movl -212(%rbp), %esi
movq -184(%rbp), %rdi
call _Z28__device_stub__Z4meanPfiiS_iPfiiS_i
jmp .L51
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size _Z3knnPfiS_iiiS_S_Pi, .-_Z3knnPfiS_iiiS_S_Pi
.section .rodata.str1.1
.LC9:
.string "( "
.LC10:
.string " "
.LC11:
.string ")"
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl $524288, %edi
call malloc@PLT
movq %rax, %rbx
movq %rax, 24(%rsp)
movl $524288, %edi
call malloc@PLT
movq %rax, 16(%rsp)
movl $327680, %edi
call malloc@PLT
movq %rax, %r15
movl $16384, %edi
call malloc@PLT
movq %rax, 8(%rsp)
movl $327680, %edi
call malloc@PLT
movq %rax, 32(%rsp)
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
leaq 524288(%rbx), %rbp
.L66:
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbx, %rbp
jne .L66
movq 16(%rsp), %rax
movq %rax, %rbx
leaq 524288(%rax), %r12
.L67:
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbx, %r12
jne .L67
subq $8, %rsp
.cfi_def_cfa_offset 120
movq 40(%rsp), %rbx
pushq %rbx
.cfi_def_cfa_offset 128
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq %r15
.cfi_def_cfa_offset 144
movl $20, %r9d
movl $32, %r8d
movl $4096, %ecx
movq 48(%rsp), %rdx
movl $4096, %esi
movq 56(%rsp), %rdi
call _Z3knnPfiS_iiiS_S_Pi
leaq 327680(%rbx), %r13
addq $32, %rsp
.cfi_def_cfa_offset 112
movq $0, (%rsp)
leaq _ZSt4cout(%rip), %rbx
leaq .LC10(%rip), %rbp
movq %r15, 40(%rsp)
jmp .L82
.L97:
call _ZSt16__throw_bad_castv@PLT
.L70:
movq %r14, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r14), %rax
movl $10, %esi
movq %r14, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L71
.L98:
call _ZSt16__throw_bad_castv@PLT
.L73:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L74
.L99:
call _ZSt16__throw_bad_castv@PLT
.L77:
movq %r14, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r14), %rax
movl $10, %esi
movq %r14, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L78
.L100:
call _ZSt16__throw_bad_castv@PLT
.L80:
movq %r15, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r15), %rax
movl $10, %esi
movq %r15, %rdi
call *48(%rax)
movl %eax, %esi
.L81:
movsbl %sil, %esi
movq %r14, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $1, (%rsp)
movq (%rsp), %rax
addq $4, %r13
addq $4, %r12
cmpq $10, %rax
je .L96
.L82:
movl $2, %edx
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
leaq -524288(%r12), %r14
.L68:
pxor %xmm0, %xmm0
cvtss2sd (%r14), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %rbp, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $16384, %r14
cmpq %r12, %r14
jne .L68
movl $1, %edx
leaq .LC11(%rip), %rsi
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .L97
cmpb $0, 56(%r14)
je .L70
movzbl 67(%r14), %esi
.L71:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movq 8(%rsp), %rax
movq (%rsp), %rcx
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rcx,4), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %r14
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r14,%rax), %r15
testq %r15, %r15
je .L98
cmpb $0, 56(%r15)
je .L73
movzbl 67(%r15), %esi
.L74:
movsbl %sil, %esi
movq %r14, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
leaq -327680(%r13), %r14
.L75:
movl (%r14), %esi
movq %rbx, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %rbp, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $16384, %r14
cmpq %r13, %r14
jne .L75
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .L99
cmpb $0, 56(%r14)
je .L77
movzbl 67(%r14), %esi
.L78:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movq %rax, %r14
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r14,%rax), %r15
testq %r15, %r15
je .L100
cmpb $0, 56(%r15)
je .L80
movzbl 67(%r15), %esi
jmp .L81
.L96:
movq 40(%rsp), %r15
leaq 40(%r15), %rbp
leaq 327720(%r15), %r14
leaq _ZSt4cout(%rip), %r12
leaq .LC10(%rip), %r13
jmp .L83
.L101:
call _ZSt16__throw_bad_castv@PLT
.L86:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L87:
movsbl %sil, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $16384, %rbp
cmpq %r14, %rbp
je .L88
.L83:
leaq -40(%rbp), %rbx
.L84:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r13, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %rbp
jne .L84
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L101
cmpb $0, 56(%rbx)
je .L86
movzbl 67(%rbx), %esi
jmp .L87
.L88:
movq 24(%rsp), %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movq 32(%rsp), %rdi
call free@PLT
movl $0, %eax
addq $56, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z4meanPfiiS_i"
.LC13:
.string "_Z12parallelSqrtPfiii"
.LC14:
.string "_Z4sortPfPiiiiii"
.section .rodata.str1.8
.align 8
.LC15:
.string "_Z15computeDistancePfiiS_iiiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3705:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z4meanPfiiS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z12parallelSqrtPfiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z4sortPfPiiiiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z15computeDistancePfiiS_iiiS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3705:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1031798784
.align 4
.LC4:
.long 1258291200
.align 4
.LC6:
.long 1065353216
.align 4
.LC7:
.long 998244352
.align 4
.LC8:
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <iostream>
#include <math.h>
#include "cuda.h"
#include <time.h>
#define BLOCK_DIM 16
// Compute, for every (reference, query) pair, the squared Euclidean distance
// between reference column ty of A and query column tx of B, streaming the
// `dim` coordinate rows through shared memory BLOCK_DIM rows at a time.
//
// Expected launch: 2D grid of BLOCK_DIM x BLOCK_DIM blocks; block (x, y)
// covers queries starting at BLOCK_DIM*x and references starting at BLOCK_DIM*y.
//   A   : dim x wA reference matrix, row pitch pA (in elements)
//   B   : dim x wB query matrix, row pitch pB (in elements)
//   AB  : output of SQUARED distances, written with row pitch pB
//         NOTE(review): this assumes the output buffer shares B's pitch —
//         confirm against the caller's allocation of AB.
// The square root is NOT applied here (see the separate sqrt kernel).
__global__ void computeDistance(float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
// They will contain, for each thread, the current coordinates of A and B - block_dim in each step
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and sub-matrix of B (begin, step)
// NOTE(review): these scalars are stored by every thread with identical values
// and read before any barrier — a (benign) shared-memory race; plain per-thread
// locals would express the same thing without __shared__.
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x; // Local query point index
int ty = threadIdx.y; // Local ref point index
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y; // Each block has its own start on ref points
begin_B = BLOCK_DIM * blockIdx.x; // Each block has its own start on query points
step_A = BLOCK_DIM * pA; // next step = next row of the big matrix
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA; // Each submatrix treated by given block has BLOCK_DIM columns and dim rows
// Conditions
int cond0 = (begin_A + tx < wA); // current column is out of A
int cond1 = (begin_B + tx < wB); // current column is out of B
int cond2 = (begin_A + ty < wA); // ty is column number in A
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
// ty corresponds to row, tx to column in the resulting matrix, as well as ref and query points in input,
// but when copying to local memory, they work just as numbers for indeces (tx is column number in both cases)
// a/pA + ty is the row number in A corresponding to this thread in this block
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else {
// Out-of-range coordinate rows are zero-padded so the inner product loop
// below can run the full BLOCK_DIM without extra bounds checks.
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if(cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
// Selection sort: for each column of the distance matrix, move the k smallest
// distances into the first k rows (ascending) and record, in `ind`, the row
// each minimum was taken from.
//
// Launch layout: 1D grid, one thread per column (query point).
//   dist      : width x height distance matrix, row pitch `pitch` (elements)
//   ind       : width x k index matrix, row pitch `ind_pitch` (elements)
//   width     : number of columns (query points)
//   height    : number of rows (reference points)
//   k         : number of smallest elements to select per column
//
// NOTE(review): the stored index is the row the minimum occupied at selection
// time; after the first swap the rows no longer hold their original elements,
// so for l > 0 the index is relative to the partially sorted column, not to
// the original reference ordering — confirm this matches the caller's intent.
__global__ void sort(float *dist, int *ind, int width, int pitch, int ind_pitch, int height, int k){
    int l, i, min_index;
    float *p_dist;
    int *p_ind;
    float min_value, tmp;
    // xIndex is this thread's column in the matrix; guard the rounded-up grid.
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (xIndex < width) {
        p_dist = dist + xIndex;  // base of this thread's column in dist
        p_ind = ind + xIndex;    // base of this thread's column in ind
        // (the original dead store `min_value = *p_dist;` was removed: the
        // value is unconditionally overwritten before any read)
        for (l = 0; l < k; l++) {
            // Find the minimum over rows l..height-1 of this column.
            min_index = l;
            min_value = *(p_dist+l*pitch);
            for (i=l+1; i < height; i++) {
                if (*(p_dist+i*pitch) < min_value) {
                    min_index = i;
                    min_value = *(p_dist+i*pitch);
                }
            }
            // Swap the minimum into row l.
            if (min_index != l) {
                tmp = *(p_dist+min_index*pitch);
                *(p_dist+min_index*pitch) = *(p_dist+l*pitch);
                *(p_dist+l*pitch) = tmp;
            }
            p_ind[l*ind_pitch] = min_index;
        }
    }
}
// Element-wise square root of the first k rows of the distance matrix
// (turns the squared distances into Euclidean distances).
//
// Launch layout: 2D grid/blocks, one thread per (column, row) element.
//   dist  : matrix with row pitch `pitch` (in elements)
//   width : number of valid columns
//   k     : number of rows to process
__global__ void parallelSqrt(float *dist, int width, int pitch, int k) {
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    // Bounds guard: the grid is rounded up, so edge threads must not write.
    if (xIndex < width && yIndex < k)
        // sqrtf keeps the computation in single precision; the original
        // double-precision sqrt() forced a float->double->float round trip.
        dist[yIndex*pitch + xIndex] = sqrtf(dist[yIndex*pitch + xIndex]);
}
// For each column, compute the mean of the first k row entries of `dist`
// and store it in res[column].
//   dist  : matrix with row pitch `pitch` (in elements)
//   width : number of valid columns
//   res   : output vector of `width` means
//   k     : number of rows averaged per column
__global__ void mean(float *dist, int width, int pitch, float *res, int k) {
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < width) {
        float *column = dist + col;  // base of this thread's column
        float acc = 0;
        // Walk down the column one row (pitch elements) at a time.
        for (int off = 0; off < k*pitch; off += pitch)
            acc += column[off];
        res[col] = acc / k;
    }
}
// Report a failed device allocation on stdout: the CUDA error string plus the
// number of bytes that could not be allocated, framed by separator lines.
void printErrorMessage(cudaError_t error, int memorySize){
    const char *rule = "==================================================\n";
    printf("%s", rule);
    printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error));
    printf("Wished allocated memory : %d\n", memorySize);
    printf("%s", rule);
}
// Host driver for the k-nearest-neighbour search.
//
//   ref_host    : height x ref_width reference points (one dimension per row)
//   query_host  : height x query_width query points
//   dist_host   : out, k x query_width — k smallest distances per query (sorted)
//   res_host    : out, 1 x query_width — mean of the k smallest distances
//   ind_host    : out, k x query_width — rows the selected minima came from
//
// Device buffers are allocated with cudaMallocPitch; the kernels receive the
// pitch converted to elements. On any allocation failure every buffer
// allocated so far is released before returning (the original code leaked
// them, and also never freed dist_dev on success).
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, float* res_host, int *ind_host) {
    // Device buffers, NULL-initialized so cleanup is safe on any failure path.
    float *ref_dev = NULL;
    float *query_dev = NULL;
    float *dist_dev = NULL;
    int *ind_dev = NULL;
    float *res_dev = NULL;
    size_t ref_pitch_in_bytes;
    size_t query_pitch_in_bytes;
    size_t res_pitch_in_bytes;
    size_t ind_pitch_in_bytes;
    size_t dist_pitch_in_bytes;
    size_t ref_pitch;
    size_t query_pitch;
    size_t ind_pitch;
    cudaError_t result;
    // Allocate device memory; each failure path frees what was already allocated.
    result = cudaMallocPitch((void **) &ref_dev, &ref_pitch_in_bytes, ref_width * sizeof(float), height);
    if (result){
        printErrorMessage(result, ref_width*sizeof(float)*height);
        return;
    }
    result = cudaMallocPitch((void **) &query_dev, &query_pitch_in_bytes, query_width*sizeof(float), height);
    if (result){
        cudaFree(ref_dev);
        // was `*k`: this allocation has `height` rows, not k
        printErrorMessage(result, query_width*sizeof(float)*height);
        return;
    }
    result = cudaMallocPitch((void **) &dist_dev, &dist_pitch_in_bytes, query_width*sizeof(float), ref_width);
    if (result){
        cudaFree(ref_dev);
        cudaFree(query_dev);
        printErrorMessage(result, query_width*sizeof(float)*ref_width);
        return;
    }
    result = cudaMallocPitch((void **) &ind_dev, &ind_pitch_in_bytes, query_width*sizeof(int), k);
    if (result){
        cudaFree(ref_dev);
        cudaFree(query_dev);
        cudaFree(dist_dev);
        printErrorMessage(result, query_width*sizeof(int)*k);
        return;
    }
    result = cudaMallocPitch((void **) &res_dev, &res_pitch_in_bytes, query_width*sizeof(float), 1);
    if (result){
        cudaFree(ref_dev);
        cudaFree(query_dev);
        cudaFree(dist_dev);
        cudaFree(ind_dev);
        printErrorMessage(result, query_width*sizeof(float));
        return;
    }
    // Copy reference and query points to global memory
    cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*sizeof(float), ref_width*sizeof(float), height, cudaMemcpyHostToDevice);
    cudaMemcpy2D(query_dev, query_pitch_in_bytes, query_host, query_width*sizeof(float), query_width*sizeof(float), height, cudaMemcpyHostToDevice);
    // Convert byte pitches to element pitches for the kernels.
    ref_pitch = ref_pitch_in_bytes/sizeof(float);
    query_pitch = query_pitch_in_bytes/sizeof(float);
    ind_pitch = ind_pitch_in_bytes/sizeof(int);
    // Kernel dims: 2D kernels tile (query x ref) with 16x16 blocks; 1D kernels
    // use 256-thread blocks, one thread per query column.
    dim3 threads_per_block_2D(BLOCK_DIM, BLOCK_DIM, 1);
    dim3 threads_per_block_1D(BLOCK_DIM * BLOCK_DIM, 1, 1);
    dim3 blocks_2D(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) ref_width/BLOCK_DIM), 1);
    dim3 blocks_2D_k(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) k/BLOCK_DIM), 1);
    dim3 blocks_1D(std::ceil((float) query_width/(BLOCK_DIM*BLOCK_DIM)), 1, 1);
    // Start kernels.
    // NOTE(review): dist_dev is addressed with query_pitch inside the kernels,
    // which assumes dist_pitch_in_bytes == query_pitch_in_bytes. cudaMallocPitch
    // does not guarantee that, although both rows are query_width floats wide
    // so the rounded pitches normally coincide — confirm.
    computeDistance<<<blocks_2D, threads_per_block_2D>>>(ref_dev, ref_width, ref_pitch, query_dev, query_width, query_pitch, height, dist_dev);
    sort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, ind_dev, query_width, query_pitch, ind_pitch, ref_width, k);
    parallelSqrt<<<blocks_2D_k, threads_per_block_2D>>>(dist_dev, query_width, query_pitch, k);
    mean<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_width, query_pitch, res_dev, k);
    // Copy results back to the host (res source pitch fixed: was query's pitch).
    cudaMemcpy2D(res_host, query_width*sizeof(float), res_dev, res_pitch_in_bytes, query_width*sizeof(float), 1, cudaMemcpyDeviceToHost);
    cudaMemcpy2D(dist_host, query_width*sizeof(float), dist_dev, dist_pitch_in_bytes, query_width*sizeof(float), k, cudaMemcpyDeviceToHost);
    cudaMemcpy2D(ind_host, query_width*sizeof(int), ind_dev, ind_pitch_in_bytes, query_width*sizeof(int), k, cudaMemcpyDeviceToHost);
    // Release all device buffers (dist_dev was previously leaked).
    cudaFree(ref_dev);
    cudaFree(query_dev);
    cudaFree(dist_dev);
    cudaFree(res_dev);
    cudaFree(ind_dev);
}
// Generate random reference and query sets, run the kNN pipeline, and print
// the first few query points with their mean distance, neighbour indices,
// and the k x 10 block of sorted distances.
int main() {
    float *ref;     // dim x ref_nb reference points
    float *query;   // dim x query_nb query points
    float *dist;    // k x query_nb smallest distances (sorted per column)
    float *res;     // mean of the first k distances per query
    int *ind;       // k x query_nb neighbour indices
    int ref_nb = 4096;
    int query_nb = 4096;
    int dim = 32;
    int k = 20;
    // Allocate host memory
    ref = (float *) malloc(ref_nb * dim * sizeof(float));
    query = (float *) malloc(query_nb * dim * sizeof(float));
    dist = (float *) malloc(query_nb * k * sizeof(float));
    res = (float *) malloc(query_nb * 1 * sizeof(float)); // Mean of the first k distances in the sorted matrix
    ind = (int *) malloc(query_nb * k * sizeof(int));
    // Generate random data in [0, 100)
    srand(time(NULL));
    for (int i = 0; i<ref_nb * dim; i++) ref[i] = (float) (rand() % 100);
    for (int i = 0; i<query_nb * dim; i++) query[i] = (float) (rand() % 100);
    knn(ref, ref_nb, query, query_nb, dim, k, dist, res, ind);
    // Print the first 10 query points: coordinates, mean distance, indices.
    for (int j = 0; j < 10; j++) {
        std::cout << "( ";
        for (int i = 0; i < dim; i++) std::cout << query[i*query_nb+j] << " ";
        std::cout << ")" << std::endl;
        std::cout << res[j] << std::endl;
        for (int i = 0; i < k; i++) std::cout << ind[i*query_nb+j] << " ";
        std::cout << std::endl << std::endl;
    }
    // Print the first 10 columns of the k sorted distance rows.
    for (int i = 0; i < k; i++) {
        for (int j = 0; j < 10; j++) {
            std::cout << dist[i*query_nb + j] << " ";
        }
        std::cout << std::endl;
    }
    // Release host memory (`res` was previously leaked).
    free(ref); free(query); free(dist); free(res); free(ind);
    return 0;
}
#include <iostream>
#include <math.h>
#include "hip/hip_runtime.h"
#include <time.h>
#define BLOCK_DIM 16
// HIP port of computeDistance (identical device code to the CUDA version):
// computes, for every (reference, query) pair, the SQUARED Euclidean distance,
// streaming the `dim` coordinate rows through shared memory BLOCK_DIM at a time.
//
// Expected launch: 2D grid of BLOCK_DIM x BLOCK_DIM blocks; block (x, y)
// covers queries starting at BLOCK_DIM*x and references starting at BLOCK_DIM*y.
//   A   : dim x wA reference matrix, row pitch pA (in elements)
//   B   : dim x wB query matrix, row pitch pB (in elements)
//   AB  : output of squared distances, written with row pitch pB
//         NOTE(review): assumes the output buffer shares B's pitch — confirm.
__global__ void computeDistance(float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
// They will contain, for each thread, the current coordinates of A and B - block_dim in each step
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and sub-matrix of B (begin, step)
// NOTE(review): these scalars are stored by every thread with identical values
// and read before any barrier — a (benign) shared-memory race; per-thread
// locals would avoid it.
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x; // Local query point index
int ty = threadIdx.y; // Local ref point index
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y; // Each block has its own start on ref points
begin_B = BLOCK_DIM * blockIdx.x; // Each block has its own start on query points
step_A = BLOCK_DIM * pA; // next step = next row of the big matrix
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA; // Each submatrix treated by given block has BLOCK_DIM columns and dim rows
// Conditions
int cond0 = (begin_A + tx < wA); // current column is out of A
int cond1 = (begin_B + tx < wB); // current column is out of B
int cond2 = (begin_A + ty < wA); // ty is column number in A
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
// ty corresponds to row, tx to column in the resulting matrix, as well as ref and query points in input,
// but when copying to local memory, they work just as numbers for indeces (tx is column number in both cases)
// a/pA + ty is the row number in A corresponding to this thread in this block
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else {
// Out-of-range coordinate rows are zero-padded so the inner loop can run
// the full BLOCK_DIM without extra bounds checks.
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if(cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
// Selection sort (HIP port): for each column of the distance matrix, move the
// k smallest distances into the first k rows (ascending) and record, in `ind`,
// the row each minimum was taken from.
//
// Launch layout: 1D grid, one thread per column (query point).
//   dist      : width x height distance matrix, row pitch `pitch` (elements)
//   ind       : width x k index matrix, row pitch `ind_pitch` (elements)
//   height    : number of rows (reference points)
//   k         : number of smallest elements to select per column
//
// NOTE(review): the stored index is the row the minimum occupied at selection
// time; after the first swap the rows no longer hold their original elements,
// so for l > 0 the index is relative to the partially sorted column — confirm
// this matches the caller's intent.
__global__ void sort(float *dist, int *ind, int width, int pitch, int ind_pitch, int height, int k){
    int l, i, min_index;
    float *p_dist;
    int *p_ind;
    float min_value, tmp;
    // xIndex is this thread's column in the matrix; guard the rounded-up grid.
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (xIndex < width) {
        p_dist = dist + xIndex;  // base of this thread's column in dist
        p_ind = ind + xIndex;    // base of this thread's column in ind
        // (the original dead store `min_value = *p_dist;` was removed: the
        // value is unconditionally overwritten before any read)
        for (l = 0; l < k; l++) {
            // Find the minimum over rows l..height-1 of this column.
            min_index = l;
            min_value = *(p_dist+l*pitch);
            for (i=l+1; i < height; i++) {
                if (*(p_dist+i*pitch) < min_value) {
                    min_index = i;
                    min_value = *(p_dist+i*pitch);
                }
            }
            // Swap the minimum into row l.
            if (min_index != l) {
                tmp = *(p_dist+min_index*pitch);
                *(p_dist+min_index*pitch) = *(p_dist+l*pitch);
                *(p_dist+l*pitch) = tmp;
            }
            p_ind[l*ind_pitch] = min_index;
        }
    }
}
// Element-wise square root of the first k rows of the distance matrix
// (turns the squared distances into Euclidean distances). HIP port.
//
// Launch layout: 2D grid/blocks, one thread per (column, row) element.
__global__ void parallelSqrt(float *dist, int width, int pitch, int k) {
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    // Bounds guard: the grid is rounded up, so edge threads must not write.
    if (xIndex < width && yIndex < k)
        // sqrtf keeps the computation in single precision; the original
        // double-precision sqrt() forced a float->double->float round trip.
        dist[yIndex*pitch + xIndex] = sqrtf(dist[yIndex*pitch + xIndex]);
}
// For each column, compute the mean of the first k row entries of `dist`
// and store it in res[column]. HIP port.
//   dist  : matrix with row pitch `pitch` (in elements)
//   width : number of valid columns
//   res   : output vector of `width` means
//   k     : number of rows averaged per column
__global__ void mean(float *dist, int width, int pitch, float *res, int k) {
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < width) {
        float *column = dist + col;  // base of this thread's column
        float acc = 0;
        // Walk down the column one row (pitch elements) at a time.
        for (int off = 0; off < k*pitch; off += pitch)
            acc += column[off];
        res[col] = acc / k;
    }
}
// Report a failed device allocation on stdout: the HIP error string plus the
// number of bytes that could not be allocated, framed by separator lines.
void printErrorMessage(hipError_t error, int memorySize){
    const char *rule = "==================================================\n";
    printf("%s", rule);
    printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error));
    printf("Wished allocated memory : %d\n", memorySize);
    printf("%s", rule);
}
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, float* res_host, int *ind_host) {
// Initialize variables
float *ref_dev;
float *query_dev;
float *dist_dev;
int *ind_dev;
float *res_dev;
size_t ref_pitch_in_bytes;
size_t query_pitch_in_bytes;
size_t res_pitch_in_bytes;
size_t ind_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t ref_pitch;
size_t query_pitch;
// size_t res_pitch;
size_t ind_pitch;
hipError_t result;
// Allocate device memory
result = hipMallocPitch((void **) &ref_dev, &ref_pitch_in_bytes, ref_width * sizeof(float), height);
if (result){
hipFree(ref_dev);
printErrorMessage(result, ref_width*sizeof(float)*height);
return;
}
result = hipMallocPitch((void **) &query_dev, &query_pitch_in_bytes, query_width*sizeof(float), height);
if (result){
hipFree(query_dev);
printErrorMessage(result, query_width*sizeof(float)*k);
return;
}
result = hipMallocPitch((void **) &dist_dev, &dist_pitch_in_bytes, query_width*sizeof(float), ref_width);
if (result){
hipFree(dist_dev);
printErrorMessage(result, query_width*sizeof(float)*ref_width);
return;
}
result = hipMallocPitch((void **) &ind_dev, &ind_pitch_in_bytes, query_width*sizeof(int), k);
if (result){
hipFree(ind_dev);
printErrorMessage(result, query_width*sizeof(int)*k);
return;
}
result = hipMallocPitch((void **) &res_dev, &res_pitch_in_bytes, query_width*sizeof(float), 1);
if (result){
hipFree(res_dev);
printErrorMessage(result, query_width*sizeof(float));
return;
}
// Copy reference and query points to global memory
hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*sizeof(float), ref_width*sizeof(float), height, hipMemcpyHostToDevice);
hipMemcpy2D(query_dev, query_pitch_in_bytes, query_host, query_width*sizeof(float), query_width*sizeof(float), height, hipMemcpyHostToDevice);
// Compute the pitches
ref_pitch = ref_pitch_in_bytes/sizeof(float);
query_pitch = query_pitch_in_bytes/sizeof(float);
// res_pitch = res_pitch_in_bytes/sizeof(float);
ind_pitch = ind_pitch_in_bytes/sizeof(int);
// Set kernel dims
// Each block has 16x16 threads, and processes 1/16 of ref width
// It creates a local 16x16 matrix, which goes down the rows
// The number of blocks depends on nb_ref, threads/block is fixed
dim3 threads_per_block_2D(BLOCK_DIM, BLOCK_DIM, 1);
dim3 threads_per_block_1D(BLOCK_DIM * BLOCK_DIM, 1, 1);
dim3 blocks_2D(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) ref_width/BLOCK_DIM), 1);
dim3 blocks_2D_k(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) k/BLOCK_DIM), 1);
dim3 blocks_1D(std::ceil((float) query_width/(BLOCK_DIM*BLOCK_DIM)), 1, 1);
// Start kernels
computeDistance<<<blocks_2D, threads_per_block_2D>>>(ref_dev, ref_width, ref_pitch, query_dev, query_width, query_pitch, height, dist_dev);
sort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, ind_dev, query_width, query_pitch, ind_pitch, ref_width, k);
// insertionSort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_pitch, ind_dev, ind_pitch, query_width, ref_width, k);
parallelSqrt<<<blocks_2D_k, threads_per_block_2D>>>(dist_dev, query_width, query_pitch, k);
mean<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_width, query_pitch, res_dev, k);
// Copy memory from device to host
hipMemcpy2D(res_host, query_width*sizeof(float), res_dev, query_pitch_in_bytes, query_width*sizeof(float), 1, hipMemcpyDeviceToHost);
hipMemcpy2D(dist_host, query_width*sizeof(float), dist_dev, dist_pitch_in_bytes, query_width*sizeof(float),k , hipMemcpyDeviceToHost);
hipMemcpy2D(ind_host, query_width*sizeof(int) , ind_dev, ind_pitch_in_bytes, query_width*sizeof(int) , k, hipMemcpyDeviceToHost);
hipFree(ref_dev); hipFree(query_dev); hipFree(res_dev); hipFree(ind_dev);
}
int main() {
// Initialize variables
float *ref;
float *query;
float *dist;
float *res;
int *ind;
int ref_nb = 4096;
int query_nb = 4096;
int dim = 32;
int k = 20;
// Allocate host memory
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(query_nb * k * sizeof(float));
res = (float *) malloc(query_nb * 1 * sizeof(float)); // Mean of the first k distances in the sorted matrix
ind = (int *) malloc(query_nb * k * sizeof(int));
// Generate random data
srand(time(NULL));
for (int i = 0; i<ref_nb * dim; i++) ref[i] = (float) (rand() % 100);
for (int i = 0; i<query_nb * dim; i++) query[i] = (float) (rand() % 100);
knn(ref, ref_nb, query, query_nb, dim, k, dist, res, ind);
for (int j = 0; j < 10; j++) {
std::cout << "( ";
for (int i = 0; i < dim; i++) std::cout << query[i*query_nb+j] << " ";
std::cout << ")" << std::endl;
std::cout << res[j] << std::endl;
for (int i = 0; i < k; i++) std::cout << ind[i*query_nb+j] << " ";
std::cout << std::endl << std::endl;
}
for (int i = 0; i < k; i++) {
for (int j = 0; j < 10; j++) {
std::cout << dist[i*query_nb + j] << " ";
}
std::cout << std::endl;
}
free(ref); free(query); free(dist); free(ind);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <iostream>
#include <math.h>
#include "hip/hip_runtime.h"
#include <time.h>
#define BLOCK_DIM 16
__global__ void computeDistance(float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
// They will contain, for each thread, the current coordinates of A and B - block_dim in each step
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x; // Local query point index
int ty = threadIdx.y; // Local ref point index
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y; // Each block has its own start on ref points
begin_B = BLOCK_DIM * blockIdx.x; // Each block has its own start on query points
step_A = BLOCK_DIM * pA; // next step = next row of the big matrix
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA; // Each submatrix treated by given block has BLOCK_DIM columns and dim rows
// Conditions
int cond0 = (begin_A + tx < wA); // current column is out of A
int cond1 = (begin_B + tx < wB); // current column is out of B
int cond2 = (begin_A + ty < wA); // ty is column number in A
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
// ty corresponds to row, tx to column in the resulting matrix, as well as ref and query points in input,
// but when copying to local memory, they work just as numbers for indeces (tx is column number in both cases)
// a/pA + ty is the row number in A corresponding to this thread in this block
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if(cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
// Selection sort
__global__ void sort(float *dist, int *ind, int width, int pitch, int ind_pitch, int height, int k){
// Variables
int l, i, min_index;
float *p_dist;
int *p_ind;
float min_value, tmp;
// xIndex is column in the sorted matrix
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
p_dist = dist+xIndex;
p_ind = ind+xIndex;
min_value = *p_dist;
for (l = 0; l < k; l++) {
min_index = l;
min_value = *(p_dist+l*pitch);
for (i=l+1; i < height; i++) {
if (*(p_dist+i*pitch) < min_value) {
min_index = i;
min_value = *(p_dist+i*pitch);
}
}
if (min_index != l) {
tmp = *(p_dist+min_index*pitch);
*(p_dist+min_index*pitch) = *(p_dist+l*pitch);
*(p_dist+l*pitch) = tmp;
}
p_ind[l*ind_pitch] = min_index;
}
}
}
__global__ void parallelSqrt(float *dist, int width, int pitch, int k) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
// Compute the mean of the first k elements
__global__ void mean(float *dist, int width, int pitch, float *res, int k) {
float sum;
float *p;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
sum = 0;
p = dist + xIndex;
for (int l = 0; l < k*pitch; l += pitch) sum += *(p+l);
res[xIndex] = sum/k;
}
}
void printErrorMessage(hipError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error));
printf("Wished allocated memory : %d\n", memorySize);
printf("==================================================\n");
}
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, float* res_host, int *ind_host) {
// Initialize variables
float *ref_dev;
float *query_dev;
float *dist_dev;
int *ind_dev;
float *res_dev;
size_t ref_pitch_in_bytes;
size_t query_pitch_in_bytes;
size_t res_pitch_in_bytes;
size_t ind_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t ref_pitch;
size_t query_pitch;
// size_t res_pitch;
size_t ind_pitch;
hipError_t result;
// Allocate device memory
result = hipMallocPitch((void **) &ref_dev, &ref_pitch_in_bytes, ref_width * sizeof(float), height);
if (result){
hipFree(ref_dev);
printErrorMessage(result, ref_width*sizeof(float)*height);
return;
}
result = hipMallocPitch((void **) &query_dev, &query_pitch_in_bytes, query_width*sizeof(float), height);
if (result){
hipFree(query_dev);
printErrorMessage(result, query_width*sizeof(float)*k);
return;
}
result = hipMallocPitch((void **) &dist_dev, &dist_pitch_in_bytes, query_width*sizeof(float), ref_width);
if (result){
hipFree(dist_dev);
printErrorMessage(result, query_width*sizeof(float)*ref_width);
return;
}
result = hipMallocPitch((void **) &ind_dev, &ind_pitch_in_bytes, query_width*sizeof(int), k);
if (result){
hipFree(ind_dev);
printErrorMessage(result, query_width*sizeof(int)*k);
return;
}
result = hipMallocPitch((void **) &res_dev, &res_pitch_in_bytes, query_width*sizeof(float), 1);
if (result){
hipFree(res_dev);
printErrorMessage(result, query_width*sizeof(float));
return;
}
// Copy reference and query points to global memory
hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*sizeof(float), ref_width*sizeof(float), height, hipMemcpyHostToDevice);
hipMemcpy2D(query_dev, query_pitch_in_bytes, query_host, query_width*sizeof(float), query_width*sizeof(float), height, hipMemcpyHostToDevice);
// Compute the pitches
ref_pitch = ref_pitch_in_bytes/sizeof(float);
query_pitch = query_pitch_in_bytes/sizeof(float);
// res_pitch = res_pitch_in_bytes/sizeof(float);
ind_pitch = ind_pitch_in_bytes/sizeof(int);
// Set kernel dims
// Each block has 16x16 threads, and processes 1/16 of ref width
// It creates a local 16x16 matrix, which goes down the rows
// The number of blocks depends on nb_ref, threads/block is fixed
dim3 threads_per_block_2D(BLOCK_DIM, BLOCK_DIM, 1);
dim3 threads_per_block_1D(BLOCK_DIM * BLOCK_DIM, 1, 1);
dim3 blocks_2D(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) ref_width/BLOCK_DIM), 1);
dim3 blocks_2D_k(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) k/BLOCK_DIM), 1);
dim3 blocks_1D(std::ceil((float) query_width/(BLOCK_DIM*BLOCK_DIM)), 1, 1);
// Start kernels
computeDistance<<<blocks_2D, threads_per_block_2D>>>(ref_dev, ref_width, ref_pitch, query_dev, query_width, query_pitch, height, dist_dev);
sort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, ind_dev, query_width, query_pitch, ind_pitch, ref_width, k);
// insertionSort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_pitch, ind_dev, ind_pitch, query_width, ref_width, k);
parallelSqrt<<<blocks_2D_k, threads_per_block_2D>>>(dist_dev, query_width, query_pitch, k);
mean<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_width, query_pitch, res_dev, k);
// Copy memory from device to host
hipMemcpy2D(res_host, query_width*sizeof(float), res_dev, query_pitch_in_bytes, query_width*sizeof(float), 1, hipMemcpyDeviceToHost);
hipMemcpy2D(dist_host, query_width*sizeof(float), dist_dev, dist_pitch_in_bytes, query_width*sizeof(float),k , hipMemcpyDeviceToHost);
hipMemcpy2D(ind_host, query_width*sizeof(int) , ind_dev, ind_pitch_in_bytes, query_width*sizeof(int) , k, hipMemcpyDeviceToHost);
hipFree(ref_dev); hipFree(query_dev); hipFree(res_dev); hipFree(ind_dev);
}
int main() {
// Initialize variables
float *ref;
float *query;
float *dist;
float *res;
int *ind;
int ref_nb = 4096;
int query_nb = 4096;
int dim = 32;
int k = 20;
// Allocate host memory
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(query_nb * k * sizeof(float));
res = (float *) malloc(query_nb * 1 * sizeof(float)); // Mean of the first k distances in the sorted matrix
ind = (int *) malloc(query_nb * k * sizeof(int));
// Generate random data
srand(time(NULL));
for (int i = 0; i<ref_nb * dim; i++) ref[i] = (float) (rand() % 100);
for (int i = 0; i<query_nb * dim; i++) query[i] = (float) (rand() % 100);
knn(ref, ref_nb, query, query_nb, dim, k, dist, res, ind);
for (int j = 0; j < 10; j++) {
std::cout << "( ";
for (int i = 0; i < dim; i++) std::cout << query[i*query_nb+j] << " ";
std::cout << ")" << std::endl;
std::cout << res[j] << std::endl;
for (int i = 0; i < k; i++) std::cout << ind[i*query_nb+j] << " ";
std::cout << std::endl << std::endl;
}
for (int i = 0; i < k; i++) {
for (int j = 0; j < 10; j++) {
std::cout << dist[i*query_nb + j] << " ";
}
std::cout << std::endl;
}
free(ref); free(query); free(dist); free(ind);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15computeDistancePfiiS_iiiS_
.globl _Z15computeDistancePfiiS_iiiS_
.p2align 8
.type _Z15computeDistancePfiiS_iiiS_,@function
_Z15computeDistancePfiiS_iiiS_:
s_clause 0x2
s_load_b64 s[10:11], s[0:1], 0x8
s_load_b32 s12, s[0:1], 0x20
s_load_b64 s[4:5], s[0:1], 0x18
v_dual_mov_b32 v4, 0 :: v_dual_and_b32 v1, 0x3ff, v0
s_movk_i32 s2, 0x800
s_movk_i32 s3, 0x800
v_bfe_u32 v0, v0, 10, 10
s_lshl_b32 s13, s15, 4
s_lshl_b32 s16, s14, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v3, s16
v_add_nc_u32_e32 v7, s16, v1
ds_store_b32 v4, v3 offset:2064
s_waitcnt lgkmcnt(0)
s_lshl_b32 s6, s5, 4
v_dual_mov_b32 v10, s6 :: v_dual_add_nc_u32 v5, s2, v4
s_lshl_b32 s2, s11, 4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v9, s2 :: v_dual_add_nc_u32 v6, s3, v4
s_add_i32 s3, s12, -1
v_cmp_gt_i32_e64 s2, s4, v7
s_mul_i32 s3, s3, s11
s_add_i32 s7, s3, s13
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v11, s7 :: v_dual_add_nc_u32 v8, s13, v0
s_cmp_gt_i32 s13, s7
ds_store_2addr_b32 v5, v10, v2 offset0:2 offset1:3
ds_store_2addr_b32 v6, v11, v9 offset1:1
v_cmp_gt_i32_e64 s3, s10, v8
s_cbranch_scc1 .LBB0_14
s_ashr_i32 s14, s11, 31
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x10
s_add_i32 s4, s11, s14
v_lshlrev_b32_e32 v10, 2, v1
s_xor_b32 s15, s4, s14
v_dual_mov_b32 v6, s16 :: v_dual_add_nc_u32 v9, s13, v1
v_cvt_f32_u32_e32 v2, s15
v_lshlrev_b32_e32 v11, 6, v0
v_add_nc_u32_e32 v7, 0x400, v10
v_lshlrev_b32_e32 v5, 2, v0
v_cmp_gt_i32_e64 s4, s10, v9
v_rcp_iflag_f32_e32 v8, v2
v_mad_u64_u32 v[2:3], null, v0, s11, v[1:2]
s_and_b32 s10, s3, s2
s_sub_i32 s11, 0, s15
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[3:4], null, v0, s5, v[1:2]
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v4, 0x4f7ffffe, v8
v_dual_mov_b32 v8, 0 :: v_dual_add_nc_u32 v9, v11, v10
v_add_nc_u32_e32 v10, v7, v11
v_cvt_u32_f32_e32 v11, v4
v_mov_b32_e32 v4, 0
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s16
s_movk_i32 s16, 0x800
s_delay_alu instid0(SALU_CYCLE_1)
v_add_nc_u32_e32 v12, s16, v8
s_barrier
buffer_gl0_inv
ds_load_2addr_b32 v[12:13], v12 offset1:1
ds_load_b32 v14, v8 offset:2056
s_waitcnt lgkmcnt(1)
v_readfirstlane_b32 s16, v13
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v14, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s16, s13
v_cmp_gt_i32_e32 vcc_lo, s13, v12
s_cbranch_vccnz .LBB0_14
.LBB0_3:
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_readfirstlane_b32 s16, v11
s_ashr_i32 s17, s13, 31
s_add_i32 s19, s13, s17
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_xor_b32 s19, s19, s17
s_mul_i32 s18, s11, s16
s_xor_b32 s17, s17, s14
s_mul_hi_u32 s18, s16, s18
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s16, s16, s18
s_mul_hi_u32 s16, s19, s16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s18, s16, s15
s_sub_i32 s18, s19, s18
s_add_i32 s19, s16, 1
s_sub_i32 s20, s18, s15
s_cmp_ge_u32 s18, s15
s_cselect_b32 s16, s19, s16
s_cselect_b32 s18, s20, s18
s_add_i32 s19, s16, 1
s_cmp_ge_u32 s18, s15
s_cselect_b32 s16, s19, s16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s16, s16, s17
s_sub_i32 s16, s16, s17
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v12, s16, v0
s_mov_b32 s16, exec_lo
v_cmpx_le_i32_e64 s12, v12
s_xor_b32 s16, exec_lo, s16
s_cbranch_execz .LBB0_5
ds_store_b32 v9, v8
ds_store_b32 v10, v8
.LBB0_5:
s_and_not1_saveexec_b32 s16, s16
s_cbranch_execz .LBB0_11
v_dual_mov_b32 v12, 0 :: v_dual_mov_b32 v13, 0
s_and_saveexec_b32 s17, s4
s_cbranch_execz .LBB0_8
v_add_nc_u32_e32 v13, s13, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v14, 31, v13
v_lshlrev_b64 v[13:14], 2, v[13:14]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v13, vcc_lo, s6, v13
v_add_co_ci_u32_e32 v14, vcc_lo, s7, v14, vcc_lo
global_load_b32 v13, v[13:14], off
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s17
s_waitcnt vmcnt(0)
ds_store_b32 v9, v13
s_and_saveexec_b32 s17, s2
s_cbranch_execz .LBB0_10
v_add_nc_u32_e32 v12, v3, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v13, 31, v12
v_lshlrev_b64 v[12:13], 2, v[12:13]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v12, vcc_lo, s8, v12
v_add_co_ci_u32_e32 v13, vcc_lo, s9, v13, vcc_lo
global_load_b32 v12, v[12:13], off
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s17
s_waitcnt vmcnt(0)
ds_store_b32 v10, v12
.LBB0_11:
s_or_b32 exec_lo, exec_lo, s16
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s16, s10
s_cbranch_execz .LBB0_2
s_mov_b32 s17, 0
.LBB0_13:
s_delay_alu instid0(SALU_CYCLE_1)
v_add_nc_u32_e32 v12, s17, v5
v_add_nc_u32_e32 v13, s17, v7
s_add_i32 s17, s17, 64
ds_load_b32 v12, v12
ds_load_b32 v13, v13
s_cmpk_lg_i32 s17, 0x400
s_waitcnt lgkmcnt(0)
v_sub_f32_e32 v12, v12, v13
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v4, v12, v12
s_cbranch_scc1 .LBB0_13
s_branch .LBB0_2
.LBB0_14:
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s3, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_16
v_mov_b32_e32 v2, 0
s_movk_i32 s2, 0x800
s_load_b64 s[0:1], s[0:1], 0x28
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, s2, v2
ds_load_2addr_b32 v[2:3], v2 offset0:3 offset1:4
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v0, v2, v0
v_mul_lo_u32 v0, v0, s5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, v3, v1, v0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
.LBB0_16:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15computeDistancePfiiS_iiiS_
.amdhsa_group_segment_fixed_size 2068
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 48
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 21
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15computeDistancePfiiS_iiiS_, .Lfunc_end0-_Z15computeDistancePfiiS_iiiS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4sortPfPiiiiii
.globl _Z4sortPfPiiiiii
.p2align 8
.type _Z4sortPfPiiiiii,@function
_Z4sortPfPiiiiii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB1_9
s_load_b32 s4, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s4, 1
s_cbranch_scc1 .LBB1_9
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x14
v_mov_b32_e32 v2, 0
s_load_b32 s5, s[0:1], 0x1c
s_mov_b32 s7, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s8, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v1, vcc_lo
s_mov_b32 s6, s2
s_branch .LBB1_4
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s0
s_mul_i32 s0, s7, s3
s_add_i32 s6, s6, s2
s_ashr_i32 s1, s0, 31
s_mov_b32 s7, s8
s_lshl_b64 s[0:1], s[0:1], 2
s_cmp_lg_u32 s8, s4
v_add_co_u32 v0, vcc_lo, v4, s0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo
global_store_b32 v[0:1], v6, off
s_cbranch_scc0 .LBB1_9
.LBB1_4:
s_mul_i32 s0, s7, s2
v_mov_b32_e32 v6, s7
s_ashr_i32 s1, s0, 31
s_add_i32 s8, s7, 1
s_lshl_b64 s[0:1], s[0:1], 2
s_cmp_ge_i32 s8, s5
v_add_co_u32 v0, vcc_lo, v2, s0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo
global_load_b32 v7, v[0:1], off
s_cbranch_scc1 .LBB1_7
v_mov_b32_e32 v6, s7
s_waitcnt vmcnt(0)
v_mov_b32_e32 v8, v7
s_mov_b32 s0, s6
s_mov_b32 s9, s8
.LBB1_6:
s_ashr_i32 s1, s0, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[10:11], s[0:1], 2
s_add_i32 s0, s0, s2
v_add_co_u32 v9, vcc_lo, v2, s10
v_add_co_ci_u32_e32 v10, vcc_lo, s11, v3, vcc_lo
global_load_b32 v9, v[9:10], off
s_waitcnt vmcnt(0)
v_cmp_lt_f32_e32 vcc_lo, v9, v8
v_cndmask_b32_e64 v6, v6, s9, vcc_lo
v_cndmask_b32_e32 v8, v8, v9, vcc_lo
s_add_i32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_i32 s9, s5
s_cbranch_scc1 .LBB1_6
.LBB1_7:
s_mov_b32 s0, exec_lo
v_cmpx_ne_u32_e64 s7, v6
s_cbranch_execz .LBB1_3
v_mul_lo_u32 v8, v6, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v9, 31, v8
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v8, vcc_lo, v2, v8
v_add_co_ci_u32_e32 v9, vcc_lo, v3, v9, vcc_lo
global_load_b32 v10, v[8:9], off
s_waitcnt vmcnt(1)
global_store_b32 v[8:9], v7, off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v10, off
s_branch .LBB1_3
.LBB1_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4sortPfPiiiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4sortPfPiiiiii, .Lfunc_end1-_Z4sortPfPiiiiii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12parallelSqrtPfiii
.globl _Z12parallelSqrtPfiii
.p2align 8
.type _Z12parallelSqrtPfiii,@function
_Z12parallelSqrtPfiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x8
s_load_b32 s4, s[0:1], 0x10
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s5, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_u32_e32 vcc_lo, s3, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u32_e64 s2, s4, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB2_2
s_clause 0x1
s_load_b32 s2, s[0:1], 0xc
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1]
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, 0x4f800000, v2
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v2
v_cndmask_b32_e32 v2, v2, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_sqrt_f32_e32 v3, v2
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v4, -1, v3
v_add_nc_u32_e32 v5, 1, v3
v_fma_f32 v6, -v4, v3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v7, -v5, v3, v2
v_cmp_ge_f32_e64 s0, 0, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v3, v3, v4, s0
v_cmp_lt_f32_e64 s0, 0, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v3, v3, v5, s0
v_mul_f32_e32 v4, 0x37800000, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v3, v3, v4, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v2, 0x260
v_cndmask_b32_e32 v2, v3, v2, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB2_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12parallelSqrtPfiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z12parallelSqrtPfiii, .Lfunc_end2-_Z12parallelSqrtPfiii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4meanPfiiS_i
.globl _Z4meanPfiiS_i
.p2align 8
.type _Z4meanPfiiS_i,@function
_Z4meanPfiiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB3_5
s_clause 0x1
s_load_b32 s2, s[0:1], 0xc
s_load_b32 s6, s[0:1], 0x18
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_mov_b32_e32 v0, v2
s_waitcnt lgkmcnt(0)
s_mul_i32 s7, s6, s2
s_cmp_lt_i32 s7, 1
s_cbranch_scc1 .LBB3_4
s_load_b64 s[4:5], s[0:1], 0x0
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_mov_b32_e32 v0, 0
s_ashr_i32 s3, s2, 31
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_lshl_b64 s[4:5], s[2:3], 2
s_mov_b32 s3, 0
.LBB3_3:
global_load_b32 v5, v[3:4], off
v_add_co_u32 v3, vcc_lo, v3, s4
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_add_i32 s3, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s3, s7
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v5
s_cbranch_scc0 .LBB3_3
.LBB3_4:
v_cvt_f32_i32_e32 v3, s6
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v4, null, v3, v3, v0
v_div_scale_f32 v7, vcc_lo, v0, v3, v0
v_rcp_f32_e32 v5, v4
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v4, v5, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v5, v6, v5
v_mul_f32_e32 v6, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v8, -v4, v6, v7
v_fmac_f32_e32 v6, v8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v4, -v4, v6, v7
v_div_fmas_f32 v4, v4, v5, v6
s_delay_alu instid0(VALU_DEP_1)
v_div_fixup_f32 v3, v4, v3, v0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB3_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4meanPfiiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z4meanPfiiS_i, .Lfunc_end3-_Z4meanPfiiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 2068
.kernarg_segment_align: 8
.kernarg_segment_size: 48
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15computeDistancePfiiS_iiiS_
.private_segment_fixed_size: 0
.sgpr_count: 23
.sgpr_spill_count: 0
.symbol: _Z15computeDistancePfiiS_iiiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4sortPfPiiiiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4sortPfPiiiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12parallelSqrtPfiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12parallelSqrtPfiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4meanPfiiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4meanPfiiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <iostream>
#include <math.h>
#include "hip/hip_runtime.h"
#include <time.h>
#define BLOCK_DIM 16
__global__ void computeDistance(float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB) {
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
// They will contain, for each thread, the current coordinates of A and B - block_dim in each step
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x; // Local query point index
int ty = threadIdx.y; // Local ref point index
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y; // Each block has its own start on ref points
begin_B = BLOCK_DIM * blockIdx.x; // Each block has its own start on query points
step_A = BLOCK_DIM * pA; // next step = next row of the big matrix
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA; // Each submatrix treated by given block has BLOCK_DIM columns and dim rows
// Conditions
int cond0 = (begin_A + tx < wA); // current column is out of A
int cond1 = (begin_B + tx < wB); // current column is out of B
int cond2 = (begin_A + ty < wA); // ty is column number in A
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
// ty corresponds to row, tx to column in the resulting matrix, as well as ref and query points in input,
// but when copying to local memory, they work just as numbers for indeces (tx is column number in both cases)
// a/pA + ty is the row number in A corresponding to this thread in this block
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if(cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
// Selection sort
__global__ void sort(float *dist, int *ind, int width, int pitch, int ind_pitch, int height, int k){
// Variables
int l, i, min_index;
float *p_dist;
int *p_ind;
float min_value, tmp;
// xIndex is column in the sorted matrix
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
p_dist = dist+xIndex;
p_ind = ind+xIndex;
min_value = *p_dist;
for (l = 0; l < k; l++) {
min_index = l;
min_value = *(p_dist+l*pitch);
for (i=l+1; i < height; i++) {
if (*(p_dist+i*pitch) < min_value) {
min_index = i;
min_value = *(p_dist+i*pitch);
}
}
if (min_index != l) {
tmp = *(p_dist+min_index*pitch);
*(p_dist+min_index*pitch) = *(p_dist+l*pitch);
*(p_dist+l*pitch) = tmp;
}
p_ind[l*ind_pitch] = min_index;
}
}
}
__global__ void parallelSqrt(float *dist, int width, int pitch, int k) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if (xIndex<width && yIndex<k)
dist[yIndex*pitch + xIndex] = sqrt(dist[yIndex*pitch + xIndex]);
}
// Compute the mean of the first k elements
__global__ void mean(float *dist, int width, int pitch, float *res, int k) {
float sum;
float *p;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
sum = 0;
p = dist + xIndex;
for (int l = 0; l < k*pitch; l += pitch) sum += *(p+l);
res[xIndex] = sum/k;
}
}
void printErrorMessage(hipError_t error, int memorySize){
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error));
printf("Wished allocated memory : %d\n", memorySize);
printf("==================================================\n");
}
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, float* res_host, int *ind_host) {
// Initialize variables
float *ref_dev;
float *query_dev;
float *dist_dev;
int *ind_dev;
float *res_dev;
size_t ref_pitch_in_bytes;
size_t query_pitch_in_bytes;
size_t res_pitch_in_bytes;
size_t ind_pitch_in_bytes;
size_t dist_pitch_in_bytes;
size_t ref_pitch;
size_t query_pitch;
// size_t res_pitch;
size_t ind_pitch;
hipError_t result;
// Allocate device memory
result = hipMallocPitch((void **) &ref_dev, &ref_pitch_in_bytes, ref_width * sizeof(float), height);
if (result){
hipFree(ref_dev);
printErrorMessage(result, ref_width*sizeof(float)*height);
return;
}
result = hipMallocPitch((void **) &query_dev, &query_pitch_in_bytes, query_width*sizeof(float), height);
if (result){
hipFree(query_dev);
printErrorMessage(result, query_width*sizeof(float)*k);
return;
}
result = hipMallocPitch((void **) &dist_dev, &dist_pitch_in_bytes, query_width*sizeof(float), ref_width);
if (result){
hipFree(dist_dev);
printErrorMessage(result, query_width*sizeof(float)*ref_width);
return;
}
result = hipMallocPitch((void **) &ind_dev, &ind_pitch_in_bytes, query_width*sizeof(int), k);
if (result){
hipFree(ind_dev);
printErrorMessage(result, query_width*sizeof(int)*k);
return;
}
result = hipMallocPitch((void **) &res_dev, &res_pitch_in_bytes, query_width*sizeof(float), 1);
if (result){
hipFree(res_dev);
printErrorMessage(result, query_width*sizeof(float));
return;
}
// Copy reference and query points to global memory
hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*sizeof(float), ref_width*sizeof(float), height, hipMemcpyHostToDevice);
hipMemcpy2D(query_dev, query_pitch_in_bytes, query_host, query_width*sizeof(float), query_width*sizeof(float), height, hipMemcpyHostToDevice);
// Compute the pitches
ref_pitch = ref_pitch_in_bytes/sizeof(float);
query_pitch = query_pitch_in_bytes/sizeof(float);
// res_pitch = res_pitch_in_bytes/sizeof(float);
ind_pitch = ind_pitch_in_bytes/sizeof(int);
// Set kernel dims
// Each block has 16x16 threads, and processes 1/16 of ref width
// It creates a local 16x16 matrix, which goes down the rows
// The number of blocks depends on nb_ref, threads/block is fixed
dim3 threads_per_block_2D(BLOCK_DIM, BLOCK_DIM, 1);
dim3 threads_per_block_1D(BLOCK_DIM * BLOCK_DIM, 1, 1);
dim3 blocks_2D(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) ref_width/BLOCK_DIM), 1);
dim3 blocks_2D_k(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) k/BLOCK_DIM), 1);
dim3 blocks_1D(std::ceil((float) query_width/(BLOCK_DIM*BLOCK_DIM)), 1, 1);
// Start kernels
computeDistance<<<blocks_2D, threads_per_block_2D>>>(ref_dev, ref_width, ref_pitch, query_dev, query_width, query_pitch, height, dist_dev);
sort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, ind_dev, query_width, query_pitch, ind_pitch, ref_width, k);
// insertionSort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_pitch, ind_dev, ind_pitch, query_width, ref_width, k);
parallelSqrt<<<blocks_2D_k, threads_per_block_2D>>>(dist_dev, query_width, query_pitch, k);
mean<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_width, query_pitch, res_dev, k);
// Copy memory from device to host
hipMemcpy2D(res_host, query_width*sizeof(float), res_dev, query_pitch_in_bytes, query_width*sizeof(float), 1, hipMemcpyDeviceToHost);
hipMemcpy2D(dist_host, query_width*sizeof(float), dist_dev, dist_pitch_in_bytes, query_width*sizeof(float),k , hipMemcpyDeviceToHost);
hipMemcpy2D(ind_host, query_width*sizeof(int) , ind_dev, ind_pitch_in_bytes, query_width*sizeof(int) , k, hipMemcpyDeviceToHost);
hipFree(ref_dev); hipFree(query_dev); hipFree(res_dev); hipFree(ind_dev);
}
int main() {
// Initialize variables
float *ref;
float *query;
float *dist;
float *res;
int *ind;
int ref_nb = 4096;
int query_nb = 4096;
int dim = 32;
int k = 20;
// Allocate host memory
ref = (float *) malloc(ref_nb * dim * sizeof(float));
query = (float *) malloc(query_nb * dim * sizeof(float));
dist = (float *) malloc(query_nb * k * sizeof(float));
res = (float *) malloc(query_nb * 1 * sizeof(float)); // Mean of the first k distances in the sorted matrix
ind = (int *) malloc(query_nb * k * sizeof(int));
// Generate random data
srand(time(NULL));
for (int i = 0; i<ref_nb * dim; i++) ref[i] = (float) (rand() % 100);
for (int i = 0; i<query_nb * dim; i++) query[i] = (float) (rand() % 100);
knn(ref, ref_nb, query, query_nb, dim, k, dist, res, ind);
for (int j = 0; j < 10; j++) {
std::cout << "( ";
for (int i = 0; i < dim; i++) std::cout << query[i*query_nb+j] << " ";
std::cout << ")" << std::endl;
std::cout << res[j] << std::endl;
for (int i = 0; i < k; i++) std::cout << ind[i*query_nb+j] << " ";
std::cout << std::endl << std::endl;
}
for (int i = 0; i < k; i++) {
for (int j = 0; j < 10; j++) {
std::cout << dist[i*query_nb + j] << " ";
}
std::cout << std::endl;
}
free(ref); free(query); free(dist); free(ind);
return 0;
} | .text
.file "knn_cuda.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z30__device_stub__computeDistancePfiiS_iiiS_ # -- Begin function _Z30__device_stub__computeDistancePfiiS_iiiS_
.p2align 4, 0x90
.type _Z30__device_stub__computeDistancePfiiS_iiiS_,@function
_Z30__device_stub__computeDistancePfiiS_iiiS_: # @_Z30__device_stub__computeDistancePfiiS_iiiS_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 160(%rsp), %rax
movq %rax, 128(%rsp)
leaq 168(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z15computeDistancePfiiS_iiiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z30__device_stub__computeDistancePfiiS_iiiS_, .Lfunc_end0-_Z30__device_stub__computeDistancePfiiS_iiiS_
.cfi_endproc
# -- End function
.globl _Z19__device_stub__sortPfPiiiiii # -- Begin function _Z19__device_stub__sortPfPiiiiii
.p2align 4, 0x90
.type _Z19__device_stub__sortPfPiiiiii,@function
_Z19__device_stub__sortPfPiiiiii: # @_Z19__device_stub__sortPfPiiiiii
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 144(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4sortPfPiiiiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end1:
.size _Z19__device_stub__sortPfPiiiiii, .Lfunc_end1-_Z19__device_stub__sortPfPiiiiii
.cfi_endproc
# -- End function
.globl _Z27__device_stub__parallelSqrtPfiii # -- Begin function _Z27__device_stub__parallelSqrtPfiii
.p2align 4, 0x90
.type _Z27__device_stub__parallelSqrtPfiii,@function
_Z27__device_stub__parallelSqrtPfiii: # @_Z27__device_stub__parallelSqrtPfiii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12parallelSqrtPfiii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z27__device_stub__parallelSqrtPfiii, .Lfunc_end2-_Z27__device_stub__parallelSqrtPfiii
.cfi_endproc
# -- End function
.globl _Z19__device_stub__meanPfiiS_i # -- Begin function _Z19__device_stub__meanPfiiS_i
.p2align 4, 0x90
.type _Z19__device_stub__meanPfiiS_i,@function
_Z19__device_stub__meanPfiiS_i: # @_Z19__device_stub__meanPfiiS_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movl %edx, 8(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 8(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4meanPfiiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z19__device_stub__meanPfiiS_i, .Lfunc_end3-_Z19__device_stub__meanPfiiS_i
.cfi_endproc
# -- End function
.globl _Z17printErrorMessage10hipError_ti # -- Begin function _Z17printErrorMessage10hipError_ti
.p2align 4, 0x90
.type _Z17printErrorMessage10hipError_ti,@function
_Z17printErrorMessage10hipError_ti: # @_Z17printErrorMessage10hipError_ti
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
movl %esi, %ebx
movl %edi, %ebp
movl $.Lstr.1, %edi
callq puts@PLT
movl %ebp, %edi
callq hipGetErrorString
movl $.L.str.1, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movl $.Lstr.1, %edi
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp puts@PLT # TAILCALL
.Lfunc_end4:
.size _Z17printErrorMessage10hipError_ti, .Lfunc_end4-_Z17printErrorMessage10hipError_ti
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z3knnPfiS_iiiS_S_Pi
.LCPI5_0:
.long 0x3d800000 # float 0.0625
.LCPI5_1:
.long 0x3b800000 # float 0.00390625
.text
.globl _Z3knnPfiS_iiiS_S_Pi
.p2align 4, 0x90
.type _Z3knnPfiS_iiiS_S_Pi,@function
_Z3knnPfiS_iiiS_S_Pi: # @_Z3knnPfiS_iiiS_S_Pi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, 28(%rsp) # 4-byte Spill
movl %r8d, %r13d
movl %ecx, 52(%rsp) # 4-byte Spill
movq %rdx, 32(%rsp) # 8-byte Spill
movl %esi, %r14d
movq %rdi, 96(%rsp) # 8-byte Spill
movslq %esi, %rbp
leaq (,%rbp,4), %rbx
movslq %r8d, %r15
leaq 80(%rsp), %rdi
leaq 120(%rsp), %rsi
movq %rbx, %rdx
movq %r15, %rcx
callq hipMallocPitch
testl %eax, %eax
je .LBB5_2
# %bb.1:
movl %eax, %r12d
movq 80(%rsp), %rdi
callq hipFree
imull %r13d, %ebx
movl $.Lstr.1, %edi
callq puts@PLT
movl %r12d, %edi
callq hipGetErrorString
movl $.L.str.1, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movl %ebx, %esi
jmp .LBB5_10
.LBB5_2:
movq %rbx, 88(%rsp) # 8-byte Spill
movl %r14d, 24(%rsp) # 4-byte Spill
movl 52(%rsp), %eax # 4-byte Reload
movslq %eax, %r12
shlq $2, %r12
leaq 72(%rsp), %rdi
leaq 104(%rsp), %rsi
movq %r12, %rdx
movq %r15, %rcx
callq hipMallocPitch
testl %eax, %eax
je .LBB5_4
# %bb.3:
movl %eax, %ebx
movq 72(%rsp), %rdi
callq hipFree
imull 28(%rsp), %r12d # 4-byte Folded Reload
movl $.Lstr.1, %edi
callq puts@PLT
movl %ebx, %edi
jmp .LBB5_9
.LBB5_4:
leaq 40(%rsp), %rdi
leaq 152(%rsp), %rsi
movq %r12, %rdx
movq %rbp, %rcx
callq hipMallocPitch
testl %eax, %eax
je .LBB5_6
# %bb.5:
movl %eax, %ebp
movq 40(%rsp), %rdi
callq hipFree
imull 24(%rsp), %r12d # 4-byte Folded Reload
jmp .LBB5_8
.LBB5_6:
movl 28(%rsp), %r14d # 4-byte Reload
movslq %r14d, %rbx
leaq 64(%rsp), %rdi
leaq 112(%rsp), %rsi
movq %r12, %rdx
movq %rbx, %rcx
callq hipMallocPitch
testl %eax, %eax
je .LBB5_11
# %bb.7:
movl %eax, %ebp
movq 64(%rsp), %rdi
callq hipFree
imull %r14d, %r12d
.LBB5_8:
movl $.Lstr.1, %edi
callq puts@PLT
movl %ebp, %edi
.LBB5_9:
callq hipGetErrorString
movl $.L.str.1, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.2, %edi
movl %r12d, %esi
.LBB5_10:
xorl %eax, %eax
callq printf
movl $.Lstr.1, %edi
callq puts@PLT
.LBB5_22:
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB5_11:
.cfi_def_cfa_offset 224
movq %rbx, 144(%rsp) # 8-byte Spill
leaq 56(%rsp), %rdi
leaq 160(%rsp), %rsi
movl $1, %ecx
movq %r12, %rdx
callq hipMallocPitch
testl %eax, %eax
je .LBB5_13
# %bb.12:
movl %eax, %ebp
movq 56(%rsp), %rdi
callq hipFree
movl %ebp, %edi
movl %r12d, %esi
callq _Z17printErrorMessage10hipError_ti
jmp .LBB5_22
.LBB5_13:
movq 80(%rsp), %rdi
movq 120(%rsp), %rsi
movl $1, (%rsp)
movq 96(%rsp), %rdx # 8-byte Reload
movq 88(%rsp), %rcx # 8-byte Reload
movq %rcx, %r8
movq %r15, %r9
callq hipMemcpy2D
movq 72(%rsp), %rdi
movq 104(%rsp), %rsi
movl $1, (%rsp)
movq 32(%rsp), %rdx # 8-byte Reload
movq %r12, %rcx
movq %r12, 96(%rsp) # 8-byte Spill
movq %r12, %r8
movq %r15, %r9
callq hipMemcpy2D
movl %r14d, %r15d
movq 120(%rsp), %rax
movq %rax, 128(%rsp) # 8-byte Spill
movq 104(%rsp), %rax
shrq $2, %rax
movq %rax, 32(%rsp) # 8-byte Spill
movl 52(%rsp), %ebx # 4-byte Reload
cvtsi2ss %ebx, %xmm0
movss %xmm0, 88(%rsp) # 4-byte Spill
movq 112(%rsp), %rax
movq %rax, 136(%rsp) # 8-byte Spill
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r14
xorps %xmm0, %xmm0
cvtsi2ssl 24(%rsp), %xmm0 # 4-byte Folded Reload
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %rbp
movl %r14d, %r12d
shlq $32, %rbp
orq %r12, %rbp
xorps %xmm0, %xmm0
cvtsi2ss %r15d, %xmm0
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r14
shlq $32, %r14
movss 88(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
mulss .LCPI5_1(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %rax
movl %eax, %eax
movabsq $4294967552, %rcx # imm = 0x100000100
leaq (%rax,%rcx), %r15
addq $-256, %r15
movq %rbp, %rdi
movl %ebx, %ebp
movl $1, %esi
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_15
# %bb.14:
movq 128(%rsp), %rdx # 8-byte Reload
shrq $2, %rdx
movq 80(%rsp), %rdi
movq 72(%rsp), %rcx
movq 40(%rsp), %rax
movq %rax, 8(%rsp)
movl %r13d, (%rsp)
movl 24(%rsp), %esi # 4-byte Reload
# kill: def $edx killed $edx killed $rdx
movl %ebp, %r8d
movq 32(%rsp), %r9 # 8-byte Reload
# kill: def $r9d killed $r9d killed $r9
callq _Z30__device_stub__computeDistancePfiiS_iiiS_
.LBB5_15:
orq %r12, %r14
movq %r15, %rdi
movl $1, %esi
movabsq $4294967552, %r12 # imm = 0x100000100
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movl 28(%rsp), %ebx # 4-byte Reload
movq 144(%rsp), %r13 # 8-byte Reload
jne .LBB5_17
# %bb.16:
movq 136(%rsp), %r8 # 8-byte Reload
shrq $2, %r8
movq 40(%rsp), %rdi
movq 64(%rsp), %rsi
movl %ebx, (%rsp)
movl %ebp, %edx
movq 32(%rsp), %rcx # 8-byte Reload
# kill: def $ecx killed $ecx killed $rcx
# kill: def $r8d killed $r8d killed $r8
movl 24(%rsp), %r9d # 4-byte Reload
callq _Z19__device_stub__sortPfPiiiiii
.LBB5_17:
movq %r14, %rdi
movl $1, %esi
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_19
# %bb.18:
movq 40(%rsp), %rdi
movl %ebp, %esi
movq 32(%rsp), %rdx # 8-byte Reload
# kill: def $edx killed $edx killed $rdx
movl %ebx, %ecx
callq _Z27__device_stub__parallelSqrtPfiii
.LBB5_19:
movq 232(%rsp), %rbx
movq 224(%rsp), %r14
movq %r15, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_21
# %bb.20:
movq 40(%rsp), %rdi
movq 56(%rsp), %rcx
movl %ebp, %esi
movq 32(%rsp), %rdx # 8-byte Reload
# kill: def $edx killed $edx killed $rdx
movl 28(%rsp), %r8d # 4-byte Reload
callq _Z19__device_stub__meanPfiiS_i
.LBB5_21:
movq 56(%rsp), %rdx
movq 104(%rsp), %rcx
movl $2, (%rsp)
movl $1, %r9d
movq %rbx, %rdi
movq 96(%rsp), %r15 # 8-byte Reload
movq %r15, %rsi
movq %r15, %r8
callq hipMemcpy2D
movq 40(%rsp), %rdx
movq 152(%rsp), %rcx
movl $2, (%rsp)
movq %r14, %rdi
movq %r15, %rsi
movq %r15, %r8
movq %r13, %r9
callq hipMemcpy2D
movq 64(%rsp), %rdx
movq 112(%rsp), %rcx
movl $2, (%rsp)
movq 240(%rsp), %rdi
movq %r15, %rsi
movq %r15, %r8
movq %r13, %r9
callq hipMemcpy2D
movq 80(%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
jmp .LBB5_22
.Lfunc_end5:
.size _Z3knnPfiS_iiiS_S_Pi, .Lfunc_end5-_Z3knnPfiS_iiiS_S_Pi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $524288, %edi # imm = 0x80000
callq malloc
movq %rax, %rbx
movl $524288, %edi # imm = 0x80000
callq malloc
movq %rax, %rbp
movl $327680, %edi # imm = 0x50000
callq malloc
movq %rax, %r15
movl $16384, %edi # imm = 0x4000
callq malloc
movq %rax, %r13
movl $327680, %edi # imm = 0x50000
callq malloc
movq %rax, %r12
xorl %r14d, %r14d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
.p2align 4, 0x90
.LBB6_1: # =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%r14,4)
incq %r14
cmpq $131072, %r14 # imm = 0x20000
jne .LBB6_1
# %bb.2: # %.preheader88.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB6_3: # %.preheader88
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbp,%r14,4)
incq %r14
cmpq $131072, %r14 # imm = 0x20000
jne .LBB6_3
# %bb.4:
subq $8, %rsp
.cfi_adjust_cfa_offset 8
movq %rbx, 32(%rsp) # 8-byte Spill
movq %rbx, %rdi
movl $4096, %esi # imm = 0x1000
movq %rbp, %rdx
movl $4096, %ecx # imm = 0x1000
movl $32, %r8d
movl $20, %r9d
pushq %r12
.cfi_adjust_cfa_offset 8
pushq %r13
.cfi_adjust_cfa_offset 8
movq %r15, 32(%rsp) # 8-byte Spill
pushq %r15
.cfi_adjust_cfa_offset 8
callq _Z3knnPfiS_iiiS_S_Pi
addq $32, %rsp
.cfi_adjust_cfa_offset -32
xorl %r15d, %r15d
movq %r12, 16(%rsp) # 8-byte Spill
movq %rbp, 32(%rsp) # 8-byte Spill
movq %rbp, %r14
jmp .LBB6_5
.p2align 4, 0x90
.LBB6_24: # in Loop: Header=BB6_5 Depth=1
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB6_25: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit81
# in Loop: Header=BB6_5 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
addq $4, %r14
addq $4, %r12
cmpq $10, %r15
je .LBB6_26
.LBB6_5: # =>This Loop Header: Depth=1
# Child Loop BB6_6 Depth 2
# Child Loop BB6_16 Depth 2
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $2, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB6_6: # Parent Loop BB6_5 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%rbp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.4, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
addq $16384, %rbp # imm = 0x4000
cmpq $524288, %rbp # imm = 0x80000
jne .LBB6_6
# %bb.7: # in Loop: Header=BB6_5 Depth=1
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbp
testq %rbp, %rbp
je .LBB6_35
# %bb.8: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB6_5 Depth=1
cmpb $0, 56(%rbp)
je .LBB6_10
# %bb.9: # in Loop: Header=BB6_5 Depth=1
movzbl 67(%rbp), %eax
jmp .LBB6_11
.p2align 4, 0x90
.LBB6_10: # in Loop: Header=BB6_5 Depth=1
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB6_11: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB6_5 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movss (%r13,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB6_35
# %bb.12: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i68
# in Loop: Header=BB6_5 Depth=1
cmpb $0, 56(%rbp)
je .LBB6_14
# %bb.13: # in Loop: Header=BB6_5 Depth=1
movzbl 67(%rbp), %ecx
jmp .LBB6_15
.p2align 4, 0x90
.LBB6_14: # in Loop: Header=BB6_5 Depth=1
movq %rbp, %rdi
movq %rax, %rbx
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %rbx, %rax
.LBB6_15: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit71
# in Loop: Header=BB6_5 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq $-327680, %rbp # imm = 0xFFFB0000
.p2align 4, 0x90
.LBB6_16: # Parent Loop BB6_5 Depth=1
# => This Inner Loop Header: Depth=2
movl 327680(%r12,%rbp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.4, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
addq $16384, %rbp # imm = 0x4000
jne .LBB6_16
# %bb.17: # in Loop: Header=BB6_5 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbp
testq %rbp, %rbp
je .LBB6_35
# %bb.18: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i73
# in Loop: Header=BB6_5 Depth=1
cmpb $0, 56(%rbp)
je .LBB6_20
# %bb.19: # in Loop: Header=BB6_5 Depth=1
movzbl 67(%rbp), %eax
jmp .LBB6_21
.p2align 4, 0x90
.LBB6_20: # in Loop: Header=BB6_5 Depth=1
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB6_21: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit76
# in Loop: Header=BB6_5 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbp
testq %rbp, %rbp
je .LBB6_35
# %bb.22: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i78
# in Loop: Header=BB6_5 Depth=1
cmpb $0, 56(%rbp)
je .LBB6_24
# %bb.23: # in Loop: Header=BB6_5 Depth=1
movzbl 67(%rbp), %ecx
jmp .LBB6_25
.LBB6_26: # %.preheader.preheader
xorl %r14d, %r14d
movq 8(%rsp), %r15 # 8-byte Reload
movq 32(%rsp), %rbx # 8-byte Reload
jmp .LBB6_27
.p2align 4, 0x90
.LBB6_31: # in Loop: Header=BB6_27 Depth=1
movzbl 67(%r13), %eax
.LBB6_33: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit86
# in Loop: Header=BB6_27 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r14
addq $16384, %r15 # imm = 0x4000
cmpq $20, %r14
je .LBB6_34
.LBB6_27: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB6_28 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB6_28: # Parent Loop BB6_27 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.4, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq $10, %r12
jne .LBB6_28
# %bb.29: # in Loop: Header=BB6_27 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB6_35
# %bb.30: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i83
# in Loop: Header=BB6_27 Depth=1
cmpb $0, 56(%r13)
jne .LBB6_31
# %bb.32: # in Loop: Header=BB6_27 Depth=1
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
jmp .LBB6_33
.LBB6_34:
movq 24(%rsp), %rdi # 8-byte Reload
callq free
movq %rbx, %rdi
callq free
movq 8(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
xorl %eax, %eax
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB6_35:
.cfi_def_cfa_offset 96
callq _ZSt16__throw_bad_castv
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z15computeDistancePfiiS_iiiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4sortPfPiiiiii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12parallelSqrtPfiii, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4meanPfiiS_i, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z15computeDistancePfiiS_iiiS_,@object # @_Z15computeDistancePfiiS_iiiS_
.section .rodata,"a",@progbits
.globl _Z15computeDistancePfiiS_iiiS_
.p2align 3, 0x0
_Z15computeDistancePfiiS_iiiS_:
.quad _Z30__device_stub__computeDistancePfiiS_iiiS_
.size _Z15computeDistancePfiiS_iiiS_, 8
.type _Z4sortPfPiiiiii,@object # @_Z4sortPfPiiiiii
.globl _Z4sortPfPiiiiii
.p2align 3, 0x0
_Z4sortPfPiiiiii:
.quad _Z19__device_stub__sortPfPiiiiii
.size _Z4sortPfPiiiiii, 8
.type _Z12parallelSqrtPfiii,@object # @_Z12parallelSqrtPfiii
.globl _Z12parallelSqrtPfiii
.p2align 3, 0x0
_Z12parallelSqrtPfiii:
.quad _Z27__device_stub__parallelSqrtPfiii
.size _Z12parallelSqrtPfiii, 8
.type _Z4meanPfiiS_i,@object # @_Z4meanPfiiS_i
.globl _Z4meanPfiiS_i
.p2align 3, 0x0
_Z4meanPfiiS_i:
.quad _Z19__device_stub__meanPfiiS_i
.size _Z4meanPfiiS_i, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "MEMORY ALLOCATION ERROR : %s\n"
.size .L.str.1, 31
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Wished allocated memory : %d\n"
.size .L.str.2, 30
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "( "
.size .L.str.3, 3
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " "
.size .L.str.4, 2
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz ")"
.size .L.str.5, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z15computeDistancePfiiS_iiiS_"
.size .L__unnamed_1, 31
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z4sortPfPiiiiii"
.size .L__unnamed_2, 17
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z12parallelSqrtPfiii"
.size .L__unnamed_3, 22
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z4meanPfiiS_i"
.size .L__unnamed_4, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.1,@object # @str.1
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.1:
.asciz "=================================================="
.size .Lstr.1, 51
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z30__device_stub__computeDistancePfiiS_iiiS_
.addrsig_sym _Z19__device_stub__sortPfPiiiiii
.addrsig_sym _Z27__device_stub__parallelSqrtPfiii
.addrsig_sym _Z19__device_stub__meanPfiiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z15computeDistancePfiiS_iiiS_
.addrsig_sym _Z4sortPfPiiiiii
.addrsig_sym _Z12parallelSqrtPfiii
.addrsig_sym _Z4meanPfiiS_i
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define THREADNUM 4
#define BLOCKNUM 4
__device__ float G_rand(curandState *states, int ind){
curandState local_state = states[ind];
float rand_num = curand_uniform(&local_state);
//states[ind] = local_state;
return rand_num;
}
__global__ void G_srand(curandState *states, unsigned long seed){
int ind = threadIdx.x;
//what is curand_init
curand_init(seed, ind, 0, &states[ind]);
}
__global__ void G_testRand(double *tmp_space, curandState *states){
int t_id = threadIdx.x;
int b_id = blockIdx.x;
tmp_space[(b_id * THREADNUM) + t_id] = G_rand(states, t_id);
return;
}
int main(){
// initialize for parallel computation
curandState *dev_states;
cudaMalloc((void**) &dev_states, sizeof(curandState) * THREADNUM);
G_srand<<<BLOCKNUM, THREADNUM>>>(dev_states, unsigned(time(NULL)));
// prepering for args space
double *G_rand, *C_rand;
cudaMalloc((void**) &G_rand, sizeof(double) * BLOCKNUM * THREADNUM);
C_rand = (double*)malloc(sizeof(double) * BLOCKNUM * THREADNUM);
// calculation
G_testRand<<<BLOCKNUM, THREADNUM>>>(G_rand, dev_states);
// copy back to MainMemory
cudaMemcpy(C_rand, G_rand, sizeof(double) * BLOCKNUM * THREADNUM, cudaMemcpyDeviceToHost);
// output result
int i, j;
printf("Result: ----------------\n");
for (i = 0; i < BLOCKNUM; i++) {
for (j = 0; j < THREADNUM; j++) {
printf("%lf\t", C_rand[(i * THREADNUM) + j]);
}
printf("\n");
}
// delete used memory
cudaFree(dev_states);
cudaFree(G_rand);
free(C_rand);
return 0;
} | .file "tmpxft_00062399_00000000-6_randTest.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2275:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2275:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z6G_randP17curandStateXORWOWi
.type _Z6G_randP17curandStateXORWOWi, @function
_Z6G_randP17curandStateXORWOWi:
.LFB2271:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2271:
.size _Z6G_randP17curandStateXORWOWi, .-_Z6G_randP17curandStateXORWOWi
.globl _Z45__device_stub__Z7G_srandP17curandStateXORWOWmP17curandStateXORWOWm
.type _Z45__device_stub__Z7G_srandP17curandStateXORWOWmP17curandStateXORWOWm, @function
_Z45__device_stub__Z7G_srandP17curandStateXORWOWmP17curandStateXORWOWm:
.LFB2297:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z7G_srandP17curandStateXORWOWm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2297:
.size _Z45__device_stub__Z7G_srandP17curandStateXORWOWmP17curandStateXORWOWm, .-_Z45__device_stub__Z7G_srandP17curandStateXORWOWmP17curandStateXORWOWm
.globl _Z7G_srandP17curandStateXORWOWm
.type _Z7G_srandP17curandStateXORWOWm, @function
_Z7G_srandP17curandStateXORWOWm:
.LFB2298:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z7G_srandP17curandStateXORWOWmP17curandStateXORWOWm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2298:
.size _Z7G_srandP17curandStateXORWOWm, .-_Z7G_srandP17curandStateXORWOWm
.globl _Z50__device_stub__Z10G_testRandPdP17curandStateXORWOWPdP17curandStateXORWOW
.type _Z50__device_stub__Z10G_testRandPdP17curandStateXORWOWPdP17curandStateXORWOW, @function
_Z50__device_stub__Z10G_testRandPdP17curandStateXORWOWPdP17curandStateXORWOW:
.LFB2299:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z10G_testRandPdP17curandStateXORWOW(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2299:
.size _Z50__device_stub__Z10G_testRandPdP17curandStateXORWOWPdP17curandStateXORWOW, .-_Z50__device_stub__Z10G_testRandPdP17curandStateXORWOWPdP17curandStateXORWOW
.globl _Z10G_testRandPdP17curandStateXORWOW
.type _Z10G_testRandPdP17curandStateXORWOW, @function
_Z10G_testRandPdP17curandStateXORWOW:
.LFB2300:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z10G_testRandPdP17curandStateXORWOWPdP17curandStateXORWOW
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2300:
.size _Z10G_testRandPdP17curandStateXORWOW, .-_Z10G_testRandPdP17curandStateXORWOW
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Result: ----------------\n"
.LC1:
.string "%lf\t"
.LC2:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2272:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $192, %esi
call cudaMalloc@PLT
movl $4, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $4, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L22:
leaq 8(%rsp), %rdi
movl $128, %esi
call cudaMalloc@PLT
movl $128, %edi
call malloc@PLT
movq %rax, %r14
movl $4, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $4, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L23:
movl $2, %ecx
movl $128, %edx
movq 8(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 32(%r14), %rbp
movl $0, %r13d
leaq .LC1(%rip), %r12
leaq .LC2(%rip), %r15
.L24:
leaq -32(%rbp), %rbx
.L25:
movsd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $8, %rbx
cmpq %rbp, %rbx
jne .L25
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $4, %r13d
addq $32, %rbp
cmpl $16, %r13d
jne .L24
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L32
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
movl $0, %edi
call time@PLT
movl %eax, %esi
movq (%rsp), %rdi
call _Z45__device_stub__Z7G_srandP17curandStateXORWOWmP17curandStateXORWOWm
jmp .L22
.L31:
movq (%rsp), %rsi
movq 8(%rsp), %rdi
call _Z50__device_stub__Z10G_testRandPdP17curandStateXORWOWPdP17curandStateXORWOW
jmp .L23
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2272:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "_Z10G_testRandPdP17curandStateXORWOW"
.align 8
.LC4:
.string "_Z7G_srandP17curandStateXORWOWm"
.section .rodata.str1.1
.LC5:
.string "precalc_xorwow_matrix"
.LC6:
.string "precalc_xorwow_offset_matrix"
.LC7:
.string "mrg32k3aM1"
.LC8:
.string "mrg32k3aM2"
.LC9:
.string "mrg32k3aM1SubSeq"
.LC10:
.string "mrg32k3aM2SubSeq"
.LC11:
.string "mrg32k3aM1Seq"
.LC12:
.string "mrg32k3aM2Seq"
.LC13:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2302:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z10G_testRandPdP17curandStateXORWOW(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z7G_srandP17curandStateXORWOWm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2302:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define THREADNUM 4
#define BLOCKNUM 4
__device__ float G_rand(curandState *states, int ind){
curandState local_state = states[ind];
float rand_num = curand_uniform(&local_state);
//states[ind] = local_state;
return rand_num;
}
__global__ void G_srand(curandState *states, unsigned long seed){
int ind = threadIdx.x;
//what is curand_init
curand_init(seed, ind, 0, &states[ind]);
}
__global__ void G_testRand(double *tmp_space, curandState *states){
int t_id = threadIdx.x;
int b_id = blockIdx.x;
tmp_space[(b_id * THREADNUM) + t_id] = G_rand(states, t_id);
return;
}
int main(){
// initialize for parallel computation
curandState *dev_states;
cudaMalloc((void**) &dev_states, sizeof(curandState) * THREADNUM);
G_srand<<<BLOCKNUM, THREADNUM>>>(dev_states, unsigned(time(NULL)));
// prepering for args space
double *G_rand, *C_rand;
cudaMalloc((void**) &G_rand, sizeof(double) * BLOCKNUM * THREADNUM);
C_rand = (double*)malloc(sizeof(double) * BLOCKNUM * THREADNUM);
// calculation
G_testRand<<<BLOCKNUM, THREADNUM>>>(G_rand, dev_states);
// copy back to MainMemory
cudaMemcpy(C_rand, G_rand, sizeof(double) * BLOCKNUM * THREADNUM, cudaMemcpyDeviceToHost);
// output result
int i, j;
printf("Result: ----------------\n");
for (i = 0; i < BLOCKNUM; i++) {
for (j = 0; j < THREADNUM; j++) {
printf("%lf\t", C_rand[(i * THREADNUM) + j]);
}
printf("\n");
}
// delete used memory
cudaFree(dev_states);
cudaFree(G_rand);
free(C_rand);
return 0;
} | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define THREADNUM 4
#define BLOCKNUM 4
__device__ float G_rand(hiprandState *states, int ind){
hiprandState local_state = states[ind];
float rand_num = hiprand_uniform(&local_state);
//states[ind] = local_state;
return rand_num;
}
__global__ void G_srand(hiprandState *states, unsigned long seed){
int ind = threadIdx.x;
//what is curand_init
hiprand_init(seed, ind, 0, &states[ind]);
}
__global__ void G_testRand(double *tmp_space, hiprandState *states){
int t_id = threadIdx.x;
int b_id = blockIdx.x;
tmp_space[(b_id * THREADNUM) + t_id] = G_rand(states, t_id);
return;
}
int main(){
// initialize for parallel computation
hiprandState *dev_states;
hipMalloc((void**) &dev_states, sizeof(hiprandState) * THREADNUM);
G_srand<<<BLOCKNUM, THREADNUM>>>(dev_states, unsigned(time(NULL)));
// prepering for args space
double *G_rand, *C_rand;
hipMalloc((void**) &G_rand, sizeof(double) * BLOCKNUM * THREADNUM);
C_rand = (double*)malloc(sizeof(double) * BLOCKNUM * THREADNUM);
// calculation
G_testRand<<<BLOCKNUM, THREADNUM>>>(G_rand, dev_states);
// copy back to MainMemory
hipMemcpy(C_rand, G_rand, sizeof(double) * BLOCKNUM * THREADNUM, hipMemcpyDeviceToHost);
// output result
int i, j;
printf("Result: ----------------\n");
for (i = 0; i < BLOCKNUM; i++) {
for (j = 0; j < THREADNUM; j++) {
printf("%lf\t", C_rand[(i * THREADNUM) + j]);
}
printf("\n");
}
// delete used memory
hipFree(dev_states);
hipFree(G_rand);
free(C_rand);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define THREADNUM 4
#define BLOCKNUM 4
__device__ float G_rand(hiprandState *states, int ind){
hiprandState local_state = states[ind];
float rand_num = hiprand_uniform(&local_state);
//states[ind] = local_state;
return rand_num;
}
__global__ void G_srand(hiprandState *states, unsigned long seed){
int ind = threadIdx.x;
//what is curand_init
hiprand_init(seed, ind, 0, &states[ind]);
}
__global__ void G_testRand(double *tmp_space, hiprandState *states){
int t_id = threadIdx.x;
int b_id = blockIdx.x;
tmp_space[(b_id * THREADNUM) + t_id] = G_rand(states, t_id);
return;
}
int main(){
// initialize for parallel computation
hiprandState *dev_states;
hipMalloc((void**) &dev_states, sizeof(hiprandState) * THREADNUM);
G_srand<<<BLOCKNUM, THREADNUM>>>(dev_states, unsigned(time(NULL)));
// prepering for args space
double *G_rand, *C_rand;
hipMalloc((void**) &G_rand, sizeof(double) * BLOCKNUM * THREADNUM);
C_rand = (double*)malloc(sizeof(double) * BLOCKNUM * THREADNUM);
// calculation
G_testRand<<<BLOCKNUM, THREADNUM>>>(G_rand, dev_states);
// copy back to MainMemory
hipMemcpy(C_rand, G_rand, sizeof(double) * BLOCKNUM * THREADNUM, hipMemcpyDeviceToHost);
// output result
int i, j;
printf("Result: ----------------\n");
for (i = 0; i < BLOCKNUM; i++) {
for (j = 0; j < THREADNUM; j++) {
printf("%lf\t", C_rand[(i * THREADNUM) + j]);
}
printf("\n");
}
// delete used memory
hipFree(dev_states);
hipFree(G_rand);
free(C_rand);
return 0;
} | .text
.file "randTest.hip"
.globl _Z22__device_stub__G_srandP12hiprandStatem # -- Begin function _Z22__device_stub__G_srandP12hiprandStatem
.p2align 4, 0x90
.type _Z22__device_stub__G_srandP12hiprandStatem,@function
_Z22__device_stub__G_srandP12hiprandStatem: # @_Z22__device_stub__G_srandP12hiprandStatem
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z7G_srandP12hiprandStatem, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z22__device_stub__G_srandP12hiprandStatem, .Lfunc_end0-_Z22__device_stub__G_srandP12hiprandStatem
.cfi_endproc
# -- End function
.globl _Z25__device_stub__G_testRandPdP12hiprandState # -- Begin function _Z25__device_stub__G_testRandPdP12hiprandState
.p2align 4, 0x90
.type _Z25__device_stub__G_testRandPdP12hiprandState,@function
_Z25__device_stub__G_testRandPdP12hiprandState: # @_Z25__device_stub__G_testRandPdP12hiprandState
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10G_testRandPdP12hiprandState, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z25__device_stub__G_testRandPdP12hiprandState, .Lfunc_end1-_Z25__device_stub__G_testRandPdP12hiprandState
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $104, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movabsq $4294967300, %r14 # imm = 0x100000004
leaq 8(%rsp), %rdi
movl $192, %esi
callq hipMalloc
movq %r14, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq 8(%rsp), %rbx
xorl %edi, %edi
callq time
movl %eax, %eax
movq %rbx, 72(%rsp)
movq %rax, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7G_srandP12hiprandStatem, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
movq %rsp, %rdi
movl $128, %esi
callq hipMalloc
movl $128, %edi
callq malloc
movq %rax, %rbx
movq %r14, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10G_testRandPdP12hiprandState, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
movq (%rsp), %rsi
movl $128, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
xorl %r14d, %r14d
movq %rbx, %r15
.p2align 4, 0x90
.LBB2_5: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_6 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_6: # Parent Loop BB2_5 Depth=1
# => This Inner Loop Header: Depth=2
movsd (%r15,%r12,8), %xmm0 # xmm0 = mem[0],zero
movl $.L.str.1, %edi
movb $1, %al
callq printf
incq %r12
cmpq $4, %r12
jne .LBB2_6
# %bb.7: # in Loop: Header=BB2_5 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
addq $32, %r15
cmpq $4, %r14
jne .LBB2_5
# %bb.8:
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7G_srandP12hiprandStatem, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10G_testRandPdP12hiprandState, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7G_srandP12hiprandStatem,@object # @_Z7G_srandP12hiprandStatem
.section .rodata,"a",@progbits
.globl _Z7G_srandP12hiprandStatem
.p2align 3, 0x0
_Z7G_srandP12hiprandStatem:
.quad _Z22__device_stub__G_srandP12hiprandStatem
.size _Z7G_srandP12hiprandStatem, 8
.type _Z10G_testRandPdP12hiprandState,@object # @_Z10G_testRandPdP12hiprandState
.globl _Z10G_testRandPdP12hiprandState
.p2align 3, 0x0
_Z10G_testRandPdP12hiprandState:
.quad _Z25__device_stub__G_testRandPdP12hiprandState
.size _Z10G_testRandPdP12hiprandState, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%lf\t"
.size .L.str.1, 5
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7G_srandP12hiprandStatem"
.size .L__unnamed_1, 27
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10G_testRandPdP12hiprandState"
.size .L__unnamed_2, 32
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Result: ----------------"
.size .Lstr, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__G_srandP12hiprandStatem
.addrsig_sym _Z25__device_stub__G_testRandPdP12hiprandState
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7G_srandP12hiprandStatem
.addrsig_sym _Z10G_testRandPdP12hiprandState
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.