system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}
__global__ void tensor_5d_equals (const int n, const int c, const int d, const int h, const int w, const DTYPE* x, const int offset_x, const int n_x, const int c_x, const int d_x, const int h_x, const int w_x, const DTYPE* y, const int offset_y, const int n_y, const int c_y, const int d_y, const int h_y, const int w_y, int* eq_flag) {
const int gid_n = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_c = blockIdx.y * blockDim.y + threadIdx.y;
const int gid_d = blockIdx.z * blockDim.z + threadIdx.z;
const bool valid = (gid_n < n) && (gid_c < c) && (gid_d < d);
if (valid) {
const int ix = offset_x + gid_n * n_x + gid_c * c_x + gid_d * d_x;
const int iy = offset_y + gid_n * n_y + gid_c * c_y + gid_d * d_y;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
if (x[ix + i * h_x + j * w_x] != y[iy + i * h_y + j * w_y]){
eq_flag[0]++;
}
}
};
}
} | .file "tmpxft_000b91bd_00000000-6_tensor_5d_equals.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi
.type _Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi, @function
_Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi:
.LFB2051:
.cfi_startproc
endbr64
subq $296, %rsp
.cfi_def_cfa_offset 304
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movl %edx, 36(%rsp)
movl %ecx, 32(%rsp)
movl %r8d, 28(%rsp)
movq %r9, 16(%rsp)
movq 352(%rsp), %rax
movq %rax, 8(%rsp)
movq 408(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 280(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 304(%rsp), %rax
movq %rax, 160(%rsp)
leaq 312(%rsp), %rax
movq %rax, 168(%rsp)
leaq 320(%rsp), %rax
movq %rax, 176(%rsp)
leaq 328(%rsp), %rax
movq %rax, 184(%rsp)
leaq 336(%rsp), %rax
movq %rax, 192(%rsp)
leaq 344(%rsp), %rax
movq %rax, 200(%rsp)
leaq 8(%rsp), %rax
movq %rax, 208(%rsp)
leaq 360(%rsp), %rax
movq %rax, 216(%rsp)
leaq 368(%rsp), %rax
movq %rax, 224(%rsp)
leaq 376(%rsp), %rax
movq %rax, 232(%rsp)
leaq 384(%rsp), %rax
movq %rax, 240(%rsp)
leaq 392(%rsp), %rax
movq %rax, 248(%rsp)
leaq 400(%rsp), %rax
movq %rax, 256(%rsp)
movq %rsp, %rax
movq %rax, 264(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 280(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $296, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 312
pushq 56(%rsp)
.cfi_def_cfa_offset 320
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 304
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi, .-_Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi
.globl _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.type _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, @function
_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 120(%rsp)
.cfi_def_cfa_offset 24
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 72
pushq 120(%rsp)
.cfi_def_cfa_offset 80
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 88
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 96
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 112
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 120
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 128
call _Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, .-_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}
__global__ void tensor_5d_equals (const int n, const int c, const int d, const int h, const int w, const DTYPE* x, const int offset_x, const int n_x, const int c_x, const int d_x, const int h_x, const int w_x, const DTYPE* y, const int offset_y, const int n_y, const int c_y, const int d_y, const int h_y, const int w_y, int* eq_flag) {
const int gid_n = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_c = blockIdx.y * blockDim.y + threadIdx.y;
const int gid_d = blockIdx.z * blockDim.z + threadIdx.z;
const bool valid = (gid_n < n) && (gid_c < c) && (gid_d < d);
if (valid) {
const int ix = offset_x + gid_n * n_x + gid_c * c_x + gid_d * d_x;
const int iy = offset_y + gid_n * n_y + gid_c * c_y + gid_d * d_y;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
if (x[ix + i * h_x + j * w_x] != y[iy + i * h_y + j * w_y]){
eq_flag[0]++;
}
}
};
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}
__global__ void tensor_5d_equals (const int n, const int c, const int d, const int h, const int w, const DTYPE* x, const int offset_x, const int n_x, const int c_x, const int d_x, const int h_x, const int w_x, const DTYPE* y, const int offset_y, const int n_y, const int c_y, const int d_y, const int h_y, const int w_y, int* eq_flag) {
const int gid_n = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_c = blockIdx.y * blockDim.y + threadIdx.y;
const int gid_d = blockIdx.z * blockDim.z + threadIdx.z;
const bool valid = (gid_n < n) && (gid_c < c) && (gid_d < d);
if (valid) {
const int ix = offset_x + gid_n * n_x + gid_c * c_x + gid_d * d_x;
const int iy = offset_y + gid_n * n_y + gid_c * c_y + gid_d * d_y;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
if (x[ix + i * h_x + j * w_x] != y[iy + i * h_y + j * w_y]){
eq_flag[0]++;
}
}
};
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}
__global__ void tensor_5d_equals (const int n, const int c, const int d, const int h, const int w, const DTYPE* x, const int offset_x, const int n_x, const int c_x, const int d_x, const int h_x, const int w_x, const DTYPE* y, const int offset_y, const int n_y, const int c_y, const int d_y, const int h_y, const int w_y, int* eq_flag) {
const int gid_n = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_c = blockIdx.y * blockDim.y + threadIdx.y;
const int gid_d = blockIdx.z * blockDim.z + threadIdx.z;
const bool valid = (gid_n < n) && (gid_c < c) && (gid_d < d);
if (valid) {
const int ix = offset_x + gid_n * n_x + gid_c * c_x + gid_d * d_x;
const int iy = offset_y + gid_n * n_y + gid_c * c_y + gid_d * d_y;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
if (x[ix + i * h_x + j * w_x] != y[iy + i * h_y + j * w_y]){
eq_flag[0]++;
}
}
};
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.globl _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.p2align 8
.type _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi,@function
_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi:
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x6c
s_load_b64 s[4:5], s[0:1], 0x0
s_load_b32 s6, s[0:1], 0x8
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
v_bfe_u32 v4, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s7, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s13, s7, v[2:3]
s_and_b32 s3, s3, 0xffff
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
v_mad_u64_u32 v[2:3], null, s15, s3, v[4:5]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_gt_i32_e32 vcc_lo, s4, v0
v_cmp_gt_i32_e64 s2, s5, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e64 s3, s6, v2
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_9
s_load_b32 s20, s[0:1], 0xc
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s20, 1
s_cbranch_scc1 .LBB0_9
s_clause 0x4
s_load_b128 s[12:15], s[0:1], 0x18
s_load_b64 s[2:3], s[0:1], 0x28
s_load_b256 s[4:11], s[0:1], 0x40
s_load_b128 s[16:19], s[0:1], 0x30
s_load_b32 s21, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[3:4], null, v0, s15, s[14:15]
v_mul_lo_u32 v6, v1, s2
v_mul_lo_u32 v7, v2, s3
v_mad_u64_u32 v[4:5], null, v0, s5, s[4:5]
v_mul_lo_u32 v1, v1, s6
v_mul_lo_u32 v2, v2, s7
s_ashr_i32 s3, s17, 31
s_ashr_i32 s1, s9, 31
s_mov_b32 s2, s17
s_delay_alu instid0(VALU_DEP_4)
v_add3_u32 v0, v3, v6, v7
v_mov_b32_e32 v7, 0
s_cmp_gt_i32 s21, 0
s_mov_b32 s0, s9
v_add3_u32 v2, v4, v1, v2
s_cselect_b32 s4, -1, 0
s_mov_b32 s5, 0
s_lshl_b64 s[0:1], s[0:1], 2
s_lshl_b64 s[2:3], s[2:3], 2
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_4
.p2align 6
.LBB0_3:
v_add_nc_u32_e32 v2, s8, v2
v_add_nc_u32_e32 v0, s16, v0
s_add_i32 s5, s5, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s5, s20
s_cbranch_scc1 .LBB0_9
.LBB0_4:
s_and_not1_b32 vcc_lo, exec_lo, s4
s_cbranch_vccnz .LBB0_3
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v1, 31, v0
s_mov_b32 s6, s21
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_lshlrev_b64 v[5:6], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v3, vcc_lo, s18, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s19, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v5, vcc_lo, s12, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s13, v6, vcc_lo
s_branch .LBB0_7
.p2align 6
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s7
v_add_co_u32 v3, vcc_lo, v3, s0
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v4, vcc_lo
v_add_co_u32 v5, vcc_lo, v5, s2
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v6, vcc_lo
s_add_i32 s6, s6, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s6, 0
s_cbranch_scc1 .LBB0_3
.LBB0_7:
global_load_b32 v1, v[5:6], off
global_load_b32 v8, v[3:4], off
s_mov_b32 s7, exec_lo
s_waitcnt vmcnt(0)
v_cmpx_neq_f32_e32 v1, v8
s_cbranch_execz .LBB0_6
global_load_b32 v1, v7, s[10:11]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, 1, v1
global_store_b32 v7, v1, s[10:11]
s_branch .LBB0_6
.LBB0_9:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 352
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, .Lfunc_end0-_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 52
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: by_value
- .offset: 68
.size: 4
.value_kind: by_value
- .offset: 72
.size: 4
.value_kind: by_value
- .offset: 76
.size: 4
.value_kind: by_value
- .offset: 80
.size: 4
.value_kind: by_value
- .offset: 84
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 88
.size: 8
.value_kind: global_buffer
- .offset: 96
.size: 4
.value_kind: hidden_block_count_x
- .offset: 100
.size: 4
.value_kind: hidden_block_count_y
- .offset: 104
.size: 4
.value_kind: hidden_block_count_z
- .offset: 108
.size: 2
.value_kind: hidden_group_size_x
- .offset: 110
.size: 2
.value_kind: hidden_group_size_y
- .offset: 112
.size: 2
.value_kind: hidden_group_size_z
- .offset: 114
.size: 2
.value_kind: hidden_remainder_x
- .offset: 116
.size: 2
.value_kind: hidden_remainder_y
- .offset: 118
.size: 2
.value_kind: hidden_remainder_z
- .offset: 136
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 144
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 152
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 160
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 352
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}
__global__ void tensor_5d_equals (const int n, const int c, const int d, const int h, const int w, const DTYPE* x, const int offset_x, const int n_x, const int c_x, const int d_x, const int h_x, const int w_x, const DTYPE* y, const int offset_y, const int n_y, const int c_y, const int d_y, const int h_y, const int w_y, int* eq_flag) {
const int gid_n = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_c = blockIdx.y * blockDim.y + threadIdx.y;
const int gid_d = blockIdx.z * blockDim.z + threadIdx.z;
const bool valid = (gid_n < n) && (gid_c < c) && (gid_d < d);
if (valid) {
const int ix = offset_x + gid_n * n_x + gid_c * c_x + gid_d * d_x;
const int iy = offset_y + gid_n * n_y + gid_c * c_y + gid_d * d_y;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
if (x[ix + i * h_x + j * w_x] != y[iy + i * h_y + j * w_y]){
eq_flag[0]++;
}
}
};
}
} | .text
.file "tensor_5d_equals.hip"
.globl _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi # -- Begin function _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.p2align 4, 0x90
.type _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi,@function
_Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi: # @_Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.cfi_startproc
# %bb.0:
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 80(%rsp)
leaq 16(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 256(%rsp), %rax
movq %rax, 128(%rsp)
leaq 264(%rsp), %rax
movq %rax, 136(%rsp)
leaq 272(%rsp), %rax
movq %rax, 144(%rsp)
leaq 280(%rsp), %rax
movq %rax, 152(%rsp)
leaq 288(%rsp), %rax
movq %rax, 160(%rsp)
leaq 296(%rsp), %rax
movq %rax, 168(%rsp)
leaq 304(%rsp), %rax
movq %rax, 176(%rsp)
leaq 312(%rsp), %rax
movq %rax, 184(%rsp)
leaq 320(%rsp), %rax
movq %rax, 192(%rsp)
leaq 328(%rsp), %rax
movq %rax, 200(%rsp)
leaq 336(%rsp), %rax
movq %rax, 208(%rsp)
leaq 344(%rsp), %rax
movq %rax, 216(%rsp)
leaq 352(%rsp), %rax
movq %rax, 224(%rsp)
leaq 360(%rsp), %rax
movq %rax, 232(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $264, %rsp # imm = 0x108
.cfi_adjust_cfa_offset -264
retq
.Lfunc_end0:
.size _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, .Lfunc_end0-_Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi,@object # @_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.section .rodata,"a",@progbits
.globl _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.p2align 3, 0x0
_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi:
.quad _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.size _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi"
.size .L__unnamed_1, 46
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000b91bd_00000000-6_tensor_5d_equals.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi
.type _Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi, @function
_Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi:
.LFB2051:
.cfi_startproc
endbr64
subq $296, %rsp
.cfi_def_cfa_offset 304
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movl %edx, 36(%rsp)
movl %ecx, 32(%rsp)
movl %r8d, 28(%rsp)
movq %r9, 16(%rsp)
movq 352(%rsp), %rax
movq %rax, 8(%rsp)
movq 408(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 280(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 36(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 28(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 304(%rsp), %rax
movq %rax, 160(%rsp)
leaq 312(%rsp), %rax
movq %rax, 168(%rsp)
leaq 320(%rsp), %rax
movq %rax, 176(%rsp)
leaq 328(%rsp), %rax
movq %rax, 184(%rsp)
leaq 336(%rsp), %rax
movq %rax, 192(%rsp)
leaq 344(%rsp), %rax
movq %rax, 200(%rsp)
leaq 8(%rsp), %rax
movq %rax, 208(%rsp)
leaq 360(%rsp), %rax
movq %rax, 216(%rsp)
leaq 368(%rsp), %rax
movq %rax, 224(%rsp)
leaq 376(%rsp), %rax
movq %rax, 232(%rsp)
leaq 384(%rsp), %rax
movq %rax, 240(%rsp)
leaq 392(%rsp), %rax
movq %rax, 248(%rsp)
leaq 400(%rsp), %rax
movq %rax, 256(%rsp)
movq %rsp, %rax
movq %rax, 264(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 280(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $296, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 312
pushq 56(%rsp)
.cfi_def_cfa_offset 320
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 304
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi, .-_Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi
.globl _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.type _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, @function
_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 120(%rsp)
.cfi_def_cfa_offset 24
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 72
pushq 120(%rsp)
.cfi_def_cfa_offset 80
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 88
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 96
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 104
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 112
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 120
movl 120(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 128
call _Z59__device_stub__Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPiiiiiiPKfiiiiiiS0_iiiiiiPi
addq $120, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, .-_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "tensor_5d_equals.hip"
.globl _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi # -- Begin function _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.p2align 4, 0x90
.type _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi,@function
_Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi: # @_Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.cfi_startproc
# %bb.0:
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movq %r9, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 80(%rsp)
leaq 16(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 256(%rsp), %rax
movq %rax, 128(%rsp)
leaq 264(%rsp), %rax
movq %rax, 136(%rsp)
leaq 272(%rsp), %rax
movq %rax, 144(%rsp)
leaq 280(%rsp), %rax
movq %rax, 152(%rsp)
leaq 288(%rsp), %rax
movq %rax, 160(%rsp)
leaq 296(%rsp), %rax
movq %rax, 168(%rsp)
leaq 304(%rsp), %rax
movq %rax, 176(%rsp)
leaq 312(%rsp), %rax
movq %rax, 184(%rsp)
leaq 320(%rsp), %rax
movq %rax, 192(%rsp)
leaq 328(%rsp), %rax
movq %rax, 200(%rsp)
leaq 336(%rsp), %rax
movq %rax, 208(%rsp)
leaq 344(%rsp), %rax
movq %rax, 216(%rsp)
leaq 352(%rsp), %rax
movq %rax, 224(%rsp)
leaq 360(%rsp), %rax
movq %rax, 232(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $264, %rsp # imm = 0x108
.cfi_adjust_cfa_offset -264
retq
.Lfunc_end0:
.size _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, .Lfunc_end0-_Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi,@object # @_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.section .rodata,"a",@progbits
.globl _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.p2align 3, 0x0
_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi:
.quad _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.size _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi"
.size .L__unnamed_1, 46
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16tensor_5d_equalsiiiiiPKfiiiiiiS0_iiiiiiPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void cudaDmult_kernel(unsigned int size, const double *x1, const double *x2, double *y)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
y[i] = x1[i] * x2[i];
}
} | code for sm_80
Function : _Z16cudaDmult_kerneljPKdS0_Pd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ HFMA2.MMA R9, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff097435 */
/* 0x001fd400000001ff */
/*0080*/ IMAD.WIDE.U32 R2, R0, R9, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e0009 */
/*0090*/ IMAD.WIDE.U32 R4, R0.reuse, R9.reuse, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x0c0fe400078e0009 */
/*00a0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1b00 */
/*00b0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1b00 */
/*00c0*/ IMAD.WIDE.U32 R8, R0, R9, c[0x0][0x178] ; /* 0x00005e0000087625 */
/* 0x000fe200078e0009 */
/*00d0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x000fca0000000f00 */
/*00e0*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */
/* 0x000fca00078e0200 */
/*00f0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fe20003f06070 */
/*0100*/ DMUL R6, R4, R2 ; /* 0x0000000204067228 */
/* 0x004e0e0000000000 */
/*0110*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x0011ea000c101b04 */
/*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void cudaDmult_kernel(unsigned int size, const double *x1, const double *x2, double *y)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
y[i] = x1[i] * x2[i];
}
} | .file "tmpxft_000fe001_00000000-6_cudaDmult_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd
.type _Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd, @function
_Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16cudaDmult_kerneljPKdS0_Pd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd, .-_Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd
.globl _Z16cudaDmult_kerneljPKdS0_Pd
.type _Z16cudaDmult_kerneljPKdS0_Pd, @function
_Z16cudaDmult_kerneljPKdS0_Pd:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16cudaDmult_kerneljPKdS0_Pd, .-_Z16cudaDmult_kerneljPKdS0_Pd
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16cudaDmult_kerneljPKdS0_Pd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16cudaDmult_kerneljPKdS0_Pd(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void cudaDmult_kernel(unsigned int size, const double *x1, const double *x2, double *y)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
y[i] = x1[i] * x2[i];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cudaDmult_kernel(unsigned int size, const double *x1, const double *x2, double *y)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
y[i] = x1[i] * x2[i];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cudaDmult_kernel(unsigned int size, const double *x1, const double *x2, double *y)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
y[i] = x1[i] * x2[i];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16cudaDmult_kerneljPKdS0_Pd
.globl _Z16cudaDmult_kerneljPKdS0_Pd
.p2align 8
.type _Z16cudaDmult_kerneljPKdS0_Pd,@function
_Z16cudaDmult_kerneljPKdS0_Pd:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x0
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b32 s10, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x8
s_load_b64 s[2:3], s[0:1], 0x18
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s10, s9
s_mov_b32 s9, 0
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v5, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo
v_add_co_u32 v7, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v1
global_load_b64 v[5:6], v[5:6], off
global_load_b64 v[7:8], v[7:8], off
v_add_co_u32 v3, s0, s2, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, s3, v4, s0
s_or_b32 s9, vcc_lo, s9
s_waitcnt vmcnt(0)
v_mul_f64 v[5:6], v[5:6], v[7:8]
global_store_b64 v[3:4], v[5:6], off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16cudaDmult_kerneljPKdS0_Pd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16cudaDmult_kerneljPKdS0_Pd, .Lfunc_end0-_Z16cudaDmult_kerneljPKdS0_Pd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16cudaDmult_kerneljPKdS0_Pd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16cudaDmult_kerneljPKdS0_Pd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cudaDmult_kernel(unsigned int size, const double *x1, const double *x2, double *y)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
y[i] = x1[i] * x2[i];
}
} | .text
.file "cudaDmult_kernel.hip"
.globl _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd # -- Begin function _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.p2align 4, 0x90
.type _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd,@function
_Z31__device_stub__cudaDmult_kerneljPKdS0_Pd: # @_Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16cudaDmult_kerneljPKdS0_Pd, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd, .Lfunc_end0-_Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16cudaDmult_kerneljPKdS0_Pd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16cudaDmult_kerneljPKdS0_Pd,@object # @_Z16cudaDmult_kerneljPKdS0_Pd
.section .rodata,"a",@progbits
.globl _Z16cudaDmult_kerneljPKdS0_Pd
.p2align 3, 0x0
_Z16cudaDmult_kerneljPKdS0_Pd:
.quad _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.size _Z16cudaDmult_kerneljPKdS0_Pd, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16cudaDmult_kerneljPKdS0_Pd"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16cudaDmult_kerneljPKdS0_Pd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16cudaDmult_kerneljPKdS0_Pd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0070*/ HFMA2.MMA R9, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff097435 */
/* 0x001fd400000001ff */
/*0080*/ IMAD.WIDE.U32 R2, R0, R9, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e0009 */
/*0090*/ IMAD.WIDE.U32 R4, R0.reuse, R9.reuse, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x0c0fe400078e0009 */
/*00a0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1b00 */
/*00b0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1b00 */
/*00c0*/ IMAD.WIDE.U32 R8, R0, R9, c[0x0][0x178] ; /* 0x00005e0000087625 */
/* 0x000fe200078e0009 */
/*00d0*/ MOV R11, c[0x0][0x0] ; /* 0x00000000000b7a02 */
/* 0x000fca0000000f00 */
/*00e0*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */
/* 0x000fca00078e0200 */
/*00f0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fe20003f06070 */
/*0100*/ DMUL R6, R4, R2 ; /* 0x0000000204067228 */
/* 0x004e0e0000000000 */
/*0110*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x0011ea000c101b04 */
/*0120*/ @!P0 BRA 0x70 ; /* 0xffffff4000008947 */
/* 0x000fea000383ffff */
/*0130*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0140*/ BRA 0x140; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16cudaDmult_kerneljPKdS0_Pd
.globl _Z16cudaDmult_kerneljPKdS0_Pd
.p2align 8
.type _Z16cudaDmult_kerneljPKdS0_Pd,@function
_Z16cudaDmult_kerneljPKdS0_Pd:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x2c
s_load_b32 s8, s[0:1], 0x0
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s9, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_u32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b32 s10, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x8
s_load_b64 s[2:3], s[0:1], 0x18
v_mov_b32_e32 v2, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s1, s10, s9
s_mov_b32 s9, 0
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v5, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo
v_add_co_u32 v7, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v4, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v1
global_load_b64 v[5:6], v[5:6], off
global_load_b64 v[7:8], v[7:8], off
v_add_co_u32 v3, s0, s2, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s0, s3, v4, s0
s_or_b32 s9, vcc_lo, s9
s_waitcnt vmcnt(0)
v_mul_f64 v[5:6], v[5:6], v[7:8]
global_store_b64 v[3:4], v[5:6], off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16cudaDmult_kerneljPKdS0_Pd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16cudaDmult_kerneljPKdS0_Pd, .Lfunc_end0-_Z16cudaDmult_kerneljPKdS0_Pd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16cudaDmult_kerneljPKdS0_Pd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16cudaDmult_kerneljPKdS0_Pd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000fe001_00000000-6_cudaDmult_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd
.type _Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd, @function
_Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16cudaDmult_kerneljPKdS0_Pd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd, .-_Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd
.globl _Z16cudaDmult_kerneljPKdS0_Pd
.type _Z16cudaDmult_kerneljPKdS0_Pd, @function
_Z16cudaDmult_kerneljPKdS0_Pd:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z16cudaDmult_kerneljPKdS0_PdjPKdS0_Pd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16cudaDmult_kerneljPKdS0_Pd, .-_Z16cudaDmult_kerneljPKdS0_Pd
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16cudaDmult_kerneljPKdS0_Pd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16cudaDmult_kerneljPKdS0_Pd(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudaDmult_kernel.hip"
.globl _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd # -- Begin function _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.p2align 4, 0x90
.type _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd,@function
_Z31__device_stub__cudaDmult_kerneljPKdS0_Pd: # @_Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16cudaDmult_kerneljPKdS0_Pd, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd, .Lfunc_end0-_Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16cudaDmult_kerneljPKdS0_Pd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16cudaDmult_kerneljPKdS0_Pd,@object # @_Z16cudaDmult_kerneljPKdS0_Pd
.section .rodata,"a",@progbits
.globl _Z16cudaDmult_kerneljPKdS0_Pd
.p2align 3, 0x0
_Z16cudaDmult_kerneljPKdS0_Pd:
.quad _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.size _Z16cudaDmult_kerneljPKdS0_Pd, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16cudaDmult_kerneljPKdS0_Pd"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__cudaDmult_kerneljPKdS0_Pd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16cudaDmult_kerneljPKdS0_Pd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
#define BLOCK_SIZE 512
#define BLOCK_SIZE_HOUGH 360
#define STEP_SIZE 5
#define NUMBER_OF_STEPS 360/STEP_SIZE
// Circ mask kernel storage
__constant__ int maskKernelX[NUMBER_OF_STEPS];
__constant__ int maskKernelY[NUMBER_OF_STEPS];
// Function to set precalculated relative coordinates for circle boundary coordinates
__global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale)
{
__shared__ float bufData[BLOCK_SIZE];
// Get the index of pixel
const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Load data to shared variable
bufData[threadIdx.x] = imgIn[index];
// Check that it's not out of bounds
if (index < (height*width)) {
// Find the according multiplier
float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout;
// Check that it's within required range
if (tempLevel < 0) {
bufData[threadIdx.x] = 0;
}
else if (tempLevel > 1) {
bufData[threadIdx.x] = 1;
}
else {
bufData[threadIdx.x] = tempLevel;
}
// Write data back
imgOut[index] = bufData[threadIdx.x];
}
// Synchronise threads to have the whole image fully processed for output
__syncthreads();
} | code for sm_80
Function : _Z26AdjustImageIntensityKernelPfS_iifff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0050*/ ULDC.64 UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */
/* 0x000fe20000000a00 */
/*0060*/ LEA R0, R0, R7, 0x9 ; /* 0x0000000700007211 */
/* 0x001fca00078e48ff */
/*0070*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0090*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */
/* 0x000fe2000f8e023f */
/*00a0*/ BSSY B0, 0x230 ; /* 0x0000018000007945 */
/* 0x000fea0003800000 */
/*00b0*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf06270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041d80000004800 */
/*00d0*/ @P0 BRA 0x220 ; /* 0x0000014000000947 */
/* 0x000fea0003800000 */
/*00e0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff037624 */
/* 0x000fe200078e00ff */
/*00f0*/ BSSY B1, 0x1f0 ; /* 0x000000f000017945 */
/* 0x000fe20003800000 */
/*0100*/ FADD R2, R2, -c[0x0][0x178] ; /* 0x80005e0002027621 */
/* 0x001fc80000000000 */
/*0110*/ FFMA R2, R2, R3, c[0x0][0x17c] ; /* 0x00005f0002027623 */
/* 0x000fe20000000003 */
/*0120*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fc80000011400 */
/*0130*/ FSETP.GEU.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720b */
/* 0x000fda0003f0e000 */
/*0140*/ @!P0 BRA 0x1c0 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*0150*/ MOV R5, R2 ; /* 0x0000000200057202 */
/* 0x000fc80000000f00 */
/*0160*/ FSETP.GT.AND P0, PT, R5, 1, PT ; /* 0x3f8000000500780b */
/* 0x000fda0003f04000 */
/*0170*/ @P0 IMAD.MOV.U32 R2, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff020424 */
/* 0x000fe200078e00ff */
/*0180*/ @!P0 STS [R7.X4], R5 ; /* 0x0000000507008388 */
/* 0x0001e80000004800 */
/*0190*/ @P0 STS [R7.X4], R2 ; /* 0x0000000207000388 */
/* 0x0001e20000004800 */
/*01a0*/ @P0 MOV R5, 0x3f800000 ; /* 0x3f80000000050802 */
/* 0x000fe20000000f00 */
/*01b0*/ BRA 0x1e0 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*01c0*/ STS [R7.X4], RZ ; /* 0x000000ff07007388 */
/* 0x0001e20000004800 */
/*01d0*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */
/* 0x000fc600078e00ff */
/*01e0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*01f0*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */
/* 0x001fc800078010ff */
/*0200*/ LEA.HI.X R3, R0, c[0x0][0x164], R3, 0x2, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f1403 */
/*0210*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0001e8000c101906 */
/*0220*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0230*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0240*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0250*/ BRA 0x250; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
#define BLOCK_SIZE 512
#define BLOCK_SIZE_HOUGH 360
#define STEP_SIZE 5
#define NUMBER_OF_STEPS 360/STEP_SIZE
// Circ mask kernel storage
__constant__ int maskKernelX[NUMBER_OF_STEPS];
__constant__ int maskKernelY[NUMBER_OF_STEPS];
// Function to set precalculated relative coordinates for circle boundary coordinates
__global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale)
{
__shared__ float bufData[BLOCK_SIZE];
// Get the index of pixel
const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Load data to shared variable
bufData[threadIdx.x] = imgIn[index];
// Check that it's not out of bounds
if (index < (height*width)) {
// Find the according multiplier
float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout;
// Check that it's within required range
if (tempLevel < 0) {
bufData[threadIdx.x] = 0;
}
else if (tempLevel > 1) {
bufData[threadIdx.x] = 1;
}
else {
bufData[threadIdx.x] = tempLevel;
}
// Write data back
imgOut[index] = bufData[threadIdx.x];
}
// Synchronise threads to have the whole image fully processed for output
__syncthreads();
} | .file "tmpxft_00002d43_00000000-6_AdjustImageIntensityKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff
.type _Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff, @function
_Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movss %xmm2, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z26AdjustImageIntensityKernelPfS_iifff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff, .-_Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff
.globl _Z26AdjustImageIntensityKernelPfS_iifff
.type _Z26AdjustImageIntensityKernelPfS_iifff, @function
_Z26AdjustImageIntensityKernelPfS_iifff:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z26AdjustImageIntensityKernelPfS_iifff, .-_Z26AdjustImageIntensityKernelPfS_iifff
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z26AdjustImageIntensityKernelPfS_iifff"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "maskKernelX"
.LC2:
.string "maskKernelY"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z26AdjustImageIntensityKernelPfS_iifff(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $288, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL11maskKernelX(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $288, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL11maskKernelY(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL11maskKernelY
.comm _ZL11maskKernelY,288,32
.local _ZL11maskKernelX
.comm _ZL11maskKernelX,288,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
#define BLOCK_SIZE 512
#define BLOCK_SIZE_HOUGH 360
#define STEP_SIZE 5
#define NUMBER_OF_STEPS 360/STEP_SIZE
// Circ mask kernel storage
__constant__ int maskKernelX[NUMBER_OF_STEPS];
__constant__ int maskKernelY[NUMBER_OF_STEPS];
// Function to set precalculated relative coordinates for circle boundary coordinates
__global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale)
{
__shared__ float bufData[BLOCK_SIZE];
// Get the index of pixel
const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Load data to shared variable
bufData[threadIdx.x] = imgIn[index];
// Check that it's not out of bounds
if (index < (height*width)) {
// Find the according multiplier
float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout;
// Check that it's within required range
if (tempLevel < 0) {
bufData[threadIdx.x] = 0;
}
else if (tempLevel > 1) {
bufData[threadIdx.x] = 1;
}
else {
bufData[threadIdx.x] = tempLevel;
}
// Write data back
imgOut[index] = bufData[threadIdx.x];
}
// Synchronise threads to have the whole image fully processed for output
__syncthreads();
} | #include <hip/hip_runtime.h>
#include "includes.h"
#define BLOCK_SIZE 512
#define BLOCK_SIZE_HOUGH 360
#define STEP_SIZE 5
#define NUMBER_OF_STEPS 360/STEP_SIZE
// Circ mask kernel storage
__constant__ int maskKernelX[NUMBER_OF_STEPS];
__constant__ int maskKernelY[NUMBER_OF_STEPS];
// Function to set precalculated relative coordinates for circle boundary coordinates
__global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale)
{
__shared__ float bufData[BLOCK_SIZE];
// Get the index of pixel
const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Load data to shared variable
bufData[threadIdx.x] = imgIn[index];
// Check that it's not out of bounds
if (index < (height*width)) {
// Find the according multiplier
float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout;
// Check that it's within required range
if (tempLevel < 0) {
bufData[threadIdx.x] = 0;
}
else if (tempLevel > 1) {
bufData[threadIdx.x] = 1;
}
else {
bufData[threadIdx.x] = tempLevel;
}
// Write data back
imgOut[index] = bufData[threadIdx.x];
}
// Synchronise threads to have the whole image fully processed for output
__syncthreads();
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
#define BLOCK_SIZE 512
#define BLOCK_SIZE_HOUGH 360
#define STEP_SIZE 5
#define NUMBER_OF_STEPS 360/STEP_SIZE
// Circ mask kernel storage
__constant__ int maskKernelX[NUMBER_OF_STEPS];
__constant__ int maskKernelY[NUMBER_OF_STEPS];
// Function to set precalculated relative coordinates for circle boundary coordinates
__global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale)
{
__shared__ float bufData[BLOCK_SIZE];
// Get the index of pixel
const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Load data to shared variable
bufData[threadIdx.x] = imgIn[index];
// Check that it's not out of bounds
if (index < (height*width)) {
// Find the according multiplier
float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout;
// Check that it's within required range
if (tempLevel < 0) {
bufData[threadIdx.x] = 0;
}
else if (tempLevel > 1) {
bufData[threadIdx.x] = 1;
}
else {
bufData[threadIdx.x] = tempLevel;
}
// Write data back
imgOut[index] = bufData[threadIdx.x];
}
// Synchronise threads to have the whole image fully processed for output
__syncthreads();
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z26AdjustImageIntensityKernelPfS_iifff
.globl _Z26AdjustImageIntensityKernelPfS_iifff
.p2align 8
.type _Z26AdjustImageIntensityKernelPfS_iifff,@function
_Z26AdjustImageIntensityKernelPfS_iifff:
s_load_b128 s[4:7], s[0:1], 0x8
v_lshl_add_u32 v4, s15, 9, v0
v_lshlrev_b32_e32 v0, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[1:2], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v2, vcc_lo
s_mul_i32 s2, s7, s6
s_delay_alu instid0(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v4
global_load_b32 v3, v[5:6], off
s_waitcnt vmcnt(0)
ds_store_b32 v0, v3
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b64 s[4:5], s[0:1], 0x18
s_load_b32 s3, s[0:1], 0x20
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_subrev_f32_e32 v3, s4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v3, v3, s3, s5
v_cmp_nlt_f32_e32 vcc_lo, 1.0, v3
v_cndmask_b32_e32 v4, 1.0, v3, vcc_lo
v_cmp_ngt_f32_e32 vcc_lo, 0, v3
s_delay_alu instid0(VALU_DEP_2)
v_cndmask_b32_e32 v3, 0, v4, vcc_lo
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
ds_store_b32 v0, v3
global_store_b32 v[1:2], v3, off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s2
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z26AdjustImageIntensityKernelPfS_iifff
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 36
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z26AdjustImageIntensityKernelPfS_iifff, .Lfunc_end0-_Z26AdjustImageIntensityKernelPfS_iifff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected maskKernelX
.type maskKernelX,@object
.section .bss,"aw",@nobits
.globl maskKernelX
.p2align 4, 0x0
maskKernelX:
.zero 288
.size maskKernelX, 288
.protected maskKernelY
.type maskKernelY,@object
.globl maskKernelY
.p2align 4, 0x0
maskKernelY:
.zero 288
.size maskKernelY, 288
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 36
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z26AdjustImageIntensityKernelPfS_iifff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z26AdjustImageIntensityKernelPfS_iifff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
#define BLOCK_SIZE 512
#define BLOCK_SIZE_HOUGH 360
#define STEP_SIZE 5
#define NUMBER_OF_STEPS 360/STEP_SIZE
// Circ mask kernel storage
__constant__ int maskKernelX[NUMBER_OF_STEPS];
__constant__ int maskKernelY[NUMBER_OF_STEPS];
// Function to set precalculated relative coordinates for circle boundary coordinates
__global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale)
{
__shared__ float bufData[BLOCK_SIZE];
// Get the index of pixel
const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
// Load data to shared variable
bufData[threadIdx.x] = imgIn[index];
// Check that it's not out of bounds
if (index < (height*width)) {
// Find the according multiplier
float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout;
// Check that it's within required range
if (tempLevel < 0) {
bufData[threadIdx.x] = 0;
}
else if (tempLevel > 1) {
bufData[threadIdx.x] = 1;
}
else {
bufData[threadIdx.x] = tempLevel;
}
// Write data back
imgOut[index] = bufData[threadIdx.x];
}
// Synchronise threads to have the whole image fully processed for output
__syncthreads();
} | .text
.file "AdjustImageIntensityKernel.hip"
.globl _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff # -- Begin function _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.p2align 4, 0x90
.type _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff,@function
_Z41__device_stub__AdjustImageIntensityKernelPfS_iifff: # @_Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movss %xmm2, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 24(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z26AdjustImageIntensityKernelPfS_iifff, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff, .Lfunc_end0-_Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26AdjustImageIntensityKernelPfS_iifff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $maskKernelX, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $288, %r9d # imm = 0x120
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $maskKernelY, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $288, %r9d # imm = 0x120
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type maskKernelX,@object # @maskKernelX
.local maskKernelX
.comm maskKernelX,288,16
.type maskKernelY,@object # @maskKernelY
.local maskKernelY
.comm maskKernelY,288,16
.type _Z26AdjustImageIntensityKernelPfS_iifff,@object # @_Z26AdjustImageIntensityKernelPfS_iifff
.section .rodata,"a",@progbits
.globl _Z26AdjustImageIntensityKernelPfS_iifff
.p2align 3, 0x0
_Z26AdjustImageIntensityKernelPfS_iifff:
.quad _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.size _Z26AdjustImageIntensityKernelPfS_iifff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z26AdjustImageIntensityKernelPfS_iifff"
.size .L__unnamed_1, 40
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "maskKernelX"
.size .L__unnamed_2, 12
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "maskKernelY"
.size .L__unnamed_3, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym maskKernelX
.addrsig_sym maskKernelY
.addrsig_sym _Z26AdjustImageIntensityKernelPfS_iifff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z26AdjustImageIntensityKernelPfS_iifff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0050*/ ULDC.64 UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */
/* 0x000fe20000000a00 */
/*0060*/ LEA R0, R0, R7, 0x9 ; /* 0x0000000700007211 */
/* 0x001fca00078e48ff */
/*0070*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0203 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0090*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */
/* 0x000fe2000f8e023f */
/*00a0*/ BSSY B0, 0x230 ; /* 0x0000018000007945 */
/* 0x000fea0003800000 */
/*00b0*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf06270 */
/*00c0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041d80000004800 */
/*00d0*/ @P0 BRA 0x220 ; /* 0x0000014000000947 */
/* 0x000fea0003800000 */
/*00e0*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff037624 */
/* 0x000fe200078e00ff */
/*00f0*/ BSSY B1, 0x1f0 ; /* 0x000000f000017945 */
/* 0x000fe20003800000 */
/*0100*/ FADD R2, R2, -c[0x0][0x178] ; /* 0x80005e0002027621 */
/* 0x001fc80000000000 */
/*0110*/ FFMA R2, R2, R3, c[0x0][0x17c] ; /* 0x00005f0002027623 */
/* 0x000fe20000000003 */
/*0120*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */
/* 0x000fc80000011400 */
/*0130*/ FSETP.GEU.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720b */
/* 0x000fda0003f0e000 */
/*0140*/ @!P0 BRA 0x1c0 ; /* 0x0000007000008947 */
/* 0x000fea0003800000 */
/*0150*/ MOV R5, R2 ; /* 0x0000000200057202 */
/* 0x000fc80000000f00 */
/*0160*/ FSETP.GT.AND P0, PT, R5, 1, PT ; /* 0x3f8000000500780b */
/* 0x000fda0003f04000 */
/*0170*/ @P0 IMAD.MOV.U32 R2, RZ, RZ, 0x3f800000 ; /* 0x3f800000ff020424 */
/* 0x000fe200078e00ff */
/*0180*/ @!P0 STS [R7.X4], R5 ; /* 0x0000000507008388 */
/* 0x0001e80000004800 */
/*0190*/ @P0 STS [R7.X4], R2 ; /* 0x0000000207000388 */
/* 0x0001e20000004800 */
/*01a0*/ @P0 MOV R5, 0x3f800000 ; /* 0x3f80000000050802 */
/* 0x000fe20000000f00 */
/*01b0*/ BRA 0x1e0 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*01c0*/ STS [R7.X4], RZ ; /* 0x000000ff07007388 */
/* 0x0001e20000004800 */
/*01d0*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */
/* 0x000fc600078e00ff */
/*01e0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*01f0*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */
/* 0x001fc800078010ff */
/*0200*/ LEA.HI.X R3, R0, c[0x0][0x164], R3, 0x2, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f1403 */
/*0210*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0001e8000c101906 */
/*0220*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0230*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0240*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0250*/ BRA 0x250; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z26AdjustImageIntensityKernelPfS_iifff
.globl _Z26AdjustImageIntensityKernelPfS_iifff
.p2align 8
.type _Z26AdjustImageIntensityKernelPfS_iifff,@function
_Z26AdjustImageIntensityKernelPfS_iifff:
s_load_b128 s[4:7], s[0:1], 0x8
v_lshl_add_u32 v4, s15, 9, v0
v_lshlrev_b32_e32 v0, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b64 v[1:2], 2, v[4:5]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v2, vcc_lo
s_mul_i32 s2, s7, s6
s_delay_alu instid0(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v4
global_load_b32 v3, v[5:6], off
s_waitcnt vmcnt(0)
ds_store_b32 v0, v3
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b64 s[4:5], s[0:1], 0x18
s_load_b32 s3, s[0:1], 0x20
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_subrev_f32_e32 v3, s4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v3, v3, s3, s5
v_cmp_nlt_f32_e32 vcc_lo, 1.0, v3
v_cndmask_b32_e32 v4, 1.0, v3, vcc_lo
v_cmp_ngt_f32_e32 vcc_lo, 0, v3
s_delay_alu instid0(VALU_DEP_2)
v_cndmask_b32_e32 v3, 0, v4, vcc_lo
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
ds_store_b32 v0, v3
global_store_b32 v[1:2], v3, off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s2
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z26AdjustImageIntensityKernelPfS_iifff
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 36
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z26AdjustImageIntensityKernelPfS_iifff, .Lfunc_end0-_Z26AdjustImageIntensityKernelPfS_iifff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected maskKernelX
.type maskKernelX,@object
.section .bss,"aw",@nobits
.globl maskKernelX
.p2align 4, 0x0
maskKernelX:
.zero 288
.size maskKernelX, 288
.protected maskKernelY
.type maskKernelY,@object
.globl maskKernelY
.p2align 4, 0x0
maskKernelY:
.zero 288
.size maskKernelY, 288
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 36
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z26AdjustImageIntensityKernelPfS_iifff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z26AdjustImageIntensityKernelPfS_iifff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00002d43_00000000-6_AdjustImageIntensityKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff
.type _Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff, @function
_Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movss %xmm2, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 16(%rsp), %rax
movq %rax, 152(%rsp)
leaq 12(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z26AdjustImageIntensityKernelPfS_iifff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff, .-_Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff
.globl _Z26AdjustImageIntensityKernelPfS_iifff
.type _Z26AdjustImageIntensityKernelPfS_iifff, @function
_Z26AdjustImageIntensityKernelPfS_iifff:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z26AdjustImageIntensityKernelPfS_iifffPfS_iifff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z26AdjustImageIntensityKernelPfS_iifff, .-_Z26AdjustImageIntensityKernelPfS_iifff
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z26AdjustImageIntensityKernelPfS_iifff"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "maskKernelX"
.LC2:
.string "maskKernelY"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z26AdjustImageIntensityKernelPfS_iifff(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $288, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL11maskKernelX(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $288, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL11maskKernelY(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL11maskKernelY
.comm _ZL11maskKernelY,288,32
.local _ZL11maskKernelX
.comm _ZL11maskKernelX,288,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "AdjustImageIntensityKernel.hip"
.globl _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff # -- Begin function _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.p2align 4, 0x90
.type _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff,@function
_Z41__device_stub__AdjustImageIntensityKernelPfS_iifff: # @_Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movss %xmm0, 20(%rsp)
movss %xmm1, 16(%rsp)
movss %xmm2, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 24(%rsp), %rax
movq %rax, 120(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z26AdjustImageIntensityKernelPfS_iifff, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff, .Lfunc_end0-_Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26AdjustImageIntensityKernelPfS_iifff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $maskKernelX, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $288, %r9d # imm = 0x120
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $maskKernelY, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $288, %r9d # imm = 0x120
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type maskKernelX,@object # @maskKernelX
.local maskKernelX
.comm maskKernelX,288,16
.type maskKernelY,@object # @maskKernelY
.local maskKernelY
.comm maskKernelY,288,16
.type _Z26AdjustImageIntensityKernelPfS_iifff,@object # @_Z26AdjustImageIntensityKernelPfS_iifff
.section .rodata,"a",@progbits
.globl _Z26AdjustImageIntensityKernelPfS_iifff
.p2align 3, 0x0
_Z26AdjustImageIntensityKernelPfS_iifff:
.quad _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.size _Z26AdjustImageIntensityKernelPfS_iifff, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z26AdjustImageIntensityKernelPfS_iifff"
.size .L__unnamed_1, 40
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "maskKernelX"
.size .L__unnamed_2, 12
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "maskKernelY"
.size .L__unnamed_3, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z41__device_stub__AdjustImageIntensityKernelPfS_iifff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym maskKernelX
.addrsig_sym maskKernelY
.addrsig_sym _Z26AdjustImageIntensityKernelPfS_iifff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__
void gIncr(float *d, size_t ind, float delta) {
d[ind] += delta;
}
__global__
void gSum(float *d, size_t size, float *total) {
total = 0;
for (size_t i = 0; i < size; ++i) {
*total += d[i];
}
} | code for sm_80
Function : _Z4gSumPfmS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ ISETP.NE.U32.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fc80003f05070 */
/*0020*/ ISETP.NE.AND.EX P0, PT, RZ, c[0x0][0x16c], PT, P0 ; /* 0x00005b00ff007a0c */
/* 0x000fda0003f05300 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ BPT.TRAP 0x1 ; /* 0x000000040000795c */
/* 0x000fea0000300000 */
/*0050*/ BRA 0x50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5gIncrPfmf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe40000000a00 */
/*0020*/ ULDC.64 UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ ULEA UR4, UP0, UR6, UR4, 0x2 ; /* 0x0000000406047291 */
/* 0x000fc8000f80103f */
/*0040*/ ULEA.HI.X UR5, UR6, UR5, UR7, 0x2, UP0 ; /* 0x0000000506057291 */
/* 0x000fe400080f1407 */
/*0050*/ MOV R2, UR4 ; /* 0x0000000400027c02 */
/* 0x000fc80008000f00 */
/*0060*/ MOV R3, UR5 ; /* 0x0000000500037c02 */
/* 0x000fe20008000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*0080*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0090*/ FADD R5, R0, c[0x0][0x170] ; /* 0x00005c0000057621 */
/* 0x004fca0000000000 */
/*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__
void gIncr(float *d, size_t ind, float delta) {
d[ind] += delta;
}
__global__
void gSum(float *d, size_t size, float *total) {
total = 0;
for (size_t i = 0; i < size; ++i) {
*total += d[i];
}
} | .file "tmpxft_000acc46_00000000-6_sum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z5gIncrPfmfPfmf
.type _Z26__device_stub__Z5gIncrPfmfPfmf, @function
_Z26__device_stub__Z5gIncrPfmfPfmf:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5gIncrPfmf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z26__device_stub__Z5gIncrPfmfPfmf, .-_Z26__device_stub__Z5gIncrPfmfPfmf
.globl _Z5gIncrPfmf
.type _Z5gIncrPfmf, @function
_Z5gIncrPfmf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z5gIncrPfmfPfmf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z5gIncrPfmf, .-_Z5gIncrPfmf
.globl _Z26__device_stub__Z4gSumPfmS_PfmS_
.type _Z26__device_stub__Z4gSumPfmS_PfmS_, @function
_Z26__device_stub__Z4gSumPfmS_PfmS_:
.LFB2053:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4gSumPfmS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z26__device_stub__Z4gSumPfmS_PfmS_, .-_Z26__device_stub__Z4gSumPfmS_PfmS_
.globl _Z4gSumPfmS_
.type _Z4gSumPfmS_, @function
_Z4gSumPfmS_:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z4gSumPfmS_PfmS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z4gSumPfmS_, .-_Z4gSumPfmS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4gSumPfmS_"
.LC1:
.string "_Z5gIncrPfmf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4gSumPfmS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z5gIncrPfmf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__
void gIncr(float *d, size_t ind, float delta) {
d[ind] += delta;
}
__global__
void gSum(float *d, size_t size, float *total) {
total = 0;
for (size_t i = 0; i < size; ++i) {
*total += d[i];
}
} | #include <hip/hip_runtime.h>
__global__
void gIncr(float *d, size_t ind, float delta) {
d[ind] += delta;
}
__global__
void gSum(float *d, size_t size, float *total) {
total = 0;
for (size_t i = 0; i < size; ++i) {
*total += d[i];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__
void gIncr(float *d, size_t ind, float delta) {
d[ind] += delta;
}
__global__
void gSum(float *d, size_t size, float *total) {
total = 0;
for (size_t i = 0; i < size; ++i) {
*total += d[i];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5gIncrPfmf
.globl _Z5gIncrPfmf
.p2align 8
.type _Z5gIncrPfmf,@function
_Z5gIncrPfmf:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s2, s[0:1], 0x10
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_lshl_b64 s[0:1], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s0, s4, s0
s_addc_u32 s1, s5, s1
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_add_f32_e64 v1, s3, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5gIncrPfmf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5gIncrPfmf, .Lfunc_end0-_Z5gIncrPfmf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4gSumPfmS_
.globl _Z4gSumPfmS_
.p2align 8
.type _Z4gSumPfmS_,@function
_Z4gSumPfmS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4gSumPfmS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4gSumPfmS_, .Lfunc_end1-_Z4gSumPfmS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5gIncrPfmf
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z5gIncrPfmf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4gSumPfmS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z4gSumPfmS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__
void gIncr(float *d, size_t ind, float delta) {
d[ind] += delta;
}
__global__
void gSum(float *d, size_t size, float *total) {
total = 0;
for (size_t i = 0; i < size; ++i) {
*total += d[i];
}
} | .text
.file "sum.hip"
.globl _Z20__device_stub__gIncrPfmf # -- Begin function _Z20__device_stub__gIncrPfmf
.p2align 4, 0x90
.type _Z20__device_stub__gIncrPfmf,@function
_Z20__device_stub__gIncrPfmf: # @_Z20__device_stub__gIncrPfmf
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5gIncrPfmf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z20__device_stub__gIncrPfmf, .Lfunc_end0-_Z20__device_stub__gIncrPfmf
.cfi_endproc
# -- End function
.globl _Z19__device_stub__gSumPfmS_ # -- Begin function _Z19__device_stub__gSumPfmS_
.p2align 4, 0x90
.type _Z19__device_stub__gSumPfmS_,@function
_Z19__device_stub__gSumPfmS_: # @_Z19__device_stub__gSumPfmS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4gSumPfmS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z19__device_stub__gSumPfmS_, .Lfunc_end1-_Z19__device_stub__gSumPfmS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5gIncrPfmf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4gSumPfmS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5gIncrPfmf,@object # @_Z5gIncrPfmf
.section .rodata,"a",@progbits
.globl _Z5gIncrPfmf
.p2align 3, 0x0
_Z5gIncrPfmf:
.quad _Z20__device_stub__gIncrPfmf
.size _Z5gIncrPfmf, 8
.type _Z4gSumPfmS_,@object # @_Z4gSumPfmS_
.globl _Z4gSumPfmS_
.p2align 3, 0x0
_Z4gSumPfmS_:
.quad _Z19__device_stub__gSumPfmS_
.size _Z4gSumPfmS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5gIncrPfmf"
.size .L__unnamed_1, 13
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z4gSumPfmS_"
.size .L__unnamed_2, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__gIncrPfmf
.addrsig_sym _Z19__device_stub__gSumPfmS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5gIncrPfmf
.addrsig_sym _Z4gSumPfmS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z4gSumPfmS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ ISETP.NE.U32.AND P0, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fc80003f05070 */
/*0020*/ ISETP.NE.AND.EX P0, PT, RZ, c[0x0][0x16c], PT, P0 ; /* 0x00005b00ff007a0c */
/* 0x000fda0003f05300 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ BPT.TRAP 0x1 ; /* 0x000000040000795c */
/* 0x000fea0000300000 */
/*0050*/ BRA 0x50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z5gIncrPfmf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe40000000a00 */
/*0020*/ ULDC.64 UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ ULEA UR4, UP0, UR6, UR4, 0x2 ; /* 0x0000000406047291 */
/* 0x000fc8000f80103f */
/*0040*/ ULEA.HI.X UR5, UR6, UR5, UR7, 0x2, UP0 ; /* 0x0000000506057291 */
/* 0x000fe400080f1407 */
/*0050*/ MOV R2, UR4 ; /* 0x0000000400027c02 */
/* 0x000fc80008000f00 */
/*0060*/ MOV R3, UR5 ; /* 0x0000000500037c02 */
/* 0x000fe20008000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*0080*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0090*/ FADD R5, R0, c[0x0][0x170] ; /* 0x00005c0000057621 */
/* 0x004fca0000000000 */
/*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5gIncrPfmf
.globl _Z5gIncrPfmf
.p2align 8
.type _Z5gIncrPfmf,@function
_Z5gIncrPfmf:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s2, s[0:1], 0x10
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_lshl_b64 s[0:1], s[6:7], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s0, s4, s0
s_addc_u32 s1, s5, s1
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_add_f32_e64 v1, s3, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5gIncrPfmf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5gIncrPfmf, .Lfunc_end0-_Z5gIncrPfmf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4gSumPfmS_
.globl _Z4gSumPfmS_
.p2align 8
.type _Z4gSumPfmS_,@function
_Z4gSumPfmS_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4gSumPfmS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4gSumPfmS_, .Lfunc_end1-_Z4gSumPfmS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5gIncrPfmf
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z5gIncrPfmf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4gSumPfmS_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z4gSumPfmS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000acc46_00000000-6_sum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z5gIncrPfmfPfmf
.type _Z26__device_stub__Z5gIncrPfmfPfmf, @function
_Z26__device_stub__Z5gIncrPfmfPfmf:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z5gIncrPfmf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z26__device_stub__Z5gIncrPfmfPfmf, .-_Z26__device_stub__Z5gIncrPfmfPfmf
.globl _Z5gIncrPfmf
.type _Z5gIncrPfmf, @function
_Z5gIncrPfmf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z5gIncrPfmfPfmf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z5gIncrPfmf, .-_Z5gIncrPfmf
.globl _Z26__device_stub__Z4gSumPfmS_PfmS_
.type _Z26__device_stub__Z4gSumPfmS_PfmS_, @function
_Z26__device_stub__Z4gSumPfmS_PfmS_:
.LFB2053:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4gSumPfmS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z26__device_stub__Z4gSumPfmS_PfmS_, .-_Z26__device_stub__Z4gSumPfmS_PfmS_
.globl _Z4gSumPfmS_
.type _Z4gSumPfmS_, @function
_Z4gSumPfmS_:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z4gSumPfmS_PfmS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z4gSumPfmS_, .-_Z4gSumPfmS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4gSumPfmS_"
.LC1:
.string "_Z5gIncrPfmf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4gSumPfmS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z5gIncrPfmf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sum.hip"
.globl _Z20__device_stub__gIncrPfmf # -- Begin function _Z20__device_stub__gIncrPfmf
.p2align 4, 0x90
.type _Z20__device_stub__gIncrPfmf,@function
_Z20__device_stub__gIncrPfmf: # @_Z20__device_stub__gIncrPfmf
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z5gIncrPfmf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z20__device_stub__gIncrPfmf, .Lfunc_end0-_Z20__device_stub__gIncrPfmf
.cfi_endproc
# -- End function
.globl _Z19__device_stub__gSumPfmS_ # -- Begin function _Z19__device_stub__gSumPfmS_
.p2align 4, 0x90
.type _Z19__device_stub__gSumPfmS_,@function
_Z19__device_stub__gSumPfmS_: # @_Z19__device_stub__gSumPfmS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4gSumPfmS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z19__device_stub__gSumPfmS_, .Lfunc_end1-_Z19__device_stub__gSumPfmS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5gIncrPfmf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4gSumPfmS_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5gIncrPfmf,@object # @_Z5gIncrPfmf
.section .rodata,"a",@progbits
.globl _Z5gIncrPfmf
.p2align 3, 0x0
_Z5gIncrPfmf:
.quad _Z20__device_stub__gIncrPfmf
.size _Z5gIncrPfmf, 8
.type _Z4gSumPfmS_,@object # @_Z4gSumPfmS_
.globl _Z4gSumPfmS_
.p2align 3, 0x0
_Z4gSumPfmS_:
.quad _Z19__device_stub__gSumPfmS_
.size _Z4gSumPfmS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z5gIncrPfmf"
.size .L__unnamed_1, 13
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z4gSumPfmS_"
.size .L__unnamed_2, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__gIncrPfmf
.addrsig_sym _Z19__device_stub__gSumPfmS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z5gIncrPfmf
.addrsig_sym _Z4gSumPfmS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <cstdlib>
#include <cuda.h>
#include <map>
#include <fstream>
using namespace std;
#define delta 10
#define rows 1000
#define columns 1000
int* findBarrier(int x, int y, int * Map[columns]){
//y-координаты препятствий
int *yCoordinates = new int [columns];
//текущее значение разности м-у двумя точками по вертикали
int currDelta = 0;
for(int i = 0; i < columns; i++){
//рассматриваем область выше параллели, на которой стоит робот
for(int j = y; j > 0; j--){
currDelta = Map[j][i] - Map[j-1][i];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > delta){
yCoordinates[i] = j-1;
break;
}
}
}
return yCoordinates;
}
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//Считаем идентификатор текущего потока
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //Работа со столбцами по потокам
{
for (int j = y; j > 0; j--) //Здесь работа со строками
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
}
//int argc, char* argv[]
int main(int argc, char* argv[]){
map<int,float> Results;
//int numbOfBlock;
//int numbOfThread;
for (int numbOfBlock = 1; numbOfBlock <= 1; numbOfBlock++ )
{
for(int numbOfThread = 1; numbOfThread <= columns; numbOfThread+=10){
//if(columns % numbOfBlock == 0){
//numbOfThread = 1;
//if (argc > 1)
// numbOfBlock = atoi(argv[1]);
//else
// numbOfBlock = 1;
//if (argc > 2)
// numbOfThread = atoi(argv[2]);
//else
// numbOfThread = 1;
//левая и правая границы высот для генерации
const int r_left = -5, r_right = 5;
//Координаты робота на карте
//int x = rows - 1;
int y = columns - 1;
//Карта высот
int **Map = new int* [rows];
int* resH = (int*)malloc(rows*columns * sizeof(int));
for (int i=0; i<columns; i++)
resH[i] = 0;
//Заполнение карты случайыми высотами
for(int i = 0; i < rows; i++){
Map[i] = new int [columns];
for(int j = 0; j < columns; j++){
if(j!=0)
Map[i][j] = rand()%(r_left - r_right) + r_left;
else
Map[i][j] = 20;
}
}
//Помещаем двумерный массив высот в одномерный
int* dataH = (int*)malloc(columns * rows * sizeof(int));
for (int i=0; i<columns; i++)
for (int j=0; j<rows; j++)
dataH[columns*i + j] = Map[i][j];
cudaEvent_t start, stopCopyTo, stopWork, stopCopyFrom;
cudaEventCreate(&start);
cudaEventCreate(&stopCopyTo);
cudaEventCreate(&stopWork);
cudaEventCreate(&stopCopyFrom);
int* dataDevice;
int* resDevice;
//Выделяем память на GPU под созданный массив
cudaMalloc((void**)&dataDevice, (rows * columns) * sizeof(int));
cudaMalloc((void**)&resDevice, (columns) * sizeof(int));
// Копирование исходных данных в GPU для обработки
cudaEventRecord(start);
cudaMemcpy(dataDevice, dataH, (rows * columns) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(resDevice, resH, (columns)*sizeof(int), cudaMemcpyHostToDevice);
dim3 threads = dim3(numbOfThread);
dim3 blocks = dim3(numbOfBlock);
cudaEventRecord(stopCopyTo);
SomeKernel<<<blocks, threads>>>( resDevice,
dataDevice,
columns,
rows,
y,
(rows * columns)/(numbOfBlock*numbOfThread));
cudaEventRecord(stopWork);
cudaMemcpy(dataH, dataDevice, (rows * columns) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(resH, resDevice, (columns) * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stopCopyFrom);
//cout << "Result vector: ";
// for (int i=0; i<5; i++)
// {
// cout << resH[i] << " ";
// }
//
// cout<<'\t';
for(int i = 0; i < columns; i++){
delete[] Map[i];
}
float t1,t2,t3;
cudaEventElapsedTime(&t1, start, stopCopyTo);
cudaEventElapsedTime(&t2, stopCopyTo, stopWork);
cudaEventElapsedTime(&t3, stopWork, stopCopyFrom);
//cout<<"Threads: "<< numbOfBlock*numbOfThread <<"\tTime: "<<t2<<endl;
Results.insert(pair<int,float>(numbOfBlock*numbOfThread,t2));
}
}
map<int,float>::iterator it;
ofstream fout("test0.txt");
for (it = Results.begin(); it != Results.end(); ++it)///вывод на экран
{
fout << it->first << ' ' << it->second << endl;
}
fout.close();
//cout << "Количество точек: \t\t" << columns*rows << endl;
//cout << "Количество потоков: \t\t" << numbOfBlock*numbOfThread << endl;
//cout << "Время копирования на GPU: \t" << t1 << endl;
//cout << "Время выполенния: \t\t" << t2 << endl;
//cout << "Время копирования с GPU: \t" << t3 << endl;
return 0;
} | code for sm_80
Function : _Z10SomeKernelPiS_iiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0040*/ IMAD R0, R0, c[0x0][0x17c], RZ ; /* 0x00005f0000007a24 */
/* 0x000fca00078e02ff */
/*0050*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x040fe40003f06270 */
/*0060*/ IADD3 R7, R0, c[0x0][0x17c], RZ ; /* 0x00005f0000077a10 */
/* 0x000fc80007ffe0ff */
/*0070*/ ISETP.GE.U32.OR P0, PT, R0, R7, P0 ; /* 0x000000070000720c */
/* 0x000fda0000706470 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ BSSY B0, 0x220 ; /* 0x0000017000007945 */
/* 0x000fe20003800000 */
/*00b0*/ BSSY B1, 0x1f0 ; /* 0x0000013000017945 */
/* 0x000fe20003800000 */
/*00c0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */
/* 0x001fca00078e00ff */
/*00d0*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fda0003f06270 */
/*00e0*/ @!P0 BREAK B1 ; /* 0x0000000000018942 */
/* 0x000fe20003800000 */
/*00f0*/ @!P0 BRA 0x210 ; /* 0x0000011000008947 */
/* 0x000fea0003800000 */
/*0100*/ MOV R2, c[0x0][0x174] ; /* 0x00005d0000027a02 */
/* 0x000fe20000000f00 */
/*0110*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fc800078e00ff */
/*0120*/ IMAD R3, R9.reuse, R2, -c[0x0][0x174] ; /* 0x80005d0009037624 */
/* 0x040fe400078e0202 */
/*0130*/ IMAD R2, R9, c[0x0][0x174], R0 ; /* 0x00005d0009027a24 */
/* 0x000fc600078e0200 */
/*0140*/ IADD3 R4, R3, R0, RZ ; /* 0x0000000003047210 */
/* 0x000fe20007ffe0ff */
/*0150*/ IMAD.WIDE R2, R2, R11, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc800078e020b */
/*0160*/ IMAD.WIDE R4, R4, R11, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe400078e020b */
/*0170*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0180*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*0190*/ IADD3 R9, R9, -0x1, RZ ; /* 0xffffffff09097810 */
/* 0x000fe20007ffe0ff */
/*01a0*/ IMAD.IADD R6, R3, 0x1, -R4 ; /* 0x0000000103067824 */
/* 0x004fca00078e0a04 */
/*01b0*/ IABS R6, R6 ; /* 0x0000000600067213 */
/* 0x000fc80000000000 */
/*01c0*/ ISETP.GE.AND P0, PT, R6, 0xb, PT ; /* 0x0000000b0600780c */
/* 0x000fda0003f06270 */
/*01d0*/ @!P0 BRA 0xd0 ; /* 0xfffffef000008947 */
/* 0x000fea000383ffff */
/*01e0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*01f0*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e020b */
/*0200*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0001e4000c101904 */
/*0210*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0220*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x000fc80007ffe0ff */
/*0230*/ ISETP.GE.U32.AND P0, PT, R0.reuse, R7, PT ; /* 0x000000070000720c */
/* 0x040fe40003f06070 */
/*0240*/ ISETP.LT.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f21270 */
/*0250*/ @!P0 BRA P1, 0xa0 ; /* 0xfffffe4000008947 */
/* 0x000fea000083ffff */
/*0260*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0270*/ BRA 0x270; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cstdlib>
#include <cuda.h>
#include <map>
#include <fstream>
using namespace std;
#define delta 10
#define rows 1000
#define columns 1000
int* findBarrier(int x, int y, int * Map[columns]){
//y-координаты препятствий
int *yCoordinates = new int [columns];
//текущее значение разности м-у двумя точками по вертикали
int currDelta = 0;
for(int i = 0; i < columns; i++){
//рассматриваем область выше параллели, на которой стоит робот
for(int j = y; j > 0; j--){
currDelta = Map[j][i] - Map[j-1][i];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > delta){
yCoordinates[i] = j-1;
break;
}
}
}
return yCoordinates;
}
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//Считаем идентификатор текущего потока
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //Работа со столбцами по потокам
{
for (int j = y; j > 0; j--) //Здесь работа со строками
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
}
//int argc, char* argv[]
int main(int argc, char* argv[]){
map<int,float> Results;
//int numbOfBlock;
//int numbOfThread;
for (int numbOfBlock = 1; numbOfBlock <= 1; numbOfBlock++ )
{
for(int numbOfThread = 1; numbOfThread <= columns; numbOfThread+=10){
//if(columns % numbOfBlock == 0){
//numbOfThread = 1;
//if (argc > 1)
// numbOfBlock = atoi(argv[1]);
//else
// numbOfBlock = 1;
//if (argc > 2)
// numbOfThread = atoi(argv[2]);
//else
// numbOfThread = 1;
//левая и правая границы высот для генерации
const int r_left = -5, r_right = 5;
//Координаты робота на карте
//int x = rows - 1;
int y = columns - 1;
//Карта высот
int **Map = new int* [rows];
int* resH = (int*)malloc(rows*columns * sizeof(int));
for (int i=0; i<columns; i++)
resH[i] = 0;
//Заполнение карты случайыми высотами
for(int i = 0; i < rows; i++){
Map[i] = new int [columns];
for(int j = 0; j < columns; j++){
if(j!=0)
Map[i][j] = rand()%(r_left - r_right) + r_left;
else
Map[i][j] = 20;
}
}
//Помещаем двумерный массив высот в одномерный
int* dataH = (int*)malloc(columns * rows * sizeof(int));
for (int i=0; i<columns; i++)
for (int j=0; j<rows; j++)
dataH[columns*i + j] = Map[i][j];
cudaEvent_t start, stopCopyTo, stopWork, stopCopyFrom;
cudaEventCreate(&start);
cudaEventCreate(&stopCopyTo);
cudaEventCreate(&stopWork);
cudaEventCreate(&stopCopyFrom);
int* dataDevice;
int* resDevice;
//Выделяем память на GPU под созданный массив
cudaMalloc((void**)&dataDevice, (rows * columns) * sizeof(int));
cudaMalloc((void**)&resDevice, (columns) * sizeof(int));
// Копирование исходных данных в GPU для обработки
cudaEventRecord(start);
cudaMemcpy(dataDevice, dataH, (rows * columns) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(resDevice, resH, (columns)*sizeof(int), cudaMemcpyHostToDevice);
dim3 threads = dim3(numbOfThread);
dim3 blocks = dim3(numbOfBlock);
cudaEventRecord(stopCopyTo);
SomeKernel<<<blocks, threads>>>( resDevice,
dataDevice,
columns,
rows,
y,
(rows * columns)/(numbOfBlock*numbOfThread));
cudaEventRecord(stopWork);
cudaMemcpy(dataH, dataDevice, (rows * columns) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(resH, resDevice, (columns) * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stopCopyFrom);
//cout << "Result vector: ";
// for (int i=0; i<5; i++)
// {
// cout << resH[i] << " ";
// }
//
// cout<<'\t';
for(int i = 0; i < columns; i++){
delete[] Map[i];
}
float t1,t2,t3;
cudaEventElapsedTime(&t1, start, stopCopyTo);
cudaEventElapsedTime(&t2, stopCopyTo, stopWork);
cudaEventElapsedTime(&t3, stopWork, stopCopyFrom);
//cout<<"Threads: "<< numbOfBlock*numbOfThread <<"\tTime: "<<t2<<endl;
Results.insert(pair<int,float>(numbOfBlock*numbOfThread,t2));
}
}
map<int,float>::iterator it;
ofstream fout("test0.txt");
for (it = Results.begin(); it != Results.end(); ++it)///вывод на экран
{
fout << it->first << ' ' << it->second << endl;
}
fout.close();
//cout << "Количество точек: \t\t" << columns*rows << endl;
//cout << "Количество потоков: \t\t" << numbOfBlock*numbOfThread << endl;
//cout << "Время копирования на GPU: \t" << t1 << endl;
//cout << "Время выполенния: \t\t" << t2 << endl;
//cout << "Время копирования с GPU: \t" << t3 << endl;
return 0;
} | .file "tmpxft_000f14e7_00000000-6_test000.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4236:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4236:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11findBarrieriiPPi
.type _Z11findBarrieriiPPi, @function
_Z11findBarrieriiPPi:
.LFB4204:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movl %esi, %ebp
movq %rdx, %rbx
movl $4000, %edi
call _Znam@PLT
movq %rax, %rdi
movl $0, %esi
jmp .L4
.L13:
subl $1, %edx
movl %edx, (%rdi,%rsi)
.L6:
addq $4, %rsi
cmpq $4000, %rsi
je .L3
.L4:
movslq %ebp, %rdx
testl %ebp, %ebp
jle .L6
.L7:
movq (%rbx,%rdx,8), %rax
movq -8(%rbx,%rdx,8), %rcx
movl (%rax,%rsi), %eax
subl (%rcx,%rsi), %eax
movl %eax, %ecx
negl %ecx
cmovns %ecx, %eax
cmpl $10, %eax
jg .L13
subq $1, %rdx
testl %edx, %edx
jg .L7
jmp .L6
.L3:
movq %rdi, %rax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4204:
.size _Z11findBarrieriiPPi, .-_Z11findBarrieriiPPi
.globl _Z36__device_stub__Z10SomeKernelPiS_iiiiPiS_iiii
.type _Z36__device_stub__Z10SomeKernelPiS_iiiiPiS_iiii, @function
_Z36__device_stub__Z10SomeKernelPiS_iiiiPiS_iiii:
.LFB4258:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10SomeKernelPiS_iiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L14
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4258:
.size _Z36__device_stub__Z10SomeKernelPiS_iiiiPiS_iiii, .-_Z36__device_stub__Z10SomeKernelPiS_iiiiPiS_iiii
.globl _Z10SomeKernelPiS_iiii
.type _Z10SomeKernelPiS_iiii, @function
_Z10SomeKernelPiS_iiii:
.LFB4259:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z10SomeKernelPiS_iiiiPiS_iiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4259:
.size _Z10SomeKernelPiS_iiii, .-_Z10SomeKernelPiS_iiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10SomeKernelPiS_iiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4261:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10SomeKernelPiS_iiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4261:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E,"axG",@progbits,_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E,comdat
.align 2
.weak _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
.type _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E, @function
_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E:
.LFB4765:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbp
movq %rsi, %rbx
testq %rsi, %rsi
je .L24
.L26:
movq 24(%rbx), %rsi
movq %rbp, %rdi
call _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
movq %rbx, %rdi
movq 16(%rbx), %rbx
movl $40, %esi
call _ZdlPvm@PLT
testq %rbx, %rbx
jne .L26
.L24:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4765:
.size _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E, .-_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
.section .text._ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_,"axG",@progbits,_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_,comdat
.align 2
.weak _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_
.type _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_, @function
_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_:
.LFB4920:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbp
movq 16(%rdi), %rbx
testq %rbx, %rbx
je .L42
movl (%rsi), %ecx
movl $0, %r8d
movl $1, %esi
jmp .L34
.L42:
leaq 8(%rdi), %rbx
.L31:
cmpq %rbx, 24(%rdi)
je .L39
movq %rbx, %rdi
call _ZSt18_Rb_tree_decrementPSt18_Rb_tree_node_base@PLT
movq %rbx, %rcx
movq %rax, %rbx
jmp .L35
.L32:
movq 24(%rbx), %rax
movl %r8d, %edx
.L33:
testq %rax, %rax
je .L43
movq %rax, %rbx
.L34:
cmpl 32(%rbx), %ecx
jge .L32
movq 16(%rbx), %rax
movl %esi, %edx
jmp .L33
.L43:
movq %rbx, %rcx
testb %dl, %dl
jne .L31
.L35:
movl 32(%rbx), %eax
cmpl 0(%rbp), %eax
movl $0, %edx
cmovge %rdx, %rcx
movl $0, %eax
cmovl %rax, %rbx
.L36:
movq %rbx, %rax
movq %rcx, %rdx
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
movq %rbx, %rcx
movl $0, %ebx
jmp .L36
.cfi_endproc
.LFE4920:
.size _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_, .-_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_
.section .text._ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_,"axG",@progbits,_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_,comdat
.align 2
.weak _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_
.type _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_, @function
_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_:
.LFB5004:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbp
movq %rdx, %r12
leaq 8(%rdi), %rax
cmpq %rax, %rsi
je .L57
movq %rsi, %rbx
movl (%rdx), %r13d
movl 32(%rsi), %eax
cmpl %r13d, %eax
jle .L48
movq 24(%rdi), %rdx
movq %rdx, %rax
cmpq %rsi, %rdx
je .L47
movq %rsi, %rdi
call _ZSt18_Rb_tree_decrementPSt18_Rb_tree_node_base@PLT
movq %rax, %rdx
cmpl %r13d, 32(%rax)
jge .L50
movq %rbx, %rax
cmpq $0, 24(%rdx)
movl $0, %ecx
cmove %rcx, %rax
cmovne %rbx, %rdx
jmp .L47
.L57:
cmpq $0, 40(%rdi)
je .L46
movq 32(%rdi), %rdx
movl 32(%rdx), %eax
cmpl %eax, (%r12)
jle .L46
movl $0, %eax
jmp .L47
.L46:
movq %r12, %rsi
movq %rbp, %rdi
call _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_
.L47:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L50:
.cfi_restore_state
movq %r12, %rsi
movq %rbp, %rdi
call _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_
jmp .L47
.L48:
jge .L52
movq 32(%rdi), %rdx
movl $0, %eax
cmpq %rsi, %rdx
je .L47
movq %rsi, %rdi
call _ZSt18_Rb_tree_incrementPSt18_Rb_tree_node_base@PLT
movq %rax, %rdx
cmpl 32(%rax), %r13d
jge .L54
cmpq $0, 24(%rbx)
movl $0, %ecx
cmove %rcx, %rax
cmove %rbx, %rdx
jmp .L47
.L54:
movq %r12, %rsi
movq %rbp, %rdi
call _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_
jmp .L47
.L52:
movq %rsi, %rax
movl $0, %edx
jmp .L47
.cfi_endproc
.LFE5004:
.size _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_, .-_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_
.section .text._ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_,"axG",@progbits,_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_,comdat
.align 2
.weak _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_
.type _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_, @function
_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_:
.LFB4911:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movq %rdi, %rbp
movq %rsi, %r14
movq %rdx, %r12
movl $40, %edi
call _Znwm@PLT
movq %rax, %rbx
movl (%r12), %r13d
movl %r13d, 32(%rax)
movss 4(%r12), %xmm0
movss %xmm0, 36(%rax)
leaq 32(%rax), %rdx
movq %r14, %rsi
movq %rbp, %rdi
call _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_
testq %rdx, %rdx
je .L59
movl $1, %edi
testq %rax, %rax
je .L65
.L60:
leaq 8(%rbp), %rcx
movzbl %dil, %edi
movq %rbx, %rsi
call _ZSt29_Rb_tree_insert_and_rebalancebPSt18_Rb_tree_node_baseS0_RS_@PLT
addq $1, 40(%rbp)
movq %rbx, %rbp
.L61:
movq %rbp, %rax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L65:
.cfi_restore_state
leaq 8(%rbp), %rax
cmpq %rax, %rdx
je .L60
cmpl 32(%rdx), %r13d
setl %dil
jmp .L60
.L59:
movq %rax, %rbp
movl $40, %esi
movq %rbx, %rdi
call _ZdlPvm@PLT
jmp .L61
.cfi_endproc
.LFE4911:
.size _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_, .-_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_
.section .rodata.str1.1
.LC1:
.string "test0.txt"
.text
.globl main
.type main, @function
main:
.LFB4205:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4205
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $712, %rsp
.cfi_def_cfa_offset 768
movq %fs:40, %rax
movq %rax, 696(%rsp)
xorl %eax, %eax
movl $0, 136(%rsp)
movq $0, 144(%rsp)
leaq 136(%rsp), %rax
movq %rax, 152(%rsp)
movq %rax, 160(%rsp)
movq $0, 168(%rsp)
movl $1, %r15d
leaq 48(%rsp), %rax
movq %rax, 24(%rsp)
jmp .L84
.L118:
movq %rax, 8(%rsp)
movl $4000000, %edi
call malloc@PLT
movq %rax, 16(%rsp)
leaq 4000(%rax), %rdx
.L67:
movl $0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L67
movq 8(%rsp), %r13
movq %r13, %r12
leaq 8000(%r13), %r14
jmp .L72
.L114:
movq %rax, %rbp
movq %rax, 0(%r13)
movl $0, %ebx
jmp .L71
.L112:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
subl $5, %eax
movl %eax, 0(%rbp,%rbx,4)
addq $1, %rbx
cmpq $1000, %rbx
je .L111
.L71:
testl %ebx, %ebx
jne .L112
movl $20, 0(%rbp)
addq $1, %rbx
jmp .L71
.L111:
addq $8, %r13
cmpq %r14, %r13
je .L113
.L72:
movl $4000, %edi
.LEHB0:
call _Znam@PLT
jmp .L114
.L113:
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbx
movq %rax, %rcx
movq 8(%rsp), %rdi
movl $0, %r8d
.L73:
movq (%rdi), %rsi
movl $0, %eax
.L74:
movl (%rsi,%rax), %edx
movl %edx, (%rcx,%rax)
addq $4, %rax
cmpq $4000, %rax
jne .L74
addq $8, %rdi
addl $1000, %r8d
addq $4000, %rcx
cmpl $1000000, %r8d
jne .L73
movq 24(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
leaq 80(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movl $4000, %esi
call cudaMalloc@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movl $4000000, %edx
movq %rbx, %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4000, %edx
movq 16(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl %r15d, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 124(%rsp)
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movl 112(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 104(%rsp), %rdx
movq 116(%rsp), %rdi
movl 124(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L76
movl $1000000, %eax
movl $0, %edx
idivl %r15d
movl %eax, %r9d
movl $999, %r8d
movl $1000, %ecx
movl $1000, %edx
movq 80(%rsp), %rsi
movq 88(%rsp), %rdi
call _Z36__device_stub__Z10SomeKernelPiS_iiiiPiS_iiii
.L76:
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl $2, %ecx
movl $4000000, %edx
movq 80(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movl $4000, %edx
movq 88(%rsp), %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
jmp .L78
.L77:
addq $8, %r12
cmpq %r14, %r12
je .L115
.L78:
movq (%r12), %rdi
testq %rdi, %rdi
je .L77
call _ZdaPv@PLT
jmp .L77
.L115:
leaq 36(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 40(%rsp), %rdi
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 44(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl %r15d, 96(%rsp)
movss 40(%rsp), %xmm0
movss %xmm0, 100(%rsp)
movq 144(%rsp), %rax
testq %rax, %rax
je .L97
leaq 136(%rsp), %rsi
jmp .L82
.L80:
movq 24(%rax), %rax
.L81:
testq %rax, %rax
je .L116
.L82:
cmpl %r15d, 32(%rax)
jl .L80
movq %rax, %rsi
movq 16(%rax), %rax
jmp .L81
.L116:
leaq 136(%rsp), %rax
cmpq %rax, %rsi
je .L98
cmpl %r15d, 32(%rsi)
jle .L83
.L79:
leaq 96(%rsp), %rdx
leaq 128(%rsp), %rdi
call _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_
jmp .L83
.L97:
leaq 136(%rsp), %rsi
jmp .L79
.L98:
leaq 136(%rsp), %rsi
jmp .L79
.L83:
addl $10, %r15d
cmpl $1001, %r15d
je .L117
.L84:
movl $8000, %edi
call _Znam@PLT
jmp .L118
.L117:
leaq 176(%rsp), %rdi
movl $16, %edx
leaq .LC1(%rip), %rsi
call _ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@PLT
.LEHE0:
movq 152(%rsp), %rbp
leaq 136(%rsp), %rax
cmpq %rax, %rbp
je .L85
leaq 176(%rsp), %r12
jmp .L92
.L123:
movq %rax, %rbx
movb $32, 116(%rsp)
movq (%rax), %rax
movq -24(%rax), %rax
cmpq $0, 16(%rbx,%rax)
je .L86
leaq 116(%rsp), %rsi
movl $1, %edx
movq %rbx, %rdi
.LEHB1:
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq %rax, %rbx
.L87:
pxor %xmm0, %xmm0
cvtss2sd 36(%rbp), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
jmp .L119
.L86:
movl $32, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
jmp .L87
.L119:
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r13
testq %r13, %r13
je .L120
cmpb $0, 56(%r13)
je .L90
movzbl 67(%r13), %eax
.L91:
movsbl %al, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
jmp .L121
.L120:
movq 696(%rsp), %rax
subq %fs:40, %rax
jne .L122
call _ZSt16__throw_bad_castv@PLT
.L100:
endbr64
movq %rax, %rbx
leaq 176(%rsp), %rdi
call _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@PLT
.L94:
leaq 128(%rsp), %rdi
movq 144(%rsp), %rsi
call _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
movq 696(%rsp), %rax
subq %fs:40, %rax
je .L95
call __stack_chk_fail@PLT
.L122:
call __stack_chk_fail@PLT
.L90:
movq %r13, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%r13), %rax
movl $10, %esi
movq %r13, %rdi
call *48(%rax)
jmp .L91
.L121:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movq %rbp, %rdi
call _ZSt18_Rb_tree_incrementPSt18_Rb_tree_node_base@PLT
movq %rax, %rbp
leaq 136(%rsp), %rax
cmpq %rax, %rbp
je .L85
.L92:
movl 32(%rbp), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
jmp .L123
.L85:
leaq 176(%rsp), %rdi
call _ZNSt14basic_ofstreamIcSt11char_traitsIcEE5closeEv@PLT
.LEHE1:
leaq 176(%rsp), %rdi
call _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@PLT
leaq 128(%rsp), %rdi
movq 144(%rsp), %rsi
call _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
movq 696(%rsp), %rax
subq %fs:40, %rax
jne .L124
movl $0, %eax
addq $712, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L99:
.cfi_restore_state
endbr64
movq %rax, %rbx
jmp .L94
.L95:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L124:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4205:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4205:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4205-.LLSDACSB4205
.LLSDACSB4205:
.uleb128 .LEHB0-.LFB4205
.uleb128 .LEHE0-.LEHB0
.uleb128 .L99-.LFB4205
.uleb128 0
.uleb128 .LEHB1-.LFB4205
.uleb128 .LEHE1-.LEHB1
.uleb128 .L100-.LFB4205
.uleb128 0
.uleb128 .LEHB2-.LFB4205
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE4205:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cstdlib>
#include <cuda.h>
#include <map>
#include <fstream>
using namespace std;
#define delta 10
#define rows 1000
#define columns 1000
int* findBarrier(int x, int y, int * Map[columns]){
//y-координаты препятствий
int *yCoordinates = new int [columns];
//текущее значение разности м-у двумя точками по вертикали
int currDelta = 0;
for(int i = 0; i < columns; i++){
//рассматриваем область выше параллели, на которой стоит робот
for(int j = y; j > 0; j--){
currDelta = Map[j][i] - Map[j-1][i];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > delta){
yCoordinates[i] = j-1;
break;
}
}
}
return yCoordinates;
}
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//Считаем идентификатор текущего потока
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //Работа со столбцами по потокам
{
for (int j = y; j > 0; j--) //Здесь работа со строками
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
}
//int argc, char* argv[]
int main(int argc, char* argv[]){
map<int,float> Results;
//int numbOfBlock;
//int numbOfThread;
for (int numbOfBlock = 1; numbOfBlock <= 1; numbOfBlock++ )
{
for(int numbOfThread = 1; numbOfThread <= columns; numbOfThread+=10){
//if(columns % numbOfBlock == 0){
//numbOfThread = 1;
//if (argc > 1)
// numbOfBlock = atoi(argv[1]);
//else
// numbOfBlock = 1;
//if (argc > 2)
// numbOfThread = atoi(argv[2]);
//else
// numbOfThread = 1;
//левая и правая границы высот для генерации
const int r_left = -5, r_right = 5;
//Координаты робота на карте
//int x = rows - 1;
int y = columns - 1;
//Карта высот
int **Map = new int* [rows];
int* resH = (int*)malloc(rows*columns * sizeof(int));
for (int i=0; i<columns; i++)
resH[i] = 0;
//Заполнение карты случайыми высотами
for(int i = 0; i < rows; i++){
Map[i] = new int [columns];
for(int j = 0; j < columns; j++){
if(j!=0)
Map[i][j] = rand()%(r_left - r_right) + r_left;
else
Map[i][j] = 20;
}
}
//Помещаем двумерный массив высот в одномерный
int* dataH = (int*)malloc(columns * rows * sizeof(int));
for (int i=0; i<columns; i++)
for (int j=0; j<rows; j++)
dataH[columns*i + j] = Map[i][j];
cudaEvent_t start, stopCopyTo, stopWork, stopCopyFrom;
cudaEventCreate(&start);
cudaEventCreate(&stopCopyTo);
cudaEventCreate(&stopWork);
cudaEventCreate(&stopCopyFrom);
int* dataDevice;
int* resDevice;
//Выделяем память на GPU под созданный массив
cudaMalloc((void**)&dataDevice, (rows * columns) * sizeof(int));
cudaMalloc((void**)&resDevice, (columns) * sizeof(int));
// Копирование исходных данных в GPU для обработки
cudaEventRecord(start);
cudaMemcpy(dataDevice, dataH, (rows * columns) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(resDevice, resH, (columns)*sizeof(int), cudaMemcpyHostToDevice);
dim3 threads = dim3(numbOfThread);
dim3 blocks = dim3(numbOfBlock);
cudaEventRecord(stopCopyTo);
SomeKernel<<<blocks, threads>>>( resDevice,
dataDevice,
columns,
rows,
y,
(rows * columns)/(numbOfBlock*numbOfThread));
cudaEventRecord(stopWork);
cudaMemcpy(dataH, dataDevice, (rows * columns) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(resH, resDevice, (columns) * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stopCopyFrom);
//cout << "Result vector: ";
// for (int i=0; i<5; i++)
// {
// cout << resH[i] << " ";
// }
//
// cout<<'\t';
for(int i = 0; i < columns; i++){
delete[] Map[i];
}
float t1,t2,t3;
cudaEventElapsedTime(&t1, start, stopCopyTo);
cudaEventElapsedTime(&t2, stopCopyTo, stopWork);
cudaEventElapsedTime(&t3, stopWork, stopCopyFrom);
//cout<<"Threads: "<< numbOfBlock*numbOfThread <<"\tTime: "<<t2<<endl;
Results.insert(pair<int,float>(numbOfBlock*numbOfThread,t2));
}
}
map<int,float>::iterator it;
ofstream fout("test0.txt");
for (it = Results.begin(); it != Results.end(); ++it)///вывод на экран
{
fout << it->first << ' ' << it->second << endl;
}
fout.close();
//cout << "Количество точек: \t\t" << columns*rows << endl;
//cout << "Количество потоков: \t\t" << numbOfBlock*numbOfThread << endl;
//cout << "Время копирования на GPU: \t" << t1 << endl;
//cout << "Время выполенния: \t\t" << t2 << endl;
//cout << "Время копирования с GPU: \t" << t3 << endl;
return 0;
} | #include <iostream>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <map>
#include <fstream>
using namespace std;
#define delta 10
#define rows 1000
#define columns 1000
int* findBarrier(int x, int y, int * Map[columns]){
//y-координаты препятствий
int *yCoordinates = new int [columns];
//текущее значение разности м-у двумя точками по вертикали
int currDelta = 0;
for(int i = 0; i < columns; i++){
//рассматриваем область выше параллели, на которой стоит робот
for(int j = y; j > 0; j--){
currDelta = Map[j][i] - Map[j-1][i];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > delta){
yCoordinates[i] = j-1;
break;
}
}
}
return yCoordinates;
}
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//Считаем идентификатор текущего потока
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //Работа со столбцами по потокам
{
for (int j = y; j > 0; j--) //Здесь работа со строками
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
}
//int argc, char* argv[]
int main(int argc, char* argv[]){
map<int,float> Results;
//int numbOfBlock;
//int numbOfThread;
for (int numbOfBlock = 1; numbOfBlock <= 1; numbOfBlock++ )
{
for(int numbOfThread = 1; numbOfThread <= columns; numbOfThread+=10){
//if(columns % numbOfBlock == 0){
//numbOfThread = 1;
//if (argc > 1)
// numbOfBlock = atoi(argv[1]);
//else
// numbOfBlock = 1;
//if (argc > 2)
// numbOfThread = atoi(argv[2]);
//else
// numbOfThread = 1;
//левая и правая границы высот для генерации
const int r_left = -5, r_right = 5;
//Координаты робота на карте
//int x = rows - 1;
int y = columns - 1;
//Карта высот
int **Map = new int* [rows];
int* resH = (int*)malloc(rows*columns * sizeof(int));
for (int i=0; i<columns; i++)
resH[i] = 0;
//Заполнение карты случайыми высотами
for(int i = 0; i < rows; i++){
Map[i] = new int [columns];
for(int j = 0; j < columns; j++){
if(j!=0)
Map[i][j] = rand()%(r_left - r_right) + r_left;
else
Map[i][j] = 20;
}
}
//Помещаем двумерный массив высот в одномерный
int* dataH = (int*)malloc(columns * rows * sizeof(int));
for (int i=0; i<columns; i++)
for (int j=0; j<rows; j++)
dataH[columns*i + j] = Map[i][j];
hipEvent_t start, stopCopyTo, stopWork, stopCopyFrom;
hipEventCreate(&start);
hipEventCreate(&stopCopyTo);
hipEventCreate(&stopWork);
hipEventCreate(&stopCopyFrom);
int* dataDevice;
int* resDevice;
//Выделяем память на GPU под созданный массив
hipMalloc((void**)&dataDevice, (rows * columns) * sizeof(int));
hipMalloc((void**)&resDevice, (columns) * sizeof(int));
// Копирование исходных данных в GPU для обработки
hipEventRecord(start);
hipMemcpy(dataDevice, dataH, (rows * columns) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(resDevice, resH, (columns)*sizeof(int), hipMemcpyHostToDevice);
dim3 threads = dim3(numbOfThread);
dim3 blocks = dim3(numbOfBlock);
hipEventRecord(stopCopyTo);
SomeKernel<<<blocks, threads>>>( resDevice,
dataDevice,
columns,
rows,
y,
(rows * columns)/(numbOfBlock*numbOfThread));
hipEventRecord(stopWork);
hipMemcpy(dataH, dataDevice, (rows * columns) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(resH, resDevice, (columns) * sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stopCopyFrom);
//cout << "Result vector: ";
// for (int i=0; i<5; i++)
// {
// cout << resH[i] << " ";
// }
//
// cout<<'\t';
for(int i = 0; i < columns; i++){
delete[] Map[i];
}
float t1,t2,t3;
hipEventElapsedTime(&t1, start, stopCopyTo);
hipEventElapsedTime(&t2, stopCopyTo, stopWork);
hipEventElapsedTime(&t3, stopWork, stopCopyFrom);
//cout<<"Threads: "<< numbOfBlock*numbOfThread <<"\tTime: "<<t2<<endl;
Results.insert(pair<int,float>(numbOfBlock*numbOfThread,t2));
}
}
map<int,float>::iterator it;
ofstream fout("test0.txt");
for (it = Results.begin(); it != Results.end(); ++it)///вывод на экран
{
fout << it->first << ' ' << it->second << endl;
}
fout.close();
//cout << "Количество точек: \t\t" << columns*rows << endl;
//cout << "Количество потоков: \t\t" << numbOfBlock*numbOfThread << endl;
//cout << "Время копирования на GPU: \t" << t1 << endl;
//cout << "Время выполенния: \t\t" << t2 << endl;
//cout << "Время копирования с GPU: \t" << t3 << endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <map>
#include <fstream>
using namespace std;
#define delta 10
#define rows 1000
#define columns 1000
int* findBarrier(int x, int y, int * Map[columns]){
//y-координаты препятствий
int *yCoordinates = new int [columns];
//текущее значение разности м-у двумя точками по вертикали
int currDelta = 0;
for(int i = 0; i < columns; i++){
//рассматриваем область выше параллели, на которой стоит робот
for(int j = y; j > 0; j--){
currDelta = Map[j][i] - Map[j-1][i];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > delta){
yCoordinates[i] = j-1;
break;
}
}
}
return yCoordinates;
}
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//Считаем идентификатор текущего потока
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //Работа со столбцами по потокам
{
for (int j = y; j > 0; j--) //Здесь работа со строками
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
}
//int argc, char* argv[]
int main(int argc, char* argv[]){
map<int,float> Results;
//int numbOfBlock;
//int numbOfThread;
for (int numbOfBlock = 1; numbOfBlock <= 1; numbOfBlock++ )
{
for(int numbOfThread = 1; numbOfThread <= columns; numbOfThread+=10){
//if(columns % numbOfBlock == 0){
//numbOfThread = 1;
//if (argc > 1)
// numbOfBlock = atoi(argv[1]);
//else
// numbOfBlock = 1;
//if (argc > 2)
// numbOfThread = atoi(argv[2]);
//else
// numbOfThread = 1;
//левая и правая границы высот для генерации
const int r_left = -5, r_right = 5;
//Координаты робота на карте
//int x = rows - 1;
int y = columns - 1;
//Карта высот
int **Map = new int* [rows];
int* resH = (int*)malloc(rows*columns * sizeof(int));
for (int i=0; i<columns; i++)
resH[i] = 0;
//Заполнение карты случайыми высотами
for(int i = 0; i < rows; i++){
Map[i] = new int [columns];
for(int j = 0; j < columns; j++){
if(j!=0)
Map[i][j] = rand()%(r_left - r_right) + r_left;
else
Map[i][j] = 20;
}
}
//Помещаем двумерный массив высот в одномерный
int* dataH = (int*)malloc(columns * rows * sizeof(int));
for (int i=0; i<columns; i++)
for (int j=0; j<rows; j++)
dataH[columns*i + j] = Map[i][j];
hipEvent_t start, stopCopyTo, stopWork, stopCopyFrom;
hipEventCreate(&start);
hipEventCreate(&stopCopyTo);
hipEventCreate(&stopWork);
hipEventCreate(&stopCopyFrom);
int* dataDevice;
int* resDevice;
//Выделяем память на GPU под созданный массив
hipMalloc((void**)&dataDevice, (rows * columns) * sizeof(int));
hipMalloc((void**)&resDevice, (columns) * sizeof(int));
// Копирование исходных данных в GPU для обработки
hipEventRecord(start);
hipMemcpy(dataDevice, dataH, (rows * columns) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(resDevice, resH, (columns)*sizeof(int), hipMemcpyHostToDevice);
dim3 threads = dim3(numbOfThread);
dim3 blocks = dim3(numbOfBlock);
hipEventRecord(stopCopyTo);
SomeKernel<<<blocks, threads>>>( resDevice,
dataDevice,
columns,
rows,
y,
(rows * columns)/(numbOfBlock*numbOfThread));
hipEventRecord(stopWork);
hipMemcpy(dataH, dataDevice, (rows * columns) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(resH, resDevice, (columns) * sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stopCopyFrom);
//cout << "Result vector: ";
// for (int i=0; i<5; i++)
// {
// cout << resH[i] << " ";
// }
//
// cout<<'\t';
for(int i = 0; i < columns; i++){
delete[] Map[i];
}
float t1,t2,t3;
hipEventElapsedTime(&t1, start, stopCopyTo);
hipEventElapsedTime(&t2, stopCopyTo, stopWork);
hipEventElapsedTime(&t3, stopWork, stopCopyFrom);
//cout<<"Threads: "<< numbOfBlock*numbOfThread <<"\tTime: "<<t2<<endl;
Results.insert(pair<int,float>(numbOfBlock*numbOfThread,t2));
}
}
map<int,float>::iterator it;
ofstream fout("test0.txt");
for (it = Results.begin(); it != Results.end(); ++it)///вывод на экран
{
fout << it->first << ' ' << it->second << endl;
}
fout.close();
//cout << "Количество точек: \t\t" << columns*rows << endl;
//cout << "Количество потоков: \t\t" << numbOfBlock*numbOfThread << endl;
//cout << "Время копирования на GPU: \t" << t1 << endl;
//cout << "Время выполенния: \t\t" << t2 << endl;
//cout << "Время копирования с GPU: \t" << t3 << endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10SomeKernelPiS_iiii
.globl _Z10SomeKernelPiS_iiii
.p2align 8
.type _Z10SomeKernelPiS_iiii,@function
_Z10SomeKernelPiS_iiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x1c
s_load_b32 s8, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_mul_lo_u32 v0, v1, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, s3, v0
v_cmp_gt_i32_e64 s2, s8, v0
v_cmp_lt_u32_e32 vcc_lo, v0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_10
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x14
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s1, 0
s_waitcnt lgkmcnt(0)
s_add_i32 s0, s3, -1
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s2, s0, v[0:1]
v_mad_u64_u32 v[3:4], null, s3, s2, v[0:1]
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s0
v_add_nc_u32_e32 v0, 1, v0
v_add_nc_u32_e32 v2, 1, v2
v_add_nc_u32_e32 v3, 1, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_ge_u32_e32 vcc_lo, v0, v5
v_cmp_le_i32_e64 s0, s8, v0
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s0, exec_lo, s0
s_or_b32 s1, s0, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_10
.LBB0_3:
s_mov_b32 s0, 0
s_mov_b32 s10, 0
s_mov_b32 s11, s3
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_6
.p2align 6
.LBB0_4:
v_add_nc_u32_e32 v6, s10, v3
v_add_nc_u32_e32 v8, s10, v2
s_and_not1_b32 s12, s12, exec_lo
s_add_i32 s11, s11, -1
s_sub_i32 s10, s10, s2
v_ashrrev_i32_e32 v7, 31, v6
v_ashrrev_i32_e32 v9, 31, v8
s_mov_b32 s13, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
s_clause 0x1
global_load_b32 v1, v[6:7], off
global_load_b32 v4, v[8:9], off
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v1, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v4, 0, v1
v_max_i32_e32 v1, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_lt_u32_e32 vcc_lo, 10, v1
s_and_b32 s14, vcc_lo, exec_lo
s_or_b32 s12, s12, s14
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
s_and_b32 s14, exec_lo, s12
v_mov_b32_e32 v4, s11
s_or_b32 s0, s14, s0
s_and_not1_b32 s9, s9, exec_lo
s_and_b32 s13, s13, exec_lo
s_or_b32 s9, s9, s13
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_8
.LBB0_6:
s_or_b32 s12, s12, exec_lo
s_cmp_lt_i32 s11, 1
s_cbranch_scc0 .LBB0_4
s_mov_b32 s13, -1
s_branch .LBB0_5
.LBB0_8:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s0
s_xor_b32 s0, s9, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s9, s0
s_xor_b32 s0, exec_lo, s9
s_cbranch_execz .LBB0_2
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[0:1]
v_add_co_u32 v6, vcc_lo, s4, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
global_store_b32 v[6:7], v4, off
s_branch .LBB0_2
.LBB0_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10SomeKernelPiS_iiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10SomeKernelPiS_iiii, .Lfunc_end0-_Z10SomeKernelPiS_iiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10SomeKernelPiS_iiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10SomeKernelPiS_iiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <map>
#include <fstream>
using namespace std;
#define delta 10
#define rows 1000
#define columns 1000
int* findBarrier(int x, int y, int * Map[columns]){
//y-координаты препятствий
int *yCoordinates = new int [columns];
//текущее значение разности м-у двумя точками по вертикали
int currDelta = 0;
for(int i = 0; i < columns; i++){
//рассматриваем область выше параллели, на которой стоит робот
for(int j = y; j > 0; j--){
currDelta = Map[j][i] - Map[j-1][i];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > delta){
yCoordinates[i] = j-1;
break;
}
}
}
return yCoordinates;
}
__global__ void SomeKernel(int* res, int* data, int col, int row,int y, int step)
{
unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
//Считаем идентификатор текущего потока
int currDelta = 0;
for (int i=step*threadId; (i<(threadId+1)*step) && (i < col); i++) //Работа со столбцами по потокам
{
for (int j = y; j > 0; j--) //Здесь работа со строками
{
currDelta = data[i + j*row] - data[i + (j-1)*row];
//если текущая разность больше дельты, то запоминаем у-координату
if( ( currDelta >= 0 ? currDelta : currDelta*-1 ) > 10){
res[i] = j-1;
break;
}
}
}
}
//int argc, char* argv[]
int main(int argc, char* argv[]){
map<int,float> Results;
//int numbOfBlock;
//int numbOfThread;
for (int numbOfBlock = 1; numbOfBlock <= 1; numbOfBlock++ )
{
for(int numbOfThread = 1; numbOfThread <= columns; numbOfThread+=10){
//if(columns % numbOfBlock == 0){
//numbOfThread = 1;
//if (argc > 1)
// numbOfBlock = atoi(argv[1]);
//else
// numbOfBlock = 1;
//if (argc > 2)
// numbOfThread = atoi(argv[2]);
//else
// numbOfThread = 1;
//левая и правая границы высот для генерации
const int r_left = -5, r_right = 5;
//Координаты робота на карте
//int x = rows - 1;
int y = columns - 1;
//Карта высот
int **Map = new int* [rows];
int* resH = (int*)malloc(rows*columns * sizeof(int));
for (int i=0; i<columns; i++)
resH[i] = 0;
//Заполнение карты случайыми высотами
for(int i = 0; i < rows; i++){
Map[i] = new int [columns];
for(int j = 0; j < columns; j++){
if(j!=0)
Map[i][j] = rand()%(r_left - r_right) + r_left;
else
Map[i][j] = 20;
}
}
//Помещаем двумерный массив высот в одномерный
int* dataH = (int*)malloc(columns * rows * sizeof(int));
for (int i=0; i<columns; i++)
for (int j=0; j<rows; j++)
dataH[columns*i + j] = Map[i][j];
hipEvent_t start, stopCopyTo, stopWork, stopCopyFrom;
hipEventCreate(&start);
hipEventCreate(&stopCopyTo);
hipEventCreate(&stopWork);
hipEventCreate(&stopCopyFrom);
int* dataDevice;
int* resDevice;
//Выделяем память на GPU под созданный массив
hipMalloc((void**)&dataDevice, (rows * columns) * sizeof(int));
hipMalloc((void**)&resDevice, (columns) * sizeof(int));
// Копирование исходных данных в GPU для обработки
hipEventRecord(start);
hipMemcpy(dataDevice, dataH, (rows * columns) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(resDevice, resH, (columns)*sizeof(int), hipMemcpyHostToDevice);
dim3 threads = dim3(numbOfThread);
dim3 blocks = dim3(numbOfBlock);
hipEventRecord(stopCopyTo);
SomeKernel<<<blocks, threads>>>( resDevice,
dataDevice,
columns,
rows,
y,
(rows * columns)/(numbOfBlock*numbOfThread));
hipEventRecord(stopWork);
hipMemcpy(dataH, dataDevice, (rows * columns) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(resH, resDevice, (columns) * sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stopCopyFrom);
//cout << "Result vector: ";
// for (int i=0; i<5; i++)
// {
// cout << resH[i] << " ";
// }
//
// cout<<'\t';
for(int i = 0; i < columns; i++){
delete[] Map[i];
}
float t1,t2,t3;
hipEventElapsedTime(&t1, start, stopCopyTo);
hipEventElapsedTime(&t2, stopCopyTo, stopWork);
hipEventElapsedTime(&t3, stopWork, stopCopyFrom);
//cout<<"Threads: "<< numbOfBlock*numbOfThread <<"\tTime: "<<t2<<endl;
Results.insert(pair<int,float>(numbOfBlock*numbOfThread,t2));
}
}
map<int,float>::iterator it;
ofstream fout("test0.txt");
for (it = Results.begin(); it != Results.end(); ++it)///вывод на экран
{
fout << it->first << ' ' << it->second << endl;
}
fout.close();
//cout << "Количество точек: \t\t" << columns*rows << endl;
//cout << "Количество потоков: \t\t" << numbOfBlock*numbOfThread << endl;
//cout << "Время копирования на GPU: \t" << t1 << endl;
//cout << "Время выполенния: \t\t" << t2 << endl;
//cout << "Время копирования с GPU: \t" << t3 << endl;
return 0;
} | .text
.file "test000.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z11findBarrieriiPPi # -- Begin function _Z11findBarrieriiPPi
.p2align 4, 0x90
.type _Z11findBarrieriiPPi,@function
_Z11findBarrieriiPPi: # @_Z11findBarrieriiPPi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
movq %rdx, %rbx
movl %esi, %ebp
movl $4000, %edi # imm = 0xFA0
callq _Znam
xorl %ecx, %ecx
jmp .LBB0_1
.p2align 4, 0x90
.LBB0_4: # in Loop: Header=BB0_1 Depth=1
movl %edx, (%rax,%rcx,4)
.LBB0_5: # %.loopexit
# in Loop: Header=BB0_1 Depth=1
incq %rcx
cmpq $1000, %rcx # imm = 0x3E8
je .LBB0_6
.LBB0_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_2 Depth 2
movl %ebp, %edx
.p2align 4, 0x90
.LBB0_2: # Parent Loop BB0_1 Depth=1
# => This Inner Loop Header: Depth=2
testl %edx, %edx
jle .LBB0_5
# %bb.3: # in Loop: Header=BB0_2 Depth=2
movl %edx, %esi
movq (%rbx,%rsi,8), %rsi
movl (%rsi,%rcx,4), %esi
decl %edx
movq (%rbx,%rdx,8), %rdi
subl (%rdi,%rcx,4), %esi
movl %esi, %edi
negl %edi
cmovsl %esi, %edi
cmpl $11, %edi
jb .LBB0_2
jmp .LBB0_4
.LBB0_6:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z11findBarrieriiPPi, .Lfunc_end0-_Z11findBarrieriiPPi
.cfi_endproc
# -- End function
.globl _Z25__device_stub__SomeKernelPiS_iiii # -- Begin function _Z25__device_stub__SomeKernelPiS_iiii
.p2align 4, 0x90
.type _Z25__device_stub__SomeKernelPiS_iiii,@function
_Z25__device_stub__SomeKernelPiS_iiii: # @_Z25__device_stub__SomeKernelPiS_iiii
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10SomeKernelPiS_iiii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end1:
.size _Z25__device_stub__SomeKernelPiS_iiii, .Lfunc_end1-_Z25__device_stub__SomeKernelPiS_iiii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0: # %.critedge
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $696, %rsp # imm = 0x2B8
.cfi_def_cfa_offset 752
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 112(%rsp), %rax
movl $0, 112(%rsp)
movq $0, 120(%rsp)
movq %rax, 128(%rsp)
movq %rax, 136(%rsp)
movq $0, 144(%rsp)
movl $1, %r14d
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_49: # %.critedge.i
# in Loop: Header=BB2_1 Depth=1
.Ltmp50:
.cfi_escape 0x2e, 0x00
leaq 104(%rsp), %rdi
leaq 176(%rsp), %rdx
callq _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_
.Ltmp51:
.LBB2_50: # %_ZNSt3mapIifSt4lessIiESaISt4pairIKifEEE6insertIS2_IifEEENSt9enable_ifIXsr16is_constructibleIS4_T_EE5valueES2_ISt17_Rb_tree_iteratorIS4_EbEE4typeEOSA_.exit
# in Loop: Header=BB2_1 Depth=1
leaq 10(%r14), %rax
cmpq $991, %r14 # imm = 0x3DF
movq %rax, %r14
jae .LBB2_51
.LBB2_1: # =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
# Child Loop BB2_5 Depth 3
# Child Loop BB2_10 Depth 2
# Child Loop BB2_11 Depth 3
# Child Loop BB2_32 Depth 2
# Child Loop BB2_45 Depth 2
.Ltmp0:
.cfi_escape 0x2e, 0x00
movl $8000, %edi # imm = 0x1F40
callq _Znam
.Ltmp1:
# %bb.2: # %.preheader106.preheader
# in Loop: Header=BB2_1 Depth=1
movq %rax, %rbp
.cfi_escape 0x2e, 0x00
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl $4000, %edx # imm = 0xFA0
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
xorl %r12d, %r12d
jmp .LBB2_3
.p2align 4, 0x90
.LBB2_8: # in Loop: Header=BB2_3 Depth=2
incq %r12
cmpq $1000, %r12 # imm = 0x3E8
je .LBB2_9
.LBB2_3: # %.preheader106
# Parent Loop BB2_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_5 Depth 3
.Ltmp3:
.cfi_escape 0x2e, 0x00
movl $4000, %edi # imm = 0xFA0
callq _Znam
.Ltmp4:
# %bb.4: # in Loop: Header=BB2_3 Depth=2
movq %rax, %r13
movq %rax, (%rbp,%r12,8)
xorl %r15d, %r15d
jmp .LBB2_5
.p2align 4, 0x90
.LBB2_40: # in Loop: Header=BB2_5 Depth=3
movl $20, (%r13)
.LBB2_41: # in Loop: Header=BB2_5 Depth=3
incq %r15
cmpq $1000, %r15 # imm = 0x3E8
je .LBB2_8
.LBB2_5: # Parent Loop BB2_1 Depth=1
# Parent Loop BB2_3 Depth=2
# => This Inner Loop Header: Depth=3
testq %r15, %r15
je .LBB2_40
# %bb.6: # in Loop: Header=BB2_5 Depth=3
.cfi_escape 0x2e, 0x00
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
addl $-5, %eax
movl %eax, (%r13,%r15,4)
jmp .LBB2_41
.p2align 4, 0x90
.LBB2_9: # in Loop: Header=BB2_1 Depth=1
.cfi_escape 0x2e, 0x00
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r13
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_10: # %.preheader
# Parent Loop BB2_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_11 Depth 3
movq (%rbp,%rcx,8), %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_11: # Parent Loop BB2_1 Depth=1
# Parent Loop BB2_10 Depth=2
# => This Inner Loop Header: Depth=3
movl (%rdx,%rsi,4), %edi
movl %edi, (%rax,%rsi,4)
incq %rsi
cmpq $1000, %rsi # imm = 0x3E8
jne .LBB2_11
# %bb.12: # in Loop: Header=BB2_10 Depth=2
incq %rcx
addq $4000, %rax # imm = 0xFA0
cmpq $1000, %rcx # imm = 0x3E8
jne .LBB2_10
# %bb.13: # in Loop: Header=BB2_1 Depth=1
.Ltmp6:
.cfi_escape 0x2e, 0x00
leaq 88(%rsp), %rdi
callq hipEventCreate
.Ltmp7:
# %bb.14: # in Loop: Header=BB2_1 Depth=1
.Ltmp8:
.cfi_escape 0x2e, 0x00
leaq 24(%rsp), %rdi
callq hipEventCreate
.Ltmp9:
# %bb.15: # in Loop: Header=BB2_1 Depth=1
.Ltmp10:
.cfi_escape 0x2e, 0x00
leaq 16(%rsp), %rdi
callq hipEventCreate
.Ltmp11:
# %bb.16: # in Loop: Header=BB2_1 Depth=1
.Ltmp12:
.cfi_escape 0x2e, 0x00
leaq 80(%rsp), %rdi
callq hipEventCreate
.Ltmp13:
# %bb.17: # in Loop: Header=BB2_1 Depth=1
.Ltmp15:
.cfi_escape 0x2e, 0x00
movl $4000000, %esi # imm = 0x3D0900
leaq 8(%rsp), %rdi
callq hipMalloc
.Ltmp16:
# %bb.18: # in Loop: Header=BB2_1 Depth=1
.Ltmp17:
.cfi_escape 0x2e, 0x00
movl $4000, %esi # imm = 0xFA0
movq %rsp, %rdi
callq hipMalloc
.Ltmp18:
# %bb.19: # in Loop: Header=BB2_1 Depth=1
movq 88(%rsp), %rdi
.Ltmp19:
.cfi_escape 0x2e, 0x00
xorl %esi, %esi
callq hipEventRecord
.Ltmp20:
# %bb.20: # in Loop: Header=BB2_1 Depth=1
movq 8(%rsp), %rdi
.Ltmp21:
.cfi_escape 0x2e, 0x00
movl $4000000, %edx # imm = 0x3D0900
movq %r13, %rsi
movl $1, %ecx
callq hipMemcpy
.Ltmp22:
# %bb.21: # in Loop: Header=BB2_1 Depth=1
movq (%rsp), %rdi
.Ltmp23:
.cfi_escape 0x2e, 0x00
movl $4000, %edx # imm = 0xFA0
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
.Ltmp24:
# %bb.22: # in Loop: Header=BB2_1 Depth=1
movq 24(%rsp), %rdi
.Ltmp26:
.cfi_escape 0x2e, 0x00
xorl %esi, %esi
callq hipEventRecord
.Ltmp27:
# %bb.23: # in Loop: Header=BB2_1 Depth=1
movq %r14, %rdx
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdx
.Ltmp28:
.cfi_escape 0x2e, 0x00
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
.Ltmp29:
# %bb.24: # in Loop: Header=BB2_1 Depth=1
testl %eax, %eax
jne .LBB2_27
# %bb.25: # in Loop: Header=BB2_1 Depth=1
movq (%rsp), %rcx
movq 8(%rsp), %rsi
movl $1000000, %eax # imm = 0xF4240
xorl %edx, %edx
divl %r14d
movq %rcx, 96(%rsp)
movq %rsi, 168(%rsp)
movl $1000, 60(%rsp) # imm = 0x3E8
movl $1000, 56(%rsp) # imm = 0x3E8
movl $999, 52(%rsp) # imm = 0x3E7
movl %eax, 48(%rsp)
leaq 96(%rsp), %rax
movq %rax, 176(%rsp)
leaq 168(%rsp), %rax
movq %rax, 184(%rsp)
leaq 60(%rsp), %rax
movq %rax, 192(%rsp)
leaq 56(%rsp), %rax
movq %rax, 200(%rsp)
leaq 52(%rsp), %rax
movq %rax, 208(%rsp)
leaq 48(%rsp), %rax
movq %rax, 216(%rsp)
.Ltmp30:
.cfi_escape 0x2e, 0x00
leaq 32(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 160(%rsp), %rdx
leaq 152(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp31:
# %bb.26: # %.noexc
# in Loop: Header=BB2_1 Depth=1
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
.Ltmp32:
.cfi_escape 0x2e, 0x10
movl $_Z10SomeKernelPiS_iiii, %edi
leaq 176(%rsp), %r9
pushq 152(%rsp)
.cfi_adjust_cfa_offset 8
pushq 168(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp33:
.LBB2_27: # in Loop: Header=BB2_1 Depth=1
movq 16(%rsp), %rdi
.Ltmp34:
.cfi_escape 0x2e, 0x00
xorl %esi, %esi
callq hipEventRecord
.Ltmp35:
# %bb.28: # in Loop: Header=BB2_1 Depth=1
movq 8(%rsp), %rsi
.Ltmp36:
.cfi_escape 0x2e, 0x00
movl $4000000, %edx # imm = 0x3D0900
movq %r13, %rdi
movl $2, %ecx
callq hipMemcpy
.Ltmp37:
# %bb.29: # in Loop: Header=BB2_1 Depth=1
movq (%rsp), %rsi
.Ltmp38:
.cfi_escape 0x2e, 0x00
movl $4000, %edx # imm = 0xFA0
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
.Ltmp39:
# %bb.30: # in Loop: Header=BB2_1 Depth=1
movq 80(%rsp), %rdi
.Ltmp40:
.cfi_escape 0x2e, 0x00
xorl %esi, %esi
callq hipEventRecord
.Ltmp41:
# %bb.31: # %.preheader105.preheader
# in Loop: Header=BB2_1 Depth=1
xorl %ebx, %ebx
jmp .LBB2_32
.p2align 4, 0x90
.LBB2_34: # in Loop: Header=BB2_32 Depth=2
incq %rbx
cmpq $1000, %rbx # imm = 0x3E8
je .LBB2_35
.LBB2_32: # %.preheader105
# Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
movq (%rbp,%rbx,8), %rdi
testq %rdi, %rdi
je .LBB2_34
# %bb.33: # in Loop: Header=BB2_32 Depth=2
.cfi_escape 0x2e, 0x00
callq _ZdaPv
jmp .LBB2_34
.p2align 4, 0x90
.LBB2_35: # in Loop: Header=BB2_1 Depth=1
movq 88(%rsp), %rsi
movq 24(%rsp), %rdx
.Ltmp43:
.cfi_escape 0x2e, 0x00
leaq 32(%rsp), %rdi
callq hipEventElapsedTime
.Ltmp44:
# %bb.36: # in Loop: Header=BB2_1 Depth=1
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
.Ltmp45:
.cfi_escape 0x2e, 0x00
leaq 64(%rsp), %rdi
callq hipEventElapsedTime
.Ltmp46:
# %bb.37: # in Loop: Header=BB2_1 Depth=1
movq 16(%rsp), %rsi
movq 80(%rsp), %rdx
.Ltmp47:
.cfi_escape 0x2e, 0x00
leaq 96(%rsp), %rdi
callq hipEventElapsedTime
.Ltmp48:
# %bb.38: # in Loop: Header=BB2_1 Depth=1
movl %r14d, 176(%rsp)
movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 180(%rsp)
movq 120(%rsp), %rax
leaq 112(%rsp), %rsi
testq %rax, %rax
je .LBB2_47
.p2align 4, 0x90
.LBB2_45: # %.lr.ph.i.i.i.i
# Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
movslq 32(%rax), %rcx
xorl %edx, %edx
cmpq %rcx, %r14
setg %dl
cmovleq %rax, %rsi
movq 16(%rax,%rdx,8), %rax
testq %rax, %rax
jne .LBB2_45
.LBB2_47: # %_ZNSt3mapIifSt4lessIiESaISt4pairIKifEEE11lower_boundERS3_.exit.i
# in Loop: Header=BB2_1 Depth=1
leaq 112(%rsp), %rax
cmpq %rax, %rsi
je .LBB2_49
# %bb.48: # in Loop: Header=BB2_1 Depth=1
movslq 32(%rsi), %rax
cmpq %rax, %r14
jl .LBB2_49
jmp .LBB2_50
.LBB2_51:
.Ltmp53:
.cfi_escape 0x2e, 0x00
leaq 176(%rsp), %rdi
movl $.L.str, %esi
movl $16, %edx
callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode
.Ltmp54:
# %bb.52:
movq 128(%rsp), %rbx
leaq 112(%rsp), %rax
cmpq %rax, %rbx
je .LBB2_71
# %bb.53: # %.lr.ph.preheader
leaq 176(%rsp), %r14
leaq 32(%rsp), %rbp
.p2align 4, 0x90
.LBB2_54: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl 32(%rbx), %esi
.Ltmp56:
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
callq _ZNSolsEi
.Ltmp57:
# %bb.55: # in Loop: Header=BB2_54 Depth=1
movb $32, 32(%rsp)
movq (%rax), %rcx
movq -24(%rcx), %rcx
cmpq $0, 16(%rax,%rcx)
je .LBB2_59
# %bb.56: # in Loop: Header=BB2_54 Depth=1
.Ltmp58:
.cfi_escape 0x2e, 0x00
movl $1, %edx
movq %rax, %rdi
movq %rbp, %rsi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp59:
jmp .LBB2_60
.p2align 4, 0x90
.LBB2_59: # in Loop: Header=BB2_54 Depth=1
.Ltmp60:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
movl $32, %esi
movq %rax, %r15
callq _ZNSo3putEc
movq %r15, %rax
.Ltmp61:
.LBB2_60: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c.exit
# in Loop: Header=BB2_54 Depth=1
movss 36(%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
.Ltmp62:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp63:
# %bb.61: # %_ZNSolsEf.exit
# in Loop: Header=BB2_54 Depth=1
movq %rax, %r12
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %r13
testq %r13, %r13
je .LBB2_62
# %bb.64: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_54 Depth=1
cmpb $0, 56(%r13)
je .LBB2_66
# %bb.65: # in Loop: Header=BB2_54 Depth=1
movzbl 67(%r13), %eax
jmp .LBB2_68
.p2align 4, 0x90
.LBB2_66: # in Loop: Header=BB2_54 Depth=1
.Ltmp64:
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp65:
# %bb.67: # %.noexc94
# in Loop: Header=BB2_54 Depth=1
movq (%r13), %rax
.Ltmp66:
.cfi_escape 0x2e, 0x00
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.Ltmp67:
.LBB2_68: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i
# in Loop: Header=BB2_54 Depth=1
.Ltmp68:
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movq %r12, %rdi
callq _ZNSo3putEc
.Ltmp69:
# %bb.69: # %.noexc96
# in Loop: Header=BB2_54 Depth=1
.Ltmp70:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.Ltmp71:
# %bb.70: # %_ZNSolsEPFRSoS_E.exit
# in Loop: Header=BB2_54 Depth=1
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZSt18_Rb_tree_incrementPSt18_Rb_tree_node_base
movq %rax, %rbx
leaq 112(%rsp), %rax
cmpq %rax, %rbx
jne .LBB2_54
.LBB2_71: # %._crit_edge
leaq 184(%rsp), %rdi
.Ltmp75:
.cfi_escape 0x2e, 0x00
callq _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv
.Ltmp76:
# %bb.72: # %.noexc91
testq %rax, %rax
jne .LBB2_74
# %bb.73:
movq 176(%rsp), %rax
movq -24(%rax), %rax
leaq (%rsp,%rax), %rdi
addq $176, %rdi
movl 208(%rsp,%rax), %esi
orl $4, %esi
.Ltmp77:
.cfi_escape 0x2e, 0x00
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.Ltmp78:
.LBB2_74: # %_ZNSt14basic_ofstreamIcSt11char_traitsIcEE5closeEv.exit
.cfi_escape 0x2e, 0x00
leaq 176(%rsp), %rdi
callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev
movq 120(%rsp), %rsi
.Ltmp80:
.cfi_escape 0x2e, 0x00
leaq 104(%rsp), %rdi
callq _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
.Ltmp81:
# %bb.75: # %_ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev.exit
xorl %eax, %eax
addq $696, %rsp # imm = 0x2B8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_62:
.cfi_def_cfa_offset 752
.Ltmp73:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.Ltmp74:
# %bb.63: # %.noexc93
.LBB2_79:
.Ltmp82:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq __clang_call_terminate
.LBB2_80:
.Ltmp55:
jmp .LBB2_81
.LBB2_77: # %.loopexit.split-lp
.Ltmp79:
jmp .LBB2_78
.LBB2_58:
.Ltmp52:
jmp .LBB2_81
.LBB2_7:
.Ltmp2:
jmp .LBB2_81
.LBB2_57:
.Ltmp49:
jmp .LBB2_81
.LBB2_76: # %.loopexit
.Ltmp72:
.LBB2_78:
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
leaq 176(%rsp), %rdi
callq _ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev
jmp .LBB2_82
.LBB2_42:
.Ltmp14:
jmp .LBB2_81
.LBB2_43:
.Ltmp25:
jmp .LBB2_81
.LBB2_44:
.Ltmp42:
jmp .LBB2_81
.LBB2_39:
.Ltmp5:
.LBB2_81:
movq %rax, %rbx
.LBB2_82:
.cfi_escape 0x2e, 0x00
leaq 104(%rsp), %rdi
callq _ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table2:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 3 # @TType Encoding = udata4
.uleb128 .Lttbase0-.Lttbaseref0
.Lttbaseref0:
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Ltmp50-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp51-.Ltmp50 # Call between .Ltmp50 and .Ltmp51
.uleb128 .Ltmp52-.Lfunc_begin0 # jumps to .Ltmp52
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp1-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp3-.Ltmp1 # Call between .Ltmp1 and .Ltmp3
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp4-.Ltmp3 # Call between .Ltmp3 and .Ltmp4
.uleb128 .Ltmp5-.Lfunc_begin0 # jumps to .Ltmp5
.byte 0 # On action: cleanup
.uleb128 .Ltmp6-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp13-.Ltmp6 # Call between .Ltmp6 and .Ltmp13
.uleb128 .Ltmp14-.Lfunc_begin0 # jumps to .Ltmp14
.byte 0 # On action: cleanup
.uleb128 .Ltmp15-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp24-.Ltmp15 # Call between .Ltmp15 and .Ltmp24
.uleb128 .Ltmp25-.Lfunc_begin0 # jumps to .Ltmp25
.byte 0 # On action: cleanup
.uleb128 .Ltmp26-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Ltmp41-.Ltmp26 # Call between .Ltmp26 and .Ltmp41
.uleb128 .Ltmp42-.Lfunc_begin0 # jumps to .Ltmp42
.byte 0 # On action: cleanup
.uleb128 .Ltmp43-.Lfunc_begin0 # >> Call Site 8 <<
.uleb128 .Ltmp48-.Ltmp43 # Call between .Ltmp43 and .Ltmp48
.uleb128 .Ltmp49-.Lfunc_begin0 # jumps to .Ltmp49
.byte 0 # On action: cleanup
.uleb128 .Ltmp53-.Lfunc_begin0 # >> Call Site 9 <<
.uleb128 .Ltmp54-.Ltmp53 # Call between .Ltmp53 and .Ltmp54
.uleb128 .Ltmp55-.Lfunc_begin0 # jumps to .Ltmp55
.byte 0 # On action: cleanup
.uleb128 .Ltmp56-.Lfunc_begin0 # >> Call Site 10 <<
.uleb128 .Ltmp71-.Ltmp56 # Call between .Ltmp56 and .Ltmp71
.uleb128 .Ltmp72-.Lfunc_begin0 # jumps to .Ltmp72
.byte 0 # On action: cleanup
.uleb128 .Ltmp75-.Lfunc_begin0 # >> Call Site 11 <<
.uleb128 .Ltmp78-.Ltmp75 # Call between .Ltmp75 and .Ltmp78
.uleb128 .Ltmp79-.Lfunc_begin0 # jumps to .Ltmp79
.byte 0 # On action: cleanup
.uleb128 .Ltmp80-.Lfunc_begin0 # >> Call Site 12 <<
.uleb128 .Ltmp81-.Ltmp80 # Call between .Ltmp80 and .Ltmp81
.uleb128 .Ltmp82-.Lfunc_begin0 # jumps to .Ltmp82
.byte 1 # On action: 1
.uleb128 .Ltmp73-.Lfunc_begin0 # >> Call Site 13 <<
.uleb128 .Ltmp74-.Ltmp73 # Call between .Ltmp73 and .Ltmp74
.uleb128 .Ltmp79-.Lfunc_begin0 # jumps to .Ltmp79
.byte 0 # On action: cleanup
.uleb128 .Ltmp74-.Lfunc_begin0 # >> Call Site 14 <<
.uleb128 .Lfunc_end2-.Ltmp74 # Call between .Ltmp74 and .Lfunc_end2
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.byte 1 # >> Action Record 1 <<
# Catch TypeInfo 1
.byte 0 # No further actions
.p2align 2, 0x0
# >> Catch TypeInfos <<
.long 0 # TypeInfo 1
.Lttbase0:
.p2align 2, 0x0
# -- End function
.section .text._ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev,"axG",@progbits,_ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev,comdat
.weak _ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev # -- Begin function _ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev
.p2align 4, 0x90
.type _ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev,@function
_ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev: # @_ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev
.Lfunc_begin1:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception1
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
.Ltmp83:
callq _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
.Ltmp84:
# %bb.1: # %_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EED2Ev.exit
popq %rax
.cfi_def_cfa_offset 8
retq
.LBB3_2:
.cfi_def_cfa_offset 16
.Ltmp85:
movq %rax, %rdi
callq __clang_call_terminate
.Lfunc_end3:
.size _ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev, .Lfunc_end3-_ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev
.cfi_endproc
.section .gcc_except_table._ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev,"aG",@progbits,_ZNSt3mapIifSt4lessIiESaISt4pairIKifEEED2Ev,comdat
.p2align 2, 0x0
GCC_except_table3:
.Lexception1:
.byte 255 # @LPStart Encoding = omit
.byte 3 # @TType Encoding = udata4
.uleb128 .Lttbase1-.Lttbaseref1
.Lttbaseref1:
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end1-.Lcst_begin1
.Lcst_begin1:
.uleb128 .Ltmp83-.Lfunc_begin1 # >> Call Site 1 <<
.uleb128 .Ltmp84-.Ltmp83 # Call between .Ltmp83 and .Ltmp84
.uleb128 .Ltmp85-.Lfunc_begin1 # jumps to .Ltmp85
.byte 1 # On action: 1
.Lcst_end1:
.byte 1 # >> Action Record 1 <<
# Catch TypeInfo 1
.byte 0 # No further actions
.p2align 2, 0x0
# >> Catch TypeInfos <<
.long 0 # TypeInfo 1
.Lttbase1:
.p2align 2, 0x0
# -- End function
.section .text.__clang_call_terminate,"axG",@progbits,__clang_call_terminate,comdat
.hidden __clang_call_terminate # -- Begin function __clang_call_terminate
.weak __clang_call_terminate
.p2align 4, 0x90
.type __clang_call_terminate,@function
__clang_call_terminate: # @__clang_call_terminate
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq __cxa_begin_catch
callq _ZSt9terminatev
.Lfunc_end4:
.size __clang_call_terminate, .Lfunc_end4-__clang_call_terminate
.cfi_endproc
# -- End function
.section .text._ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E,"axG",@progbits,_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E,comdat
.weak _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E # -- Begin function _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
.p2align 4, 0x90
.type _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E,@function
_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E: # @_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
.cfi_startproc
# %bb.0:
testq %rsi, %rsi
je .LBB5_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %r14
movq %rdi, %rbx
.p2align 4, 0x90
.LBB5_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq 24(%r14), %rsi
movq %rbx, %rdi
callq _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
movq 16(%r14), %r15
movq %r14, %rdi
callq _ZdlPv
movq %r15, %r14
testq %r15, %r15
jne .LBB5_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB5_4: # %._crit_edge
retq
.Lfunc_end5:
.size _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E, .Lfunc_end5-_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE8_M_eraseEPSt13_Rb_tree_nodeIS2_E
.cfi_endproc
# -- End function
.section .text._ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_,"axG",@progbits,_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_,comdat
.weak _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_ # -- Begin function _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_
.p2align 4, 0x90
.type _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_,@function
_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_: # @_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_
.Lfunc_begin2:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception2
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %r14
movl $40, %edi
callq _Znwm
movq %rax, %rbx
leaq 32(%rax), %r15
movl (%r12), %eax
movl %eax, 32(%rbx)
movss 4(%r12), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm0, 36(%rbx)
.Ltmp86:
movq %r14, %rdi
movq %r13, %rsi
movq %r15, %rdx
callq _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_
.Ltmp87:
# %bb.1:
testq %rdx, %rdx
je .LBB6_2
# %bb.3:
leaq 8(%r14), %rcx
movb $1, %sil
testq %rax, %rax
jne .LBB6_6
# %bb.4:
cmpq %rdx, %rcx
je .LBB6_6
# %bb.5:
movl (%r15), %eax
cmpl 32(%rdx), %eax
setl %sil
.LBB6_6: # %_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE10_Auto_node9_M_insertES0_IPSt18_Rb_tree_node_baseSB_E.exit
movzbl %sil, %edi
movq %rbx, %rsi
callq _ZSt29_Rb_tree_insert_and_rebalancebPSt18_Rb_tree_node_baseS0_RS_
incq 40(%r14)
xorl %edi, %edi
testq %rdi, %rdi
je .LBB6_9
.LBB6_8:
callq _ZdlPv
.LBB6_9: # %_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE10_Auto_nodeD2Ev.exit5
movq %rbx, %rax
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB6_2:
.cfi_def_cfa_offset 48
movq %rbx, %rdi
movq %rax, %rbx
testq %rdi, %rdi
jne .LBB6_8
jmp .LBB6_9
.LBB6_10: # %_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE10_Auto_nodeD2Ev.exit
.Ltmp88:
movq %rax, %r14
movq %rbx, %rdi
callq _ZdlPv
movq %r14, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end6:
.size _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_, .Lfunc_end6-_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_
.cfi_endproc
.section .gcc_except_table._ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_,"aG",@progbits,_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE22_M_emplace_hint_uniqueIJS0_IifEEEESt17_Rb_tree_iteratorIS2_ESt23_Rb_tree_const_iteratorIS2_EDpOT_,comdat
.p2align 2, 0x0
GCC_except_table6:
.Lexception2:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end2-.Lcst_begin2
.Lcst_begin2:
.uleb128 .Lfunc_begin2-.Lfunc_begin2 # >> Call Site 1 <<
.uleb128 .Ltmp86-.Lfunc_begin2 # Call between .Lfunc_begin2 and .Ltmp86
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp86-.Lfunc_begin2 # >> Call Site 2 <<
.uleb128 .Ltmp87-.Ltmp86 # Call between .Ltmp86 and .Ltmp87
.uleb128 .Ltmp88-.Lfunc_begin2 # jumps to .Ltmp88
.byte 0 # On action: cleanup
.uleb128 .Ltmp87-.Lfunc_begin2 # >> Call Site 3 <<
.uleb128 .Lfunc_end6-.Ltmp87 # Call between .Ltmp87 and .Lfunc_end6
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end2:
.p2align 2, 0x0
# -- End function
.section .text._ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_,"axG",@progbits,_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_,comdat
.weak _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_ # -- Begin function _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_
.p2align 4, 0x90
.type _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_,@function
_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_: # @_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, %r14
leaq 8(%rdi), %rbx
cmpq %rsi, %rbx
je .LBB7_5
# %bb.1:
movq %rsi, %rax
movl (%r14), %ebp
movl 32(%rsi), %ecx
cmpl %ecx, %ebp
jge .LBB7_8
# %bb.2:
movq 24(%rdi), %rdx
cmpq %rax, %rdx
je .LBB7_16
# %bb.3:
movq %rdi, %r12
movq %rdx, %r14
movq %rax, %r15
movq %rax, %rdi
callq _ZSt18_Rb_tree_decrementPSt18_Rb_tree_node_base
cmpl %ebp, 32(%rax)
jge .LBB7_17
# %bb.4:
movq 24(%rax), %rcx
testq %rcx, %rcx
cmovneq %r15, %rcx
movq %r15, %rdx
cmoveq %rax, %rdx
movq %rcx, %rax
jmp .LBB7_39
.LBB7_5:
cmpq $0, 40(%rdi)
je .LBB7_12
# %bb.6:
movq 32(%rdi), %rdx
movl 32(%rdx), %eax
cmpl (%r14), %eax
jl .LBB7_7
.LBB7_12:
movq 16(%rdi), %rax
testq %rax, %rax
je .LBB7_20
# %bb.13: # %.lr.ph.i
movl (%r14), %ecx
.p2align 4, 0x90
.LBB7_14: # =>This Inner Loop Header: Depth=1
movq %rax, %rbx
xorl %eax, %eax
cmpl 32(%rbx), %ecx
setl %dl
setge %al
movq 16(%rbx,%rax,8), %rax
testq %rax, %rax
jne .LBB7_14
jmp .LBB7_21
.LBB7_8:
jle .LBB7_15
# %bb.9:
movq 32(%rdi), %rdx
cmpq %rax, %rdx
je .LBB7_7
# %bb.10:
movq %rdi, %r14
movq %rax, %r15
movq %rax, %rdi
callq _ZSt18_Rb_tree_incrementPSt18_Rb_tree_node_base
cmpl 32(%rax), %ebp
jge .LBB7_26
# %bb.11:
movq %rax, %rdx
movq 24(%r15), %rax
testq %rax, %rax
cmovneq %rdx, %rax
cmoveq %r15, %rdx
jmp .LBB7_39
.LBB7_7:
xorl %eax, %eax
jmp .LBB7_39
.LBB7_15:
xorl %edx, %edx
jmp .LBB7_39
.LBB7_16:
movq %rdx, %rax
jmp .LBB7_39
.LBB7_17:
movq 16(%r12), %rax
testq %rax, %rax
je .LBB7_29
# %bb.18: # %.lr.ph.i12.preheader
movq %r14, %rdx
.p2align 4, 0x90
.LBB7_19: # %.lr.ph.i12
# =>This Inner Loop Header: Depth=1
movq %rax, %rbx
xorl %eax, %eax
cmpl 32(%rbx), %ebp
setl %sil
setge %al
movq 16(%rbx,%rax,8), %rax
testq %rax, %rax
jne .LBB7_19
jmp .LBB7_30
.LBB7_20:
movb $1, %dl
.LBB7_21: # %._crit_edge.i
movq %rbx, %rcx
testb %dl, %dl
je .LBB7_24
# %bb.22:
cmpq 24(%rdi), %rbx
je .LBB7_28
# %bb.23:
movq %rbx, %rdi
movq %rax, %r15
callq _ZSt18_Rb_tree_decrementPSt18_Rb_tree_node_base
movq %rax, %rcx
movq %r15, %rax
.LBB7_24:
movl 32(%rcx), %esi
xorl %edx, %edx
cmpl (%r14), %esi
jmp .LBB7_38
.LBB7_26:
movq %r14, %rdx
movq 16(%r14), %rax
testq %rax, %rax
je .LBB7_33
.p2align 4, 0x90
.LBB7_27: # %.lr.ph.i31
# =>This Inner Loop Header: Depth=1
movq %rax, %rbx
xorl %eax, %eax
cmpl 32(%rbx), %ebp
setl %sil
setge %al
movq 16(%rbx,%rax,8), %rax
testq %rax, %rax
jne .LBB7_27
jmp .LBB7_34
.LBB7_29:
movb $1, %sil
movq %r14, %rdx
.LBB7_30: # %._crit_edge.i17
movq %rbx, %rcx
testb %sil, %sil
je .LBB7_37
# %bb.31:
cmpq %rdx, %rbx
je .LBB7_28
.LBB7_36:
movq %rbx, %rdi
movq %rax, %r14
callq _ZSt18_Rb_tree_decrementPSt18_Rb_tree_node_base
movq %rax, %rcx
movq %r14, %rax
jmp .LBB7_37
.LBB7_33:
movb $1, %sil
.LBB7_34: # %._crit_edge.i36
movq %rbx, %rcx
testb %sil, %sil
je .LBB7_37
# %bb.35:
cmpq 24(%rdx), %rbx
jne .LBB7_36
.LBB7_28:
movq %rbx, %rdx
jmp .LBB7_39
.LBB7_37:
xorl %edx, %edx
cmpl %ebp, 32(%rcx)
.LBB7_38: # %_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_.exit
cmovgeq %rcx, %rax
cmovlq %rbx, %rdx
.LBB7_39: # %_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE24_M_get_insert_unique_posERS1_.exit
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size _ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_, .Lfunc_end7-_ZNSt8_Rb_treeIiSt4pairIKifESt10_Select1stIS2_ESt4lessIiESaIS2_EE29_M_get_insert_hint_unique_posESt23_Rb_tree_const_iteratorIS2_ERS1_
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10SomeKernelPiS_iiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10SomeKernelPiS_iiii,@object # @_Z10SomeKernelPiS_iiii
.section .rodata,"a",@progbits
.globl _Z10SomeKernelPiS_iiii
.p2align 3, 0x0
_Z10SomeKernelPiS_iiii:
.quad _Z25__device_stub__SomeKernelPiS_iiii
.size _Z10SomeKernelPiS_iiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "test0.txt"
.size .L.str, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10SomeKernelPiS_iiii"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__SomeKernelPiS_iiii
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z10SomeKernelPiS_iiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10SomeKernelPiS_iiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0040*/ IMAD R0, R0, c[0x0][0x17c], RZ ; /* 0x00005f0000007a24 */
/* 0x000fca00078e02ff */
/*0050*/ ISETP.GE.AND P0, PT, R0.reuse, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x040fe40003f06270 */
/*0060*/ IADD3 R7, R0, c[0x0][0x17c], RZ ; /* 0x00005f0000077a10 */
/* 0x000fc80007ffe0ff */
/*0070*/ ISETP.GE.U32.OR P0, PT, R0, R7, P0 ; /* 0x000000070000720c */
/* 0x000fda0000706470 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ BSSY B0, 0x220 ; /* 0x0000017000007945 */
/* 0x000fe20003800000 */
/*00b0*/ BSSY B1, 0x1f0 ; /* 0x0000013000017945 */
/* 0x000fe20003800000 */
/*00c0*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff097624 */
/* 0x001fca00078e00ff */
/*00d0*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fda0003f06270 */
/*00e0*/ @!P0 BREAK B1 ; /* 0x0000000000018942 */
/* 0x000fe20003800000 */
/*00f0*/ @!P0 BRA 0x210 ; /* 0x0000011000008947 */
/* 0x000fea0003800000 */
/*0100*/ MOV R2, c[0x0][0x174] ; /* 0x00005d0000027a02 */
/* 0x000fe20000000f00 */
/*0110*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fc800078e00ff */
/*0120*/ IMAD R3, R9.reuse, R2, -c[0x0][0x174] ; /* 0x80005d0009037624 */
/* 0x040fe400078e0202 */
/*0130*/ IMAD R2, R9, c[0x0][0x174], R0 ; /* 0x00005d0009027a24 */
/* 0x000fc600078e0200 */
/*0140*/ IADD3 R4, R3, R0, RZ ; /* 0x0000000003047210 */
/* 0x000fe20007ffe0ff */
/*0150*/ IMAD.WIDE R2, R2, R11, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc800078e020b */
/*0160*/ IMAD.WIDE R4, R4, R11, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fe400078e020b */
/*0170*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0180*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea2000c1e1900 */
/*0190*/ IADD3 R9, R9, -0x1, RZ ; /* 0xffffffff09097810 */
/* 0x000fe20007ffe0ff */
/*01a0*/ IMAD.IADD R6, R3, 0x1, -R4 ; /* 0x0000000103067824 */
/* 0x004fca00078e0a04 */
/*01b0*/ IABS R6, R6 ; /* 0x0000000600067213 */
/* 0x000fc80000000000 */
/*01c0*/ ISETP.GE.AND P0, PT, R6, 0xb, PT ; /* 0x0000000b0600780c */
/* 0x000fda0003f06270 */
/*01d0*/ @!P0 BRA 0xd0 ; /* 0xfffffef000008947 */
/* 0x000fea000383ffff */
/*01e0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*01f0*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e020b */
/*0200*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x0001e4000c101904 */
/*0210*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0220*/ IADD3 R0, R0, 0x1, RZ ; /* 0x0000000100007810 */
/* 0x000fc80007ffe0ff */
/*0230*/ ISETP.GE.U32.AND P0, PT, R0.reuse, R7, PT ; /* 0x000000070000720c */
/* 0x040fe40003f06070 */
/*0240*/ ISETP.LT.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f21270 */
/*0250*/ @!P0 BRA P1, 0xa0 ; /* 0xfffffe4000008947 */
/* 0x000fea000083ffff */
/*0260*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0270*/ BRA 0x270; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10SomeKernelPiS_iiii
.globl _Z10SomeKernelPiS_iiii
.p2align 8
.type _Z10SomeKernelPiS_iiii,@function
_Z10SomeKernelPiS_iiii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x1c
s_load_b32 s8, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_mul_lo_u32 v0, v1, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v5, s3, v0
v_cmp_gt_i32_e64 s2, s8, v0
v_cmp_lt_u32_e32 vcc_lo, v0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_10
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x14
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s1, 0
s_waitcnt lgkmcnt(0)
s_add_i32 s0, s3, -1
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s2, s0, v[0:1]
v_mad_u64_u32 v[3:4], null, s3, s2, v[0:1]
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s0
v_add_nc_u32_e32 v0, 1, v0
v_add_nc_u32_e32 v2, 1, v2
v_add_nc_u32_e32 v3, 1, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_ge_u32_e32 vcc_lo, v0, v5
v_cmp_le_i32_e64 s0, s8, v0
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s0, exec_lo, s0
s_or_b32 s1, s0, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_10
.LBB0_3:
s_mov_b32 s0, 0
s_mov_b32 s10, 0
s_mov_b32 s11, s3
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_6
.p2align 6
.LBB0_4:
v_add_nc_u32_e32 v6, s10, v3
v_add_nc_u32_e32 v8, s10, v2
s_and_not1_b32 s12, s12, exec_lo
s_add_i32 s11, s11, -1
s_sub_i32 s10, s10, s2
v_ashrrev_i32_e32 v7, 31, v6
v_ashrrev_i32_e32 v9, 31, v8
s_mov_b32 s13, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
s_clause 0x1
global_load_b32 v1, v[6:7], off
global_load_b32 v4, v[8:9], off
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v1, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v4, 0, v1
v_max_i32_e32 v1, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_lt_u32_e32 vcc_lo, 10, v1
s_and_b32 s14, vcc_lo, exec_lo
s_or_b32 s12, s12, s14
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
s_and_b32 s14, exec_lo, s12
v_mov_b32_e32 v4, s11
s_or_b32 s0, s14, s0
s_and_not1_b32 s9, s9, exec_lo
s_and_b32 s13, s13, exec_lo
s_or_b32 s9, s9, s13
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execz .LBB0_8
.LBB0_6:
s_or_b32 s12, s12, exec_lo
s_cmp_lt_i32 s11, 1
s_cbranch_scc0 .LBB0_4
s_mov_b32 s13, -1
s_branch .LBB0_5
.LBB0_8:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s0
s_xor_b32 s0, s9, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s9, s0
s_xor_b32 s0, exec_lo, s9
s_cbranch_execz .LBB0_2
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[0:1]
v_add_co_u32 v6, vcc_lo, s4, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
global_store_b32 v[6:7], v4, off
s_branch .LBB0_2
.LBB0_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10SomeKernelPiS_iiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10SomeKernelPiS_iiii, .Lfunc_end0-_Z10SomeKernelPiS_iiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10SomeKernelPiS_iiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10SomeKernelPiS_iiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <ctime>
#define BLOCKS_NUM 4
#define BLOCK_SIZE 256
#define DATA_TYPE int
// Block-level sum reduction in shared memory.
// Each thread first loads and adds TWO input elements (indices i and
// i + blockDim.x), so one block consumes 2 * blockDim.x consecutive ints;
// the block then tree-reduces the partials and thread 0 writes the
// per-block sum to out[blockIdx.x].
// NOTE(review): there is no bounds check on i — the caller must guarantee
// in[] holds at least 2 * gridDim.x * blockDim.x elements; the launch in
// main() below (grid = 4, block = 256 over a 1024-int buffer) does not,
// so blocks 2 and 3 read out of bounds. Confirm the intended grid sizing.
__global__ void reduce( DATA_TYPE* in, DATA_TYPE* out ){
__shared__ int data[BLOCK_SIZE];   // one partial-sum slot per thread
int tid = threadIdx.x;
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;   // first of this thread's two inputs
data[ tid ] = in [ i ] + in[ i + blockDim.x ];
__syncthreads();   // all loads/adds visible before the tree reduction
// Halving tree reduction: s active threads add the upper half onto the
// lower half each pass until data[0] holds the block sum.
for ( int s = blockDim.x / 2; s > 0; s >>= 1 ){
if ( tid < s) data[tid] += data[tid + s];
__syncthreads();   // outside the divergent if: reached by every thread
}
__syncthreads();   // redundant — the loop's final iteration already synced
if ( tid == 0 ) out[blockIdx.x] = data[0];
}
using namespace std;
int main(){
DATA_TYPE nums[ BLOCKS_NUM * BLOCK_SIZE ];
int res = 0;
srand(time(0));
for( int i = 0; i < BLOCKS_NUM * BLOCK_SIZE; i++ ){
if ( i < 1000 ) nums[ i ] = rand()%100 - 50;
else nums[ i ] = 0;
res += nums[ i ];
}
cout << "For summ: " << res << endl;
cudaSetDevice( 0 );
DATA_TYPE* in;
DATA_TYPE* out;
unsigned int in_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM * BLOCK_SIZE;
unsigned int out_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM;
cudaMalloc( ( void** ) &in, in_memory_size );
cudaMalloc( ( void** ) &out, out_memory_size );
cudaMemcpy( in, nums, in_memory_size, cudaMemcpyHostToDevice );
dim3 block( BLOCK_SIZE );
dim3 grid( BLOCKS_NUM );
reduce<<< grid, block >>>( in, out );
cudaDeviceSynchronize();
cudaMemcpy( nums, out, out_memory_size, cudaMemcpyDeviceToHost );
res = 0;
for (int i = 0; i < 2; i++) res += nums[i];
cout << "CUDA summ: " << res << endl;
cin.get();
cudaFree( in );
cudaFree( out );
return 0;
} | code for sm_80
Function : _Z6reducePiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e620000002100 */
/*0050*/ SHF.L.U32 R0, R6, 0x1, RZ ; /* 0x0000000106007819 */
/* 0x001fca00000006ff */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x002fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R0.reuse, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x040fe200078e0205 */
/*0080*/ IADD3 R4, R0, c[0x0][0x0], RZ ; /* 0x0000000000047a10 */
/* 0x000fca0007ffe0ff */
/*0090*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe200078e0005 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000eaa000c1e1900 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*00d0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00e0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00f0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe4000bf25270 */
/*0100*/ IADD3 R0, R4, R3, RZ ; /* 0x0000000304007210 */
/* 0x004fca0007ffe0ff */
/*0110*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */
/* 0x0001e80000004800 */
/*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0130*/ @!P1 BRA 0x200 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*0140*/ IMAD.SHL.U32 R0, R7, 0x4, RZ ; /* 0x0000000407007824 */
/* 0x001fe200078e00ff */
/*0150*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fc80008000f00 */
/*0160*/ ISETP.GE.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26270 */
/*0170*/ @!P1 IMAD R2, R3, 0x4, R0 ; /* 0x0000000403029824 */
/* 0x000fe200078e0200 */
/*0180*/ @!P1 LDS R4, [R7.X4] ; /* 0x0000000007049984 */
/* 0x000fe20000004800 */
/*0190*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*01a0*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*01b0*/ @!P1 IADD3 R4, R4, R5, RZ ; /* 0x0000000504049210 */
/* 0x001fca0007ffe0ff */
/*01c0*/ @!P1 STS [R7.X4], R4 ; /* 0x0000000407009388 */
/* 0x0001e80000004800 */
/*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01e0*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01f0*/ @P1 BRA 0x160 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*0200*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x001fec0000010000 */
/*0210*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0220*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0230*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0240*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*0250*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0260*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0270*/ BRA 0x270; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <ctime>
#define BLOCKS_NUM 4
#define BLOCK_SIZE 256
#define DATA_TYPE int
// Block-level sum reduction in shared memory.
// Each thread first loads and adds TWO input elements (indices i and
// i + blockDim.x), so one block consumes 2 * blockDim.x consecutive ints;
// the block then tree-reduces the partials and thread 0 writes the
// per-block sum to out[blockIdx.x].
// NOTE(review): there is no bounds check on i — the caller must guarantee
// in[] holds at least 2 * gridDim.x * blockDim.x elements; the launch in
// main() below (grid = 4, block = 256 over a 1024-int buffer) does not,
// so blocks 2 and 3 read out of bounds. Confirm the intended grid sizing.
__global__ void reduce( DATA_TYPE* in, DATA_TYPE* out ){
__shared__ int data[BLOCK_SIZE];   // one partial-sum slot per thread
int tid = threadIdx.x;
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;   // first of this thread's two inputs
data[ tid ] = in [ i ] + in[ i + blockDim.x ];
__syncthreads();   // all loads/adds visible before the tree reduction
// Halving tree reduction: s active threads add the upper half onto the
// lower half each pass until data[0] holds the block sum.
for ( int s = blockDim.x / 2; s > 0; s >>= 1 ){
if ( tid < s) data[tid] += data[tid + s];
__syncthreads();   // outside the divergent if: reached by every thread
}
__syncthreads();   // redundant — the loop's final iteration already synced
if ( tid == 0 ) out[blockIdx.x] = data[0];
}
using namespace std;
int main(){
DATA_TYPE nums[ BLOCKS_NUM * BLOCK_SIZE ];
int res = 0;
srand(time(0));
for( int i = 0; i < BLOCKS_NUM * BLOCK_SIZE; i++ ){
if ( i < 1000 ) nums[ i ] = rand()%100 - 50;
else nums[ i ] = 0;
res += nums[ i ];
}
cout << "For summ: " << res << endl;
cudaSetDevice( 0 );
DATA_TYPE* in;
DATA_TYPE* out;
unsigned int in_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM * BLOCK_SIZE;
unsigned int out_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM;
cudaMalloc( ( void** ) &in, in_memory_size );
cudaMalloc( ( void** ) &out, out_memory_size );
cudaMemcpy( in, nums, in_memory_size, cudaMemcpyHostToDevice );
dim3 block( BLOCK_SIZE );
dim3 grid( BLOCKS_NUM );
reduce<<< grid, block >>>( in, out );
cudaDeviceSynchronize();
cudaMemcpy( nums, out, out_memory_size, cudaMemcpyDeviceToHost );
res = 0;
for (int i = 0; i < 2; i++) res += nums[i];
cout << "CUDA summ: " << res << endl;
cin.get();
cudaFree( in );
cudaFree( out );
return 0;
} | .file "tmpxft_0005ca8b_00000000-6_kernel.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z6reducePiS_PiS_
.type _Z27__device_stub__Z6reducePiS_PiS_, @function
_Z27__device_stub__Z6reducePiS_PiS_:
.LFB3694:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6reducePiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z27__device_stub__Z6reducePiS_PiS_, .-_Z27__device_stub__Z6reducePiS_PiS_
.globl _Z6reducePiS_
.type _Z6reducePiS_, @function
_Z6reducePiS_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z6reducePiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z6reducePiS_, .-_Z6reducePiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "For summ: "
.LC1:
.string "CUDA summ: "
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $4096, %rsp
.cfi_def_cfa_offset 4120
orq $0, (%rsp)
subq $72, %rsp
.cfi_def_cfa_offset 4192
movq %fs:40, %rax
movq %rax, 4152(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl $0, %ebx
movl $0, %ebp
jmp .L15
.L12:
movl $0, 48(%rsp,%rbx,4)
addq $1, %rbx
cmpq $1024, %rbx
je .L19
.L15:
cmpl $999, %ebx
jg .L12
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
subl $50, %eax
movl %eax, 48(%rsp,%rbx,4)
addl %eax, %ebp
addq $1, %rbx
jmp .L15
.L19:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %edi
call cudaSetDevice@PLT
leaq 8(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $16, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $4096, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 24(%rsp)
movl $1, 28(%rsp)
movl $4, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
call cudaDeviceSynchronize@PLT
leaq 48(%rsp), %rdi
movl $2, %ecx
movl $16, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl 48(%rsp), %ebx
addl 52(%rsp), %ebx
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq _ZSt3cin(%rip), %rdi
call _ZNSi3getEv@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 4152(%rsp), %rax
subq %fs:40, %rax
jne .L21
movl $0, %eax
addq $4168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z6reducePiS_PiS_
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z6reducePiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z6reducePiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <ctime>
#define BLOCKS_NUM 4
#define BLOCK_SIZE 256
#define DATA_TYPE int
// Block-level sum reduction in shared memory.
// Each thread first loads and adds TWO input elements (indices i and
// i + blockDim.x), so one block consumes 2 * blockDim.x consecutive ints;
// the block then tree-reduces the partials and thread 0 writes the
// per-block sum to out[blockIdx.x].
// NOTE(review): there is no bounds check on i — the caller must guarantee
// in[] holds at least 2 * gridDim.x * blockDim.x elements; the launch in
// main() below (grid = 4, block = 256 over a 1024-int buffer) does not,
// so blocks 2 and 3 read out of bounds. Confirm the intended grid sizing.
__global__ void reduce( DATA_TYPE* in, DATA_TYPE* out ){
__shared__ int data[BLOCK_SIZE];   // one partial-sum slot per thread
int tid = threadIdx.x;
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;   // first of this thread's two inputs
data[ tid ] = in [ i ] + in[ i + blockDim.x ];
__syncthreads();   // all loads/adds visible before the tree reduction
// Halving tree reduction: s active threads add the upper half onto the
// lower half each pass until data[0] holds the block sum.
for ( int s = blockDim.x / 2; s > 0; s >>= 1 ){
if ( tid < s) data[tid] += data[tid + s];
__syncthreads();   // outside the divergent if: reached by every thread
}
__syncthreads();   // redundant — the loop's final iteration already synced
if ( tid == 0 ) out[blockIdx.x] = data[0];
}
using namespace std;
int main(){
DATA_TYPE nums[ BLOCKS_NUM * BLOCK_SIZE ];
int res = 0;
srand(time(0));
for( int i = 0; i < BLOCKS_NUM * BLOCK_SIZE; i++ ){
if ( i < 1000 ) nums[ i ] = rand()%100 - 50;
else nums[ i ] = 0;
res += nums[ i ];
}
cout << "For summ: " << res << endl;
cudaSetDevice( 0 );
DATA_TYPE* in;
DATA_TYPE* out;
unsigned int in_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM * BLOCK_SIZE;
unsigned int out_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM;
cudaMalloc( ( void** ) &in, in_memory_size );
cudaMalloc( ( void** ) &out, out_memory_size );
cudaMemcpy( in, nums, in_memory_size, cudaMemcpyHostToDevice );
dim3 block( BLOCK_SIZE );
dim3 grid( BLOCKS_NUM );
reduce<<< grid, block >>>( in, out );
cudaDeviceSynchronize();
cudaMemcpy( nums, out, out_memory_size, cudaMemcpyDeviceToHost );
res = 0;
for (int i = 0; i < 2; i++) res += nums[i];
cout << "CUDA summ: " << res << endl;
cin.get();
cudaFree( in );
cudaFree( out );
return 0;
} | #include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <ctime>
#define BLOCKS_NUM 4
#define BLOCK_SIZE 256
#define DATA_TYPE int
// Block-level sum reduction in shared memory.
// Each thread first loads and adds TWO input elements (indices i and
// i + blockDim.x), so one block consumes 2 * blockDim.x consecutive ints;
// the block then tree-reduces the partials and thread 0 writes the
// per-block sum to out[blockIdx.x].
// NOTE(review): there is no bounds check on i — the caller must guarantee
// in[] holds at least 2 * gridDim.x * blockDim.x elements; the launch in
// main() below (grid = 4, block = 256 over a 1024-int buffer) does not,
// so blocks 2 and 3 read out of bounds. Confirm the intended grid sizing.
__global__ void reduce( DATA_TYPE* in, DATA_TYPE* out ){
__shared__ int data[BLOCK_SIZE];   // one partial-sum slot per thread
int tid = threadIdx.x;
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;   // first of this thread's two inputs
data[ tid ] = in [ i ] + in[ i + blockDim.x ];
__syncthreads();   // all loads/adds visible before the tree reduction
// Halving tree reduction: s active threads add the upper half onto the
// lower half each pass until data[0] holds the block sum.
for ( int s = blockDim.x / 2; s > 0; s >>= 1 ){
if ( tid < s) data[tid] += data[tid + s];
__syncthreads();   // outside the divergent if: reached by every thread
}
__syncthreads();   // redundant — the loop's final iteration already synced
if ( tid == 0 ) out[blockIdx.x] = data[0];
}
using namespace std;
// Host driver: builds a random test vector, reduces it on the GPU, and
// prints both a CPU-computed reference sum and the GPU result.
// NOTE(review): none of the hip* return codes are checked — add error
// checking so allocation/copy/launch failures don't pass silently.
int main(){
DATA_TYPE nums[ BLOCKS_NUM * BLOCK_SIZE ];   // 4 * 256 = 1024 ints
int res = 0;
srand(time(0));
// Fill the first 1000 slots with values in [-50, 49], zero the rest,
// and accumulate the CPU reference sum as we go.
for( int i = 0; i < BLOCKS_NUM * BLOCK_SIZE; i++ ){
if ( i < 1000 ) nums[ i ] = rand()%100 - 50;
else nums[ i ] = 0;
res += nums[ i ];
}
cout << "For summ: " << res << endl;
hipSetDevice( 0 );
DATA_TYPE* in;
DATA_TYPE* out;
unsigned int in_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM * BLOCK_SIZE;
unsigned int out_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM;   // one partial sum per block
hipMalloc( ( void** ) &in, in_memory_size );
hipMalloc( ( void** ) &out, out_memory_size );
hipMemcpy( in, nums, in_memory_size, hipMemcpyHostToDevice );
dim3 block( BLOCK_SIZE );
dim3 grid( BLOCKS_NUM );
// NOTE(review): each reduce() block consumes 2 * BLOCK_SIZE = 512 inputs,
// so a grid of BLOCKS_NUM = 4 blocks reads 2048 ints from the 1024-int
// buffer — blocks 2 and 3 read out of bounds. A grid of BLOCKS_NUM / 2
// would exactly cover the data; confirm the intended sizing.
reduce<<< grid, block >>>( in, out );
hipDeviceSynchronize();
hipMemcpy( nums, out, out_memory_size, hipMemcpyDeviceToHost );
res = 0;
// Only the first two partial sums are added — these correspond to the
// in-bounds half of the launch above (2 blocks * 512 inputs = 1024).
for (int i = 0; i < 2; i++) res += nums[i];
cout << "CUDA summ: " << res << endl;
cin.get();   // keep the console open until the user presses Enter
hipFree( in );
hipFree( out );
return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <ctime>
#define BLOCKS_NUM 4
#define BLOCK_SIZE 256
#define DATA_TYPE int
// Block-level sum reduction in shared memory.
// Each thread first loads and adds TWO input elements (indices i and
// i + blockDim.x), so one block consumes 2 * blockDim.x consecutive ints;
// the block then tree-reduces the partials and thread 0 writes the
// per-block sum to out[blockIdx.x].
// NOTE(review): there is no bounds check on i — the caller must guarantee
// in[] holds at least 2 * gridDim.x * blockDim.x elements; the launch in
// main() below (grid = 4, block = 256 over a 1024-int buffer) does not,
// so blocks 2 and 3 read out of bounds. Confirm the intended grid sizing.
__global__ void reduce( DATA_TYPE* in, DATA_TYPE* out ){
__shared__ int data[BLOCK_SIZE];   // one partial-sum slot per thread
int tid = threadIdx.x;
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;   // first of this thread's two inputs
data[ tid ] = in [ i ] + in[ i + blockDim.x ];
__syncthreads();   // all loads/adds visible before the tree reduction
// Halving tree reduction: s active threads add the upper half onto the
// lower half each pass until data[0] holds the block sum.
for ( int s = blockDim.x / 2; s > 0; s >>= 1 ){
if ( tid < s) data[tid] += data[tid + s];
__syncthreads();   // outside the divergent if: reached by every thread
}
__syncthreads();   // redundant — the loop's final iteration already synced
if ( tid == 0 ) out[blockIdx.x] = data[0];
}
using namespace std;
int main(){
DATA_TYPE nums[ BLOCKS_NUM * BLOCK_SIZE ];
int res = 0;
srand(time(0));
for( int i = 0; i < BLOCKS_NUM * BLOCK_SIZE; i++ ){
if ( i < 1000 ) nums[ i ] = rand()%100 - 50;
else nums[ i ] = 0;
res += nums[ i ];
}
cout << "For summ: " << res << endl;
hipSetDevice( 0 );
DATA_TYPE* in;
DATA_TYPE* out;
unsigned int in_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM * BLOCK_SIZE;
unsigned int out_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM;
hipMalloc( ( void** ) &in, in_memory_size );
hipMalloc( ( void** ) &out, out_memory_size );
hipMemcpy( in, nums, in_memory_size, hipMemcpyHostToDevice );
dim3 block( BLOCK_SIZE );
dim3 grid( BLOCKS_NUM );
reduce<<< grid, block >>>( in, out );
hipDeviceSynchronize();
hipMemcpy( nums, out, out_memory_size, hipMemcpyDeviceToHost );
res = 0;
for (int i = 0; i < 2; i++) res += nums[i];
cout << "CUDA summ: " << res << endl;
cin.get();
hipFree( in );
hipFree( out );
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6reducePiS_
.globl _Z6reducePiS_
.p2align 8
.type _Z6reducePiS_,@function
_Z6reducePiS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mul_i32 s6, s15, s3
s_cmp_lt_u32 s3, 2
v_lshl_add_u32 v1, s6, 1, v0
v_dual_mov_b32 v4, 0 :: v_dual_add_nc_u32 v3, s3, v1
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_clause 0x1
global_load_b32 v2, v[1:2], off
global_load_b32 v3, v[3:4], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB0_2
.p2align 6
.LBB0_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB0_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB0_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_1
.LBB0_5:
s_mov_b32 s3, 0
s_barrier
buffer_gl0_inv
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6reducePiS_
.amdhsa_group_segment_fixed_size 1024
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6reducePiS_, .Lfunc_end0-_Z6reducePiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 1024
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6reducePiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6reducePiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <ctime>
#define BLOCKS_NUM 4
#define BLOCK_SIZE 256
#define DATA_TYPE int
// Block-level sum reduction in shared memory.
// Each thread first loads and adds TWO input elements (indices i and
// i + blockDim.x), so one block consumes 2 * blockDim.x consecutive ints;
// the block then tree-reduces the partials and thread 0 writes the
// per-block sum to out[blockIdx.x].
// NOTE(review): there is no bounds check on i — the caller must guarantee
// in[] holds at least 2 * gridDim.x * blockDim.x elements; the launch in
// main() below (grid = 4, block = 256 over a 1024-int buffer) does not,
// so blocks 2 and 3 read out of bounds. Confirm the intended grid sizing.
__global__ void reduce( DATA_TYPE* in, DATA_TYPE* out ){
__shared__ int data[BLOCK_SIZE];   // one partial-sum slot per thread
int tid = threadIdx.x;
int i = 2 * blockIdx.x * blockDim.x + threadIdx.x;   // first of this thread's two inputs
data[ tid ] = in [ i ] + in[ i + blockDim.x ];
__syncthreads();   // all loads/adds visible before the tree reduction
// Halving tree reduction: s active threads add the upper half onto the
// lower half each pass until data[0] holds the block sum.
for ( int s = blockDim.x / 2; s > 0; s >>= 1 ){
if ( tid < s) data[tid] += data[tid + s];
__syncthreads();   // outside the divergent if: reached by every thread
}
__syncthreads();   // redundant — the loop's final iteration already synced
if ( tid == 0 ) out[blockIdx.x] = data[0];
}
using namespace std;
int main(){
DATA_TYPE nums[ BLOCKS_NUM * BLOCK_SIZE ];
int res = 0;
srand(time(0));
for( int i = 0; i < BLOCKS_NUM * BLOCK_SIZE; i++ ){
if ( i < 1000 ) nums[ i ] = rand()%100 - 50;
else nums[ i ] = 0;
res += nums[ i ];
}
cout << "For summ: " << res << endl;
hipSetDevice( 0 );
DATA_TYPE* in;
DATA_TYPE* out;
unsigned int in_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM * BLOCK_SIZE;
unsigned int out_memory_size = sizeof( DATA_TYPE ) * BLOCKS_NUM;
hipMalloc( ( void** ) &in, in_memory_size );
hipMalloc( ( void** ) &out, out_memory_size );
hipMemcpy( in, nums, in_memory_size, hipMemcpyHostToDevice );
dim3 block( BLOCK_SIZE );
dim3 grid( BLOCKS_NUM );
reduce<<< grid, block >>>( in, out );
hipDeviceSynchronize();
hipMemcpy( nums, out, out_memory_size, hipMemcpyDeviceToHost );
res = 0;
for (int i = 0; i < 2; i++) res += nums[i];
cout << "CUDA summ: " << res << endl;
cin.get();
hipFree( in );
hipFree( out );
return 0;
} | .text
.file "kernel.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__reducePiS_ # -- Begin function _Z21__device_stub__reducePiS_
.p2align 4, 0x90
.type _Z21__device_stub__reducePiS_,@function
_Z21__device_stub__reducePiS_: # @_Z21__device_stub__reducePiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z6reducePiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z21__device_stub__reducePiS_, .Lfunc_end0-_Z21__device_stub__reducePiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $4200, %rsp # imm = 0x1068
.cfi_def_cfa_offset 4224
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
xorl %r14d, %r14d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
xorl %ebx, %ebx
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_3: # in Loop: Header=BB1_1 Depth=1
movl %eax, 96(%rsp,%r14,4)
addl %eax, %ebx
incq %r14
cmpq $1024, %r14 # imm = 0x400
je .LBB1_4
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $0, %eax
cmpq $999, %r14 # imm = 0x3E7
ja .LBB1_3
# %bb.2: # in Loop: Header=BB1_1 Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
negl %ecx
addl %ecx, %eax
addl $-50, %eax
jmp .LBB1_3
.LBB1_4:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebx, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_7
# %bb.6:
movzbl 67(%rbx), %ecx
jmp .LBB1_8
.LBB1_7:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %ebx, %ebx
xorl %edi, %edi
callq hipSetDevice
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
movq %rsp, %rdi
movl $16, %esi
callq hipMalloc
movq 8(%rsp), %rdi
leaq 96(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movl $1, %ecx
callq hipMemcpy
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 252(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_10
# %bb.9:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6reducePiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_10:
callq hipDeviceSynchronize
movq (%rsp), %rsi
leaq 96(%rsp), %rdi
movl $16, %edx
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_11: # =>This Inner Loop Header: Depth=1
addl 96(%rsp,%rax,4), %ebx
incq %rax
cmpq $1, %rax
je .LBB1_11
# %bb.12:
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebx, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i26
cmpb $0, 56(%rbx)
je .LBB1_15
# %bb.14:
movzbl 67(%rbx), %ecx
jmp .LBB1_16
.LBB1_15:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_16: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit29
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt3cin, %edi
callq _ZNSi3getEv
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $4200, %rsp # imm = 0x1068
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB1_17:
.cfi_def_cfa_offset 4224
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6reducePiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6reducePiS_,@object # @_Z6reducePiS_
.section .rodata,"a",@progbits
.globl _Z6reducePiS_
.p2align 3, 0x0
_Z6reducePiS_:
.quad _Z21__device_stub__reducePiS_
.size _Z6reducePiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "For summ: "
.size .L.str, 11
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "CUDA summ: "
.size .L.str.1, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6reducePiS_"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__reducePiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6reducePiS_
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6reducePiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e620000002100 */
/*0050*/ SHF.L.U32 R0, R6, 0x1, RZ ; /* 0x0000000106007819 */
/* 0x001fca00000006ff */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x002fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R0.reuse, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x040fe200078e0205 */
/*0080*/ IADD3 R4, R0, c[0x0][0x0], RZ ; /* 0x0000000000047a10 */
/* 0x000fca0007ffe0ff */
/*0090*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe200078e0005 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000eaa000c1e1900 */
/*00b0*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000800 */
/*00d0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*00e0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00f0*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe4000bf25270 */
/*0100*/ IADD3 R0, R4, R3, RZ ; /* 0x0000000304007210 */
/* 0x004fca0007ffe0ff */
/*0110*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */
/* 0x0001e80000004800 */
/*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0130*/ @!P1 BRA 0x200 ; /* 0x000000c000009947 */
/* 0x000fea0003800000 */
/*0140*/ IMAD.SHL.U32 R0, R7, 0x4, RZ ; /* 0x0000000407007824 */
/* 0x001fe200078e00ff */
/*0150*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fc80008000f00 */
/*0160*/ ISETP.GE.AND P1, PT, R7, R3, PT ; /* 0x000000030700720c */
/* 0x000fda0003f26270 */
/*0170*/ @!P1 IMAD R2, R3, 0x4, R0 ; /* 0x0000000403029824 */
/* 0x000fe200078e0200 */
/*0180*/ @!P1 LDS R4, [R7.X4] ; /* 0x0000000007049984 */
/* 0x000fe20000004800 */
/*0190*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc60000011603 */
/*01a0*/ @!P1 LDS R5, [R2] ; /* 0x0000000002059984 */
/* 0x000e240000000800 */
/*01b0*/ @!P1 IADD3 R4, R4, R5, RZ ; /* 0x0000000504049210 */
/* 0x001fca0007ffe0ff */
/*01c0*/ @!P1 STS [R7.X4], R4 ; /* 0x0000000407009388 */
/* 0x0001e80000004800 */
/*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01e0*/ ISETP.NE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f25270 */
/*01f0*/ @P1 BRA 0x160 ; /* 0xffffff6000001947 */
/* 0x001fea000383ffff */
/*0200*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x001fec0000010000 */
/*0210*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0220*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0230*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fc800078e00ff */
/*0240*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*0250*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0260*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0270*/ BRA 0x270; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6reducePiS_
.globl _Z6reducePiS_
.p2align 8
.type _Z6reducePiS_,@function
_Z6reducePiS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x1c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mul_i32 s6, s15, s3
s_cmp_lt_u32 s3, 2
v_lshl_add_u32 v1, s6, 1, v0
v_dual_mov_b32 v4, 0 :: v_dual_add_nc_u32 v3, s3, v1
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_clause 0x1
global_load_b32 v2, v[1:2], off
global_load_b32 v3, v[3:4], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB0_2
.p2align 6
.LBB0_1:
s_or_b32 exec_lo, exec_lo, s5
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s3, 4
s_mov_b32 s3, s4
.LBB0_2:
buffer_gl0_inv
s_cbranch_scc1 .LBB0_5
s_lshr_b32 s4, s3, 1
s_mov_b32 s5, exec_lo
v_cmpx_gt_u32_e64 s4, v0
s_cbranch_execz .LBB0_1
v_add_lshl_u32 v2, s4, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB0_1
.LBB0_5:
s_mov_b32 s3, 0
s_barrier
buffer_gl0_inv
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6reducePiS_
.amdhsa_group_segment_fixed_size 1024
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6reducePiS_, .Lfunc_end0-_Z6reducePiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 1024
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6reducePiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6reducePiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0005ca8b_00000000-6_kernel.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z6reducePiS_PiS_
.type _Z27__device_stub__Z6reducePiS_PiS_, @function
_Z27__device_stub__Z6reducePiS_PiS_:
.LFB3694:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6reducePiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z27__device_stub__Z6reducePiS_PiS_, .-_Z27__device_stub__Z6reducePiS_PiS_
.globl _Z6reducePiS_
.type _Z6reducePiS_, @function
_Z6reducePiS_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z6reducePiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z6reducePiS_, .-_Z6reducePiS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "For summ: "
.LC1:
.string "CUDA summ: "
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $4096, %rsp
.cfi_def_cfa_offset 4120
orq $0, (%rsp)
subq $72, %rsp
.cfi_def_cfa_offset 4192
movq %fs:40, %rax
movq %rax, 4152(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl $0, %ebx
movl $0, %ebp
jmp .L15
.L12:
movl $0, 48(%rsp,%rbx,4)
addq $1, %rbx
cmpq $1024, %rbx
je .L19
.L15:
cmpl $999, %ebx
jg .L12
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
subl $50, %eax
movl %eax, 48(%rsp,%rbx,4)
addl %eax, %ebp
addq $1, %rbx
jmp .L15
.L19:
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebp, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %edi
call cudaSetDevice@PLT
leaq 8(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $16, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $4096, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 24(%rsp)
movl $1, 28(%rsp)
movl $4, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
call cudaDeviceSynchronize@PLT
leaq 48(%rsp), %rdi
movl $2, %ecx
movl $16, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movl 48(%rsp), %ebx
addl 52(%rsp), %ebx
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq _ZSt3cin(%rip), %rdi
call _ZNSi3getEv@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 4152(%rsp), %rax
subq %fs:40, %rax
jne .L21
movl $0, %eax
addq $4168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z6reducePiS_PiS_
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z6reducePiS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z6reducePiS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z21__device_stub__reducePiS_ # -- Begin function _Z21__device_stub__reducePiS_
.p2align 4, 0x90
.type _Z21__device_stub__reducePiS_,@function
_Z21__device_stub__reducePiS_: # @_Z21__device_stub__reducePiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z6reducePiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z21__device_stub__reducePiS_, .Lfunc_end0-_Z21__device_stub__reducePiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $4200, %rsp # imm = 0x1068
.cfi_def_cfa_offset 4224
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
xorl %r14d, %r14d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
xorl %ebx, %ebx
jmp .LBB1_1
.p2align 4, 0x90
.LBB1_3: # in Loop: Header=BB1_1 Depth=1
movl %eax, 96(%rsp,%r14,4)
addl %eax, %ebx
incq %r14
cmpq $1024, %r14 # imm = 0x400
je .LBB1_4
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl $0, %eax
cmpq $999, %r14 # imm = 0x3E7
ja .LBB1_3
# %bb.2: # in Loop: Header=BB1_1 Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
negl %ecx
addl %ecx, %eax
addl $-50, %eax
jmp .LBB1_3
.LBB1_4:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebx, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_7
# %bb.6:
movzbl 67(%rbx), %ecx
jmp .LBB1_8
.LBB1_7:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %ebx, %ebx
xorl %edi, %edi
callq hipSetDevice
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
movq %rsp, %rdi
movl $16, %esi
callq hipMalloc
movq 8(%rsp), %rdi
leaq 96(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movl $1, %ecx
callq hipMemcpy
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 252(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_10
# %bb.9:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6reducePiS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_10:
callq hipDeviceSynchronize
movq (%rsp), %rsi
leaq 96(%rsp), %rdi
movl $16, %edx
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_11: # =>This Inner Loop Header: Depth=1
addl 96(%rsp,%rax,4), %ebx
incq %rax
cmpq $1, %rax
je .LBB1_11
# %bb.12:
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $11, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebx, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i26
cmpb $0, 56(%rbx)
je .LBB1_15
# %bb.14:
movzbl 67(%rbx), %ecx
jmp .LBB1_16
.LBB1_15:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_16: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit29
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt3cin, %edi
callq _ZNSi3getEv
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $4200, %rsp # imm = 0x1068
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB1_17:
.cfi_def_cfa_offset 4224
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6reducePiS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6reducePiS_,@object # @_Z6reducePiS_
.section .rodata,"a",@progbits
.globl _Z6reducePiS_
.p2align 3, 0x0
_Z6reducePiS_:
.quad _Z21__device_stub__reducePiS_
.size _Z6reducePiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "For summ: "
.size .L.str, 11
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "CUDA summ: "
.size .L.str.1, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6reducePiS_"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__reducePiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6reducePiS_
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //put C:/Users/molly/Desktop/289Q/project/main.cu
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <cooperative_groups.h>
#include <cooperative_groups.h>
// includes, project
#include <cuda.h>
#include <cuda_runtime.h>
using namespace cooperative_groups;
namespace cg = cooperative_groups;
// #define FILESIZE_CHAR 1048576
#define FILESIZE_CHAR 1048576
#define FILESIZE_INT FILESIZE_CHAR/4
__host__
void makeLUT(int N, int* LUT){
int M = N;
int even = 0;
int odd = 1;
int LUTsize = N*(log2((double)N)*2 - 2);
for (int i =0; i < LUTsize/2; i+=N){
for (int j=0; j<N; j+=M){
for (int k =0; k<M/2; k++){
LUT[i+j+k] = even;
even+=2;
}
for (int k =M/2; k<M; k++){
LUT[i+j+k] = odd;
odd+=2;
}
} even=0; odd=1; M = M/2;
}
for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){
for(int j=0; j<N; j++){
int newIndex = LUT[x+j-LUTsize/2];
LUT[newIndex + i] = j;
}
}
return;
}
int createMask(int n)
{
int r = 0;
for (int i=0; i<n; i++)
r |= 1 << i;
return r;
}
__global__
void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){
int idx = threadIdx.x;
int in1, in2, in1_index, in2_index;
int readOffset=0;
int fileSize = FILESIZE_INT/2;
int readOffsetSecondNet=fileSize;
thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32?
if(blockIdx.x == 0){
while(readOffset < fileSize){
in1 = data[idx*2 + readOffset];
in2 = data[idx*2+1 + readOffset];
readOffset+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
g.sync();
// __syncthreads();
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
}
}
else if ( blockIdx.x < block) {
while(readOffset < fileSize){
while((valid[idx + (blockIdx.x)*(N/2)])==0);
in1_index = LUT[idx*2 + (blockIdx.x-1)*N];
in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
g.sync();
// __syncthreads();
}
else {
output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffset += N;
}
}
else if(blockIdx.x == block){
while(readOffsetSecondNet < FILESIZE_INT){
in1 = data[idx*2 + readOffsetSecondNet];
in2 = data[idx*2+1 + readOffsetSecondNet];
readOffsetSecondNet+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
}
}
else{
while(readOffsetSecondNet < FILESIZE_INT){
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N];
in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2);
valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
//printf("waiting for next block %d to consume\n", blockIdx.x + 1);
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
// printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
//printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
if (blockIdx.x != gridDim.x - 1){
valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
//printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N);
}
else {
output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffsetSecondNet += N;
}
}
}
int main(int argc, char *argv[]){
// Host driver: reads a binary input file, builds the Benes-network routing
// LUT, launches the benes kernel (one thread block per network stage, one
// thread per 2x2 switch), then verifies that within every N-element output
// chunk the masked key bits are nondecreasing.
if (argc != 3){
printf("Usage: %s <input.txt> <size>\n", argv[0]);
return 1;
}
std::ifstream file(argv[1], std::ios::binary);
if (!file) {
printf("Could not open input file\n");
return 1;
}
int N = atoi(argv[2]);
if (FILESIZE_INT<N)
N = FILESIZE_INT;
int blockSize = N/2; // one thread per 2x2 switch in a stage
int blocks = 2*log2((double)N)-1; // stage count of an N-input Benes network
int LUTsize = N*(log2((double)N)*2 - 2);
int numBlocks;
if (FILESIZE_INT <= N)
numBlocks = blocks;
else
numBlocks = 2*blocks; // two back-to-back networks when input exceeds N
char* network;
cudaMallocManaged(&network,N*(numBlocks+1)*sizeof(char));
memset(network,0,N*(numBlocks+1)*sizeof(char));
int* LUT;
cudaMallocManaged(&LUT,LUTsize*sizeof(int));
makeLUT(N,LUT);
int mask = createMask(log2((double)N));
int *valid;
// The kernel indexes valid up to idx + numBlocks*(N/2), so numBlocks+1
// stage slots of N/2 flags are required. (The previous numBlocks-sized
// allocation was overrun by one stage, both by the memset below and by
// the kernel's handshake flags.)
cudaMallocManaged(&valid,(N/2)*(numBlocks+1)*sizeof(int));
memset(valid,0,(N/2)*(numBlocks+1)*sizeof(int));
for(int i = 0; i < N/2; i++)
valid[i] = 1; // stage 0 starts out ready
char* data;
cudaMallocManaged(&data,FILESIZE_CHAR*sizeof(char));
memset(data,0,FILESIZE_CHAR*sizeof(char));
file.read(data, FILESIZE_CHAR*sizeof(char));
file.close();
int* idata;
// FILESIZE_CHAR bytes == FILESIZE_INT ints: the raw byte buffer is
// reinterpreted as the int input stream consumed by the kernel.
cudaMallocManaged(&idata,FILESIZE_CHAR*sizeof(char));
memcpy(idata, data, FILESIZE_CHAR*sizeof(char));
char* output;
cudaMallocManaged(&output,FILESIZE_CHAR*sizeof(char));
memset(output,0,FILESIZE_CHAR*sizeof(char));
benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output);
cudaError_t err = cudaGetLastError(); // catch launch-configuration errors
if (err == cudaSuccess)
err = cudaDeviceSynchronize(); // catch asynchronous execution errors
if (err != cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(err));
return 1;
}
// Verify: inside each N-sized chunk, masked keys must be nondecreasing.
// Zero bytes mark unused output slots and are skipped.
for (int i = 0; i < FILESIZE_INT-1; i++){
if ((i%N != N-1) && (output[i+1]!=0)) {
if((mask & output[i+1]) < (mask & output[i])){
printf("ERROR in routing at output %d %d %d\n",i ,mask & output[i+1],mask &output[i] );
return 1;
}
}
}
printf("Routing was successful!\n");
cudaFree(valid);
cudaFree(LUT);
cudaFree(network);
cudaFree(data);
cudaFree(idata);
cudaFree(output);
return 0;
}
Function : _Z5benesiiPcPiPViiS0_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_TID.Z ; /* 0x0000000000037919 */
/* 0x000e220000002300 */
/*0020*/ IMAD.MOV.U32 R5, RZ, RZ, -0x1 ; /* 0xffffffffff057424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002200 */
/*0050*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */
/* 0x000e680000002100 */
/*0060*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000ea80000002500 */
/*0070*/ S2R R2, SR_LANEID ; /* 0x0000000000027919 */
/* 0x000ee20000000000 */
/*0080*/ IMAD R3, R3, c[0x0][0x4], R0 ; /* 0x0000010003037a24 */
/* 0x001fc400078e0200 */
/*0090*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x000fe400078e00ff */
/*00a0*/ IMAD R3, R3, c[0x0][0x0], R10 ; /* 0x0000000003037a24 */
/* 0x002fe400078e020a */
/*00b0*/ IMAD R0, R0, c[0x0][0x4], RZ ; /* 0x0000010000007a24 */
/* 0x000fe200078e02ff */
/*00c0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x004fe40003f05270 */
/*00d0*/ LOP3.LUT R3, R3, 0xfffffffe, RZ, 0xc0, !PT ; /* 0xfffffffe03037812 */
/* 0x000fca00078ec0ff */
/*00e0*/ IMAD R0, R0, c[0x0][0x8], -R3 ; /* 0x0000020000007a24 */
/* 0x000fe200078e0a03 */
/*00f0*/ LOP3.LUT R3, R2, 0xfffffffe, RZ, 0xc0, !PT ; /* 0xfffffffe02037812 */
/* 0x008fc800078ec0ff */
/*0100*/ IMNMX.U32 R0, R0, 0x2, PT ; /* 0x0000000200007817 */
/* 0x000fc80003800000 */
/*0110*/ IADD3 R0, -R0, 0x20, RZ ; /* 0x0000002000007810 */
/* 0x000fc80007ffe1ff */
/*0120*/ SHF.R.U32.HI R0, RZ, R0, R5 ; /* 0x00000000ff007219 */
/* 0x000fc80000011605 */
/*0130*/ SHF.L.U32 R0, R0, R3, RZ ; /* 0x0000000300007219 */
/* 0x000fe200000006ff */
/*0140*/ @!P0 BRA 0xf30 ; /* 0x00000de000008947 */
/* 0x000fea0003800000 */
/*0150*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff0e7624 */
/* 0x000fe200078e00ff */
/*0160*/ ISETP.GE.U32.AND P0, PT, R9.reuse, c[0x0][0x164], PT ; /* 0x0000590009007a0c */
/* 0x040fe20003f06070 */
/*0170*/ IMAD.SHL.U32 R8, R10, 0x2, RZ ; /* 0x000000020a087824 */
/* 0x000fe400078e00ff */
/*0180*/ IMAD R3, R9, R14, c[0x0][0x160] ; /* 0x0000580009037624 */
/* 0x000fc800078e020e */
/*0190*/ IMAD.IADD R3, R3, 0x1, R8 ; /* 0x0000000103037824 */
/* 0x000fca00078e0208 */
/*01a0*/ IADD3 R4, R3.reuse, 0x1, RZ ; /* 0x0000000103047810 */
/* 0x040fe40007ffe0ff */
/*01b0*/ IADD3 R2, P1, R3, c[0x0][0x168], RZ ; /* 0x00005a0003027a10 */
/* 0x000fe40007f3e0ff */
/*01c0*/ IADD3 R4, P2, R4, c[0x0][0x168], RZ ; /* 0x00005a0004047a10 */
/* 0x000fc60007f5e0ff */
/*01d0*/ IMAD.X R3, RZ, RZ, c[0x0][0x16c], P1 ; /* 0x00005b00ff037624 */
/* 0x000fe400008e06ff */
/*01e0*/ IMAD.X R5, RZ, RZ, c[0x0][0x16c], P2 ; /* 0x00005b00ff057624 */
/* 0x000fe200010e06ff */
/*01f0*/ @!P0 BRA 0xa80 ; /* 0x0000088000008947 */
/* 0x000fea0003800000 */
/*0200*/ ISETP.NE.AND P0, PT, R9, c[0x0][0x164], PT ; /* 0x0000590009007a0c */
/* 0x000fda0003f05270 */
/*0210*/ @!P0 BRA 0x7a0 ; /* 0x0000058000008947 */
/* 0x000fea0003800000 */
/*0220*/ I2F.U32.RP R11, c[0x0][0x164] ; /* 0x00005900000b7b06 */
/* 0x000e220000209000 */
/*0230*/ ISETP.NE.U32.AND P1, PT, RZ, c[0x0][0x164], PT ; /* 0x00005900ff007a0c */
/* 0x000fe20003f25070 */
/*0240*/ IMAD.MOV.U32 R15, RZ, RZ, 0x4 ; /* 0x00000004ff0f7424 */
/* 0x000fe200078e00ff */
/*0250*/ ULDC UR4, c[0x0][0xc] ; /* 0x0000030000047ab9 */
/* 0x000fe20000000800 */
/*0260*/ IMAD.MOV.U32 R17, RZ, RZ, 0x20000 ; /* 0x00020000ff117424 */
/* 0x000fe200078e00ff */
/*0270*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fc6000fffe03f */
/*0280*/ MUFU.RCP R11, R11 ; /* 0x0000000b000b7308 */
/* 0x001e240000001000 */
/*0290*/ IADD3 R6, R11, 0xffffffe, RZ ; /* 0x0ffffffe0b067810 */
/* 0x001fcc0007ffe0ff */
/*02a0*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */
/* 0x000064000021f000 */
/*02b0*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x001fe400078e00ff */
/*02c0*/ IMAD.MOV R13, RZ, RZ, -R7 ; /* 0x000000ffff0d7224 */
/* 0x002fc800078e0a07 */
/*02d0*/ IMAD R13, R13, c[0x0][0x164], RZ ; /* 0x000059000d0d7a24 */
/* 0x000fc800078e02ff */
/*02e0*/ IMAD.HI.U32 R12, R7, R13, R6 ; /* 0x0000000d070c7227 */
/* 0x000fe200078e0006 */
/*02f0*/ LEA.HI R6, R14, c[0x0][0x160], RZ, 0x1 ; /* 0x000058000e067a11 */
/* 0x000fc800078f08ff */
/*0300*/ SHF.R.S32.HI R6, RZ, 0x1, R6 ; /* 0x00000001ff067819 */
/* 0x000fe20000011406 */
/*0310*/ IMAD.HI.U32 R12, R12, R9, RZ ; /* 0x000000090c0c7227 */
/* 0x000fc800078e00ff */
/*0320*/ IMAD.MOV R12, RZ, RZ, -R12 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0a0c */
/*0330*/ IMAD R11, R6, R9, R10 ; /* 0x00000009060b7224 */
/* 0x000fe400078e020a */
/*0340*/ IMAD R7, R12, c[0x0][0x164], R9 ; /* 0x000059000c077a24 */
/* 0x000fe400078e0209 */
/*0350*/ IMAD.IADD R12, R6, 0x1, R11 ; /* 0x00000001060c7824 */
/* 0x000fe400078e020b */
/*0360*/ IMAD.WIDE.U32 R10, R11, R15, c[0x0][0x178] ; /* 0x00005e000b0a7625 */
/* 0x000fe200078e000f */
/*0370*/ ISETP.GE.U32.AND P0, PT, R7, c[0x0][0x164], PT ; /* 0x0000590007007a0c */
/* 0x000fc60003f06070 */
/*0380*/ IMAD.WIDE.U32 R12, R12, R15, c[0x0][0x178] ; /* 0x00005e000c0c7625 */
/* 0x000fd400078e000f */
/*0390*/ @P0 IADD3 R7, R7, -c[0x0][0x164], RZ ; /* 0x8000590007070a10 */
/* 0x000fc80007ffe0ff */
/*03a0*/ ISETP.GE.U32.AND P0, PT, R7, c[0x0][0x164], PT ; /* 0x0000590007007a0c */
/* 0x000fda0003f06070 */
/*03b0*/ @P0 IADD3 R7, R7, -c[0x0][0x164], RZ ; /* 0x8000590007070a10 */
/* 0x000fe40007ffe0ff */
/*03c0*/ @!P1 LOP3.LUT R7, RZ, c[0x0][0x164], RZ, 0x33, !PT ; /* 0x00005900ff079a12 */
/* 0x000fc800078e33ff */
/*03d0*/ IADD3 R7, R7, -0x1, RZ ; /* 0xffffffff07077810 */
/* 0x000fca0007ffe0ff */
/*03e0*/ IMAD R16, R7, c[0x0][0x160], R8 ; /* 0x0000580007107a24 */
/* 0x000fc800078e0208 */
/*03f0*/ IMAD.WIDE.U32 R14, R16, R15, c[0x0][0x170] ; /* 0x00005c00100e7625 */
/* 0x000fca00078e000f */
/*0400*/ BSSY B0, 0x460 ; /* 0x0000005000007945 */
/* 0x002fe40003800000 */
/*0410*/ LDG.E.STRONG.SYS R6, [R10.64] ; /* 0x000000080a067981 */
/* 0x000ea2000c1f5900 */
/*0420*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0430*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x004fda0003f05270 */
/*0440*/ @!P0 BRA 0x410 ; /* 0xffffffc000008947 */
/* 0x000fea000383ffff */
/*0450*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0460*/ IADD3 R6, R16, 0x1, RZ ; /* 0x0000000110067810 */
/* 0x000fe20007ffe0ff */
/*0470*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe200078e00ff */
/*0480*/ LDG.E R18, [R14.64] ; /* 0x000000080e127981 */
/* 0x000ea6000c1e1900 */
/*0490*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fcc00078e0007 */
/*04a0*/ LDG.E R6, [R6.64] ; /* 0x0000000806067981 */
/* 0x000ee2000c1e1900 */
/*04b0*/ BSSY B0, 0x5a0 ; /* 0x000000e000007945 */
/* 0x000fe20003800000 */
/*04c0*/ IMAD R18, R9.reuse, c[0x0][0x160], R18 ; /* 0x0000580009127a24 */
/* 0x044fe400078e0212 */
/*04d0*/ IMAD R20, R9, c[0x0][0x160], R6 ; /* 0x0000580009147a24 */
/* 0x008fc600078e0206 */
/*04e0*/ IADD3 R18, P0, R18, c[0x0][0x168], RZ ; /* 0x00005a0012127a10 */
/* 0x000fe40007f1e0ff */
/*04f0*/ IADD3 R20, P1, R20, c[0x0][0x168], RZ ; /* 0x00005a0014147a10 */
/* 0x000fc60007f3e0ff */
/*0500*/ IMAD.X R19, RZ, RZ, c[0x0][0x16c], P0 ; /* 0x00005b00ff137624 */
/* 0x000fe400000e06ff */
/*0510*/ IMAD.X R21, RZ, RZ, c[0x0][0x16c], P1 ; /* 0x00005b00ff157624 */
/* 0x000fc600008e06ff */
/*0520*/ LDG.E.U8 R18, [R18.64] ; /* 0x0000000812127981 */
/* 0x000168000c1e1100 */
/*0530*/ LDG.E.U8 R21, [R20.64] ; /* 0x0000000814157981 */
/* 0x000168000c1e1100 */
/*0540*/ STG.E.STRONG.SYS [R10.64], RZ ; /* 0x000000ff0a007986 */
/* 0x0001e4000c115908 */
/*0550*/ LDG.E.STRONG.SYS R6, [R12.64] ; /* 0x000000080c067981 */
/* 0x000ea2000c1f5900 */
/*0560*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0570*/ ISETP.NE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x004fda0003f05270 */
/*0580*/ @!P0 BRA 0x550 ; /* 0xffffffc000008947 */
/* 0x000fea000383ffff */
/*0590*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*05a0*/ PRMT R6, R18, 0x8880, RZ ; /* 0x0000888012067816 */
/* 0x020fe400000000ff */
/*05b0*/ PRMT R7, R21, 0x8880, RZ ; /* 0x0000888015077816 */
/* 0x000fe400000000ff */
/*05c0*/ LOP3.LUT R6, R6, c[0x0][0x180], RZ, 0xc0, !PT ; /* 0x0000600006067a12 */
/* 0x000fe400078ec0ff */
/*05d0*/ LOP3.LUT R7, R7, c[0x0][0x180], RZ, 0xc0, !PT ; /* 0x0000600007077a12 */
/* 0x000fc800078ec0ff */
/*05e0*/ ISETP.GE.AND P0, PT, R6, R7, PT ; /* 0x000000070600720c */
/* 0x000fc80003f06270 */
/*05f0*/ SEL R19, R18, R21, !P0 ; /* 0x0000001512137207 */
/* 0x001fe40004000000 */
/*0600*/ SEL R21, R21, R18, !P0 ; /* 0x0000001215157207 */
/* 0x000fe40004000000 */
/*0610*/ ISETP.NE.AND P0, PT, R9, UR4, PT ; /* 0x0000000409007c0c */
/* 0x000fe2000bf05270 */
/*0620*/ STG.E.U8 [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0001e8000c101108 */
/*0630*/ STG.E.U8 [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0001f0000c101108 */
/*0640*/ @!P0 BRA 0x700 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*0650*/ MATCH.ANY R19, R0 ; /* 0x00000000001373a1 */
/* 0x001e2200000e8000 */
/*0660*/ REDUX.OR UR5, R0 ; /* 0x00000000000573c4 */
/* 0x000e620000004000 */
/*0670*/ IMAD.MOV.U32 R7, RZ, RZ, 0x1 ; /* 0x00000001ff077424 */
/* 0x000fe200078e00ff */
/*0680*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */
/* 0x000fc800038e0100 */
/*0690*/ STG.E.STRONG.SYS [R12.64], R7 ; /* 0x000000070c007986 */
/* 0x0005e2000c115908 */
/*06a0*/ LOP3.LUT P0, RZ, R0, UR6, R19, 0x40, !PT ; /* 0x0000000600ff7c12 */
/* 0x001fda000f804013 */
/*06b0*/ @!P0 BRA.CONV UR5, 0x6e0 ; /* 0x0000002305008947 */
/* 0x002fea000b800000 */
/*06c0*/ MOV R6, 0x6e0 ; /* 0x000006e000067802 */
/* 0x004fe40000000f00 */
/*06d0*/ CALL.REL.NOINC 0x11f0 ; /* 0x00000b1000007944 */
/* 0x000fea0003c00000 */
/*06e0*/ NOP ; /* 0x0000000000007918 */
/* 0x004fcc0000000000 */
/*06f0*/ BRA 0x760 ; /* 0x0000006000007947 */
/* 0x000fea0003800000 */
/*0700*/ IMAD.IADD R7, R8, 0x1, R17 ; /* 0x0000000108077824 */
/* 0x000fca00078e0211 */
/*0710*/ IADD3 R6, P0, R7, c[0x0][0x190], RZ ; /* 0x0000640007067a10 */
/* 0x000fc80007f1e0ff */
/*0720*/ LEA.HI.X.SX32 R7, R7, c[0x0][0x194], 0x1, P0 ; /* 0x0000650007077a11 */
/* 0x000fca00000f0eff */
/*0730*/ STG.E.U8 [R6.64], R19 ; /* 0x0000001306007986 */
/* 0x0003e8000c101108 */
/*0740*/ LDG.E.U8 R21, [R4.64] ; /* 0x0000000804157981 */
/* 0x001ea8000c1e1100 */
/*0750*/ STG.E.U8 [R6.64+0x1], R21 ; /* 0x0000011506007986 */
/* 0x0043e4000c101108 */
/*0760*/ IADD3 R17, R17, c[0x0][0x160], RZ ; /* 0x0000580011117a10 */
/* 0x000fc80007ffe0ff */
/*0770*/ ISETP.GE.AND P0, PT, R17, 0x40000, PT ; /* 0x000400001100780c */
/* 0x000fda0003f06270 */
/*0780*/ @!P0 BRA 0x400 ; /* 0xfffffc7000008947 */
/* 0x000fea000383ffff */
/*0790*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*07a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x1 ; /* 0x00000001ff037424 */
/* 0x000fe200078e00ff */
/*07b0*/ LEA.HI R14, R14, c[0x0][0x160], RZ, 0x1 ; /* 0x000058000e0e7a11 */
/* 0x000fe200078f08ff */
/*07c0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe400078e00ff */
/*07d0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x20000 ; /* 0x00020000ff097424 */
/* 0x000fe200078e00ff */
/*07e0*/ IADD3 R3, R3, c[0x0][0x164], RZ ; /* 0x0000590003037a10 */
/* 0x000fe40007ffe0ff */
/*07f0*/ SHF.R.S32.HI R14, RZ, 0x1, R14 ; /* 0x00000001ff0e7819 */
/* 0x000fc6000001140e */
/*0800*/ IMAD R2, R3.reuse, c[0x0][0x160], R8 ; /* 0x0000580003027a24 */
/* 0x040fe400078e0208 */
/*0810*/ IMAD R10, R3, R14, R10 ; /* 0x0000000e030a7224 */
/* 0x000fc600078e020a */
/*0820*/ IADD3 R4, R2, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x000fe20007ffe0ff */
/*0830*/ IMAD.WIDE.U32 R10, R10, R11, c[0x0][0x178] ; /* 0x00005e000a0a7625 */
/* 0x000fe200078e000b */
/*0840*/ IADD3 R2, P0, R2, c[0x0][0x168], RZ ; /* 0x00005a0002027a10 */
/* 0x000fe40007f1e0ff */
/*0850*/ IADD3 R4, P1, R4, c[0x0][0x168], RZ ; /* 0x00005a0004047a10 */
/* 0x000fc60007f3e0ff */
/*0860*/ IMAD.X R3, RZ, RZ, c[0x0][0x16c], P0 ; /* 0x00005b00ff037624 */
/* 0x000fe400000e06ff */
/*0870*/ IMAD.X R5, RZ, RZ, c[0x0][0x16c], P1 ; /* 0x00005b00ff057624 */
/* 0x000fe400008e06ff */
/*0880*/ IMAD.IADD R6, R8, 0x1, R9 ; /* 0x0000000108067824 */
/* 0x000fe400078e0209 */
/*0890*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fc800078e00ff */
/*08a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x188] ; /* 0x0000620006067625 */
/* 0x000fca00078e0207 */
/*08b0*/ LDG.E R12, [R6.64] ; /* 0x00000008060c7981 */
/* 0x000168000c1e1900 */
/*08c0*/ LDG.E R15, [R6.64+0x4] ; /* 0x00000408060f7981 */
/* 0x000162000c1e1900 */
/*08d0*/ IADD3 R9, R9, c[0x0][0x160], RZ ; /* 0x0000580009097a10 */
/* 0x000fe20007ffe0ff */
/*08e0*/ BSSY B0, 0x950 ; /* 0x0000006000007945 */
/* 0x000fe60003800000 */
/*08f0*/ ISETP.GE.AND P0, PT, R9, 0x40000, PT ; /* 0x000400000900780c */
/* 0x000fe40003f06270 */
/*0900*/ LDG.E.STRONG.SYS R6, [R10.64] ; /* 0x000000080a067981 */
/* 0x001ea2000c1f5900 */
/*0910*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0920*/ ISETP.NE.AND P1, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x004fda0003f25270 */
/*0930*/ @!P1 BRA 0x900 ; /* 0xffffffc000009947 */
/* 0x000fea000383ffff */
/*0940*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0950*/ MATCH.ANY R17, R0 ; /* 0x00000000001173a1 */
/* 0x000e2200000e8000 */
/*0960*/ LOP3.LUT R6, R12, c[0x0][0x180], RZ, 0xc0, !PT ; /* 0x000060000c067a12 */
/* 0x020fe200078ec0ff */
/*0970*/ REDUX.OR UR4, R0 ; /* 0x00000000000473c4 */
/* 0x000e620000004000 */
/*0980*/ LOP3.LUT R7, R15, c[0x0][0x180], RZ, 0xc0, !PT ; /* 0x000060000f077a12 */
/* 0x000fe200078ec0ff */
/*0990*/ VOTEU.ANY UR5, UPT, PT ; /* 0x0000000000057886 */
/* 0x000fc600038e0100 */
/*09a0*/ ISETP.GE.AND P1, PT, R6, R7, PT ; /* 0x000000070600720c */
/* 0x000fc80003f26270 */
/*09b0*/ SEL R13, R12, R15, !P1 ; /* 0x0000000f0c0d7207 */
/* 0x000fe40004800000 */
/*09c0*/ SEL R7, R15, R12, !P1 ; /* 0x0000000c0f077207 */
/* 0x000fe20004800000 */
/*09d0*/ IMAD.MOV.U32 R15, RZ, RZ, 0x1 ; /* 0x00000001ff0f7424 */
/* 0x000fe400078e00ff */
/*09e0*/ STG.E.U8 [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0005e8000c101108 */
/*09f0*/ STG.E.U8 [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0005e8000c101108 */
/*0a00*/ STG.E.STRONG.SYS [R10.64], R15 ; /* 0x0000000f0a007986 */
/* 0x0005e2000c115908 */
/*0a10*/ LOP3.LUT P1, RZ, R0, UR5, R17, 0x40, !PT ; /* 0x0000000500ff7c12 */
/* 0x001fda000f824011 */
/*0a20*/ @!P1 BRA.CONV UR4, 0xa50 ; /* 0x0000002304009947 */
/* 0x002fea000b800000 */
/*0a30*/ MOV R6, 0xa50 ; /* 0x00000a5000067802 */
/* 0x004fe40000000f00 */
/*0a40*/ CALL.REL.NOINC 0x11f0 ; /* 0x000007a000007944 */
/* 0x000fea0003c00000 */
/*0a50*/ NOP ; /* 0x0000000000007918 */
/* 0x004fcc0000000000 */
/*0a60*/ @!P0 BRA 0x880 ; /* 0xfffffe1000008947 */
/* 0x000fea000383ffff */
/*0a70*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a80*/ LEA.HI R14, R14, c[0x0][0x160], RZ, 0x1 ; /* 0x000058000e0e7a11 */
/* 0x000fe200078f08ff */
/*0a90*/ UMOV UR4, 0x1 ; /* 0x0000000100047882 */
/* 0x000fe20000000000 */
/*0aa0*/ IADD3 R7, R9, -0x1, RZ ; /* 0xffffffff09077810 */
/* 0x000fe20007ffe0ff */
/*0ab0*/ ULDC UR5, c[0x0][0x164] ; /* 0x0000590000057ab9 */
/* 0x000fe20000000800 */
/*0ac0*/ SHF.R.S32.HI R14, RZ, 0x1, R14 ; /* 0x00000001ff0e7819 */
/* 0x000fe2000001140e */
/*0ad0*/ UIADD3 UR5, -UR4, UR5, URZ ; /* 0x0000000504057290 */
/* 0x000fe2000fffe13f */
/*0ae0*/ IMAD.MOV.U32 R15, RZ, RZ, 0x4 ; /* 0x00000004ff0f7424 */
/* 0x000fe200078e00ff */
/*0af0*/ ULDC UR6, c[0x0][0xc] ; /* 0x0000030000067ab9 */
/* 0x000fe20000000800 */
/*0b00*/ IMAD R16, R7, c[0x0][0x160], R8 ; /* 0x0000580007107a24 */
/* 0x000fe200078e0208 */
/*0b10*/ UIADD3 UR4, -UR4, UR6, URZ ; /* 0x0000000604047290 */
/* 0x000fe2000fffe13f */
/*0b20*/ IMAD R13, R14, R9, R10 ; /* 0x000000090e0d7224 */
/* 0x000fe200078e020a */
/*0b30*/ ISETP.NE.AND P0, PT, R9, UR5, PT ; /* 0x0000000509007c0c */
/* 0x000fe2000bf05270 */
/*0b40*/ IMAD.WIDE.U32 R10, R16, R15, c[0x0][0x170] ; /* 0x00005c00100a7625 */
/* 0x000fc600078e000f */
/*0b50*/ ISETP.NE.AND P0, PT, R9, UR4, P0 ; /* 0x0000000409007c0c */
/* 0x000fe20008705270 */
/*0b60*/ IMAD.IADD R14, R14, 0x1, R13 ; /* 0x000000010e0e7824 */
/* 0x000fe400078e020d */
/*0b70*/ IMAD.WIDE.U32 R12, R13, R15, c[0x0][0x178] ; /* 0x00005e000d0c7625 */
/* 0x000fc800078e000f */
/*0b80*/ IMAD.MOV.U32 R17, RZ, RZ, RZ ; /* 0x000000ffff117224 */
/* 0x000fe400078e00ff */
/*0b90*/ IMAD.WIDE.U32 R14, R14, R15, c[0x0][0x178] ; /* 0x00005e000e0e7625 */
/* 0x000fca00078e000f */
/*0ba0*/ BSSY B0, 0xc00 ; /* 0x0000005000007945 */
/* 0x002fe40003800000 */
/*0bb0*/ LDG.E.STRONG.SYS R6, [R12.64] ; /* 0x000000080c067981 */
/* 0x000ea2000c1f5900 */
/*0bc0*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0bd0*/ ISETP.NE.AND P1, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x004fda0003f25270 */
/*0be0*/ @!P1 BRA 0xbb0 ; /* 0xffffffc000009947 */
/* 0x000fea000383ffff */
/*0bf0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0c00*/ IADD3 R6, R16, 0x1, RZ ; /* 0x0000000110067810 */
/* 0x000fe20007ffe0ff */
/*0c10*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe200078e00ff */
/*0c20*/ LDG.E R18, [R10.64] ; /* 0x000000080a127981 */
/* 0x000ea6000c1e1900 */
/*0c30*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fcc00078e0007 */
/*0c40*/ LDG.E R6, [R6.64] ; /* 0x0000000806067981 */
/* 0x000ee4000c1e1900 */
/*0c50*/ IMAD R20, R9, c[0x0][0x160], R6 ; /* 0x0000580009147a24 */
/* 0x008fca00078e0206 */
/*0c60*/ IADD3 R20, P1, R20, c[0x0][0x168], RZ ; /* 0x00005a0014147a10 */
/* 0x000fca0007f3e0ff */
/*0c70*/ IMAD.X R21, RZ, RZ, c[0x0][0x16c], P1 ; /* 0x00005b00ff157624 */
/* 0x000fcc00008e06ff */
/*0c80*/ LDG.E.U8 R21, [R20.64] ; /* 0x0000000814157981 */
/* 0x000ee2000c1e1100 */
/*0c90*/ IMAD R18, R9, c[0x0][0x160], R18 ; /* 0x0000580009127a24 */
/* 0x004fca00078e0212 */
/*0ca0*/ IADD3 R18, P1, R18, c[0x0][0x168], RZ ; /* 0x00005a0012127a10 */
/* 0x000fca0007f3e0ff */
/*0cb0*/ IMAD.X R19, RZ, RZ, c[0x0][0x16c], P1 ; /* 0x00005b00ff137624 */
/* 0x000fca00008e06ff */
/*0cc0*/ LDG.E.U8 R18, [R18.64] ; /* 0x0000000812127981 */
/* 0x000168000c1e1100 */
/*0cd0*/ STG.E.STRONG.SYS [R12.64], RZ ; /* 0x000000ff0c007986 */
/* 0x0001e2000c115908 */
/*0ce0*/ BSSY B0, 0xd50 ; /* 0x0000006000007945 */
/* 0x000fe20003800000 */
/*0cf0*/ PRMT R6, R21, 0x8880, RZ ; /* 0x0000888015067816 */
/* 0x009fe400000000ff */
/*0d00*/ LDG.E.STRONG.SYS R7, [R14.64] ; /* 0x000000080e077981 */
/* 0x000ea2000c1f5900 */
/*0d10*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0d20*/ ISETP.NE.AND P1, PT, R7, 0x1, PT ; /* 0x000000010700780c */
/* 0x004fda0003f25270 */
/*0d30*/ @!P1 BRA 0xd00 ; /* 0xffffffc000009947 */
/* 0x000fea000383ffff */
/*0d40*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0d50*/ PRMT R7, R18, 0x8880, RZ ; /* 0x0000888012077816 */
/* 0x020fe400000000ff */
/*0d60*/ LOP3.LUT R6, R6, c[0x0][0x180], RZ, 0xc0, !PT ; /* 0x0000600006067a12 */
/* 0x000fe400078ec0ff */
/*0d70*/ LOP3.LUT R7, R7, c[0x0][0x180], RZ, 0xc0, !PT ; /* 0x0000600007077a12 */
/* 0x000fc800078ec0ff */
/*0d80*/ ISETP.GE.AND P1, PT, R7, R6, PT ; /* 0x000000060700720c */
/* 0x000fc80003f26270 */
/*0d90*/ SEL R19, R18, R21, !P1 ; /* 0x0000001512137207 */
/* 0x000fe40004800000 */
/*0da0*/ SEL R21, R21, R18, !P1 ; /* 0x0000001215157207 */
/* 0x000fc60004800000 */
/*0db0*/ STG.E.U8 [R2.64], R19 ; /* 0x0000001302007986 */
/* 0x0001e8000c101108 */
/*0dc0*/ STG.E.U8 [R4.64], R21 ; /* 0x0000001504007986 */
/* 0x0001e2000c101108 */
/*0dd0*/ @P0 BRA 0xe50 ; /* 0x0000007000000947 */
/* 0x000fea0003800000 */
/*0de0*/ IMAD.IADD R7, R8, 0x1, R17 ; /* 0x0000000108077824 */
/* 0x000fca00078e0211 */
/*0df0*/ IADD3 R6, P1, R7, c[0x0][0x190], RZ ; /* 0x0000640007067a10 */
/* 0x000fc80007f3e0ff */
/*0e00*/ LEA.HI.X.SX32 R7, R7, c[0x0][0x194], 0x1, P1 ; /* 0x0000650007077a11 */
/* 0x000fca00008f0eff */
/*0e10*/ STG.E.U8 [R6.64], R19 ; /* 0x0000001306007986 */
/* 0x0003e8000c101108 */
/*0e20*/ LDG.E.U8 R21, [R4.64] ; /* 0x0000000804157981 */
/* 0x001ea8000c1e1100 */
/*0e30*/ STG.E.U8 [R6.64+0x1], R21 ; /* 0x0000011506007986 */
/* 0x0043e2000c101108 */
/*0e40*/ BRA 0xef0 ; /* 0x000000a000007947 */
/* 0x000fea0003800000 */
/*0e50*/ MATCH.ANY R19, R0 ; /* 0x00000000001373a1 */
/* 0x001e2200000e8000 */
/*0e60*/ REDUX.OR UR4, R0 ; /* 0x00000000000473c4 */
/* 0x000e620000004000 */
/*0e70*/ IMAD.MOV.U32 R7, RZ, RZ, 0x1 ; /* 0x00000001ff077424 */
/* 0x000fe200078e00ff */
/*0e80*/ VOTEU.ANY UR5, UPT, PT ; /* 0x0000000000057886 */
/* 0x000fc800038e0100 */
/*0e90*/ STG.E.STRONG.SYS [R14.64], R7 ; /* 0x000000070e007986 */
/* 0x0005e2000c115908 */
/*0ea0*/ LOP3.LUT P1, RZ, R0, UR5, R19, 0x40, !PT ; /* 0x0000000500ff7c12 */
/* 0x001fda000f824013 */
/*0eb0*/ @!P1 BRA.CONV UR4, 0xee0 ; /* 0x0000002304009947 */
/* 0x002fea000b800000 */
/*0ec0*/ MOV R6, 0xee0 ; /* 0x00000ee000067802 */
/* 0x004fe40000000f00 */
/*0ed0*/ CALL.REL.NOINC 0x11f0 ; /* 0x0000031000007944 */
/* 0x000fea0003c00000 */
/*0ee0*/ NOP ; /* 0x0000000000007918 */
/* 0x004fcc0000000000 */
/*0ef0*/ IADD3 R17, R17, c[0x0][0x160], RZ ; /* 0x0000580011117a10 */
/* 0x000fc80007ffe0ff */
/*0f00*/ ISETP.GE.AND P1, PT, R17, 0x20000, PT ; /* 0x000200001100780c */
/* 0x000fda0003f26270 */
/*0f10*/ @!P1 BRA 0xba0 ; /* 0xfffffc8000009947 */
/* 0x000fea000383ffff */
/*0f20*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0f30*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff037624 */
/* 0x000fe200078e00ff */
/*0f40*/ LEA R2, R10, c[0x0][0x160], 0x1 ; /* 0x000058000a027a11 */
/* 0x000fe200078e08ff */
/*0f50*/ IMAD.MOV.U32 R9, RZ, RZ, 0x4 ; /* 0x00000004ff097424 */
/* 0x000fe400078e00ff */
/*0f60*/ IMAD.MOV.U32 R11, RZ, RZ, RZ ; /* 0x000000ffff0b7224 */
/* 0x000fe200078e00ff */
/*0f70*/ LEA.HI R3, R3, c[0x0][0x160], RZ, 0x1 ; /* 0x0000580003037a11 */
/* 0x000fe400078f08ff */
/*0f80*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0f90*/ IADD3 R2, P0, R2, c[0x0][0x168], RZ ; /* 0x00005a0002027a10 */
/* 0x000fe40007f1e0ff */
/*0fa0*/ LEA.HI.SX32 R8, R3, R10, 0x1f ; /* 0x0000000a03087211 */
/* 0x000fc400078ffaff */
/*0fb0*/ IADD3 R4, P1, R4, c[0x0][0x168], RZ ; /* 0x00005a0004047a10 */
/* 0x000fe20007f3e0ff */
/*0fc0*/ IMAD.X R3, RZ, RZ, c[0x0][0x16c], P0 ; /* 0x00005b00ff037624 */
/* 0x000fe400000e06ff */
/*0fd0*/ IMAD.WIDE.U32 R8, R8, R9, c[0x0][0x178] ; /* 0x00005e0008087625 */
/* 0x000fc800078e0009 */
/*0fe0*/ IMAD.X R5, RZ, RZ, c[0x0][0x16c], P1 ; /* 0x00005b00ff057624 */
/* 0x000fe400008e06ff */
/*0ff0*/ IMAD R6, R10, 0x2, R11 ; /* 0x000000020a067824 */
/* 0x000fe400078e020b */
/*1000*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x001fc800078e00ff */
/*1010*/ IMAD.WIDE R6, R6, R7, c[0x0][0x188] ; /* 0x0000620006067625 */
/* 0x000fca00078e0207 */
/*1020*/ LDG.E R12, [R6.64] ; /* 0x00000008060c7981 */
/* 0x000168000c1e1900 */
/*1030*/ LDG.E R15, [R6.64+0x4] ; /* 0x00000408060f7981 */
/* 0x000162000c1e1900 */
/*1040*/ IADD3 R11, R11, c[0x0][0x160], RZ ; /* 0x000058000b0b7a10 */
/* 0x000fe20007ffe0ff */
/*1050*/ BSSY B0, 0x10c0 ; /* 0x0000006000007945 */
/* 0x000fe60003800000 */
/*1060*/ ISETP.GE.AND P0, PT, R11, 0x20000, PT ; /* 0x000200000b00780c */
/* 0x000fe40003f06270 */
/*1070*/ LDG.E.STRONG.SYS R6, [R8.64] ; /* 0x0000000808067981 */
/* 0x001ea2000c1f5900 */
/*1080*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*1090*/ ISETP.NE.AND P1, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x004fda0003f25270 */
/*10a0*/ @!P1 BRA 0x1070 ; /* 0xffffffc000009947 */
/* 0x000fea000383ffff */
/*10b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*10c0*/ MATCH.ANY R17, R0 ; /* 0x00000000001173a1 */
/* 0x000e2200000e8000 */
/*10d0*/ LOP3.LUT R6, R12, c[0x0][0x180], RZ, 0xc0, !PT ; /* 0x000060000c067a12 */
/* 0x020fe200078ec0ff */
/*10e0*/ REDUX.OR UR4, R0 ; /* 0x00000000000473c4 */
/* 0x000e620000004000 */
/*10f0*/ LOP3.LUT R7, R15, c[0x0][0x180], RZ, 0xc0, !PT ; /* 0x000060000f077a12 */
/* 0x000fe200078ec0ff */
/*1100*/ VOTEU.ANY UR5, UPT, PT ; /* 0x0000000000057886 */
/* 0x000fc600038e0100 */
/*1110*/ ISETP.GE.AND P1, PT, R6, R7, PT ; /* 0x000000070600720c */
/* 0x000fc80003f26270 */
/*1120*/ SEL R13, R12, R15, !P1 ; /* 0x0000000f0c0d7207 */
/* 0x000fe40004800000 */
/*1130*/ SEL R7, R15, R12, !P1 ; /* 0x0000000c0f077207 */
/* 0x000fc60004800000 */
/*1140*/ STG.E.U8 [R2.64], R13 ; /* 0x0000000d02007986 */
/* 0x0005e8000c101108 */
/*1150*/ STG.E.U8 [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0005e2000c101108 */
/*1160*/ LOP3.LUT P1, RZ, R0, UR5, R17, 0x40, !PT ; /* 0x0000000500ff7c12 */
/* 0x001fda000f824011 */
/*1170*/ @!P1 BRA.CONV UR4, 0x11a0 ; /* 0x0000002304009947 */
/* 0x002fea000b800000 */
/*1180*/ MOV R6, 0x11a0 ; /* 0x000011a000067802 */
/* 0x004fe40000000f00 */
/*1190*/ CALL.REL.NOINC 0x11f0 ; /* 0x0000005000007944 */
/* 0x000fea0003c00000 */
/*11a0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x1 ; /* 0x00000001ff077424 */
/* 0x004fe200078e00ff */
/*11b0*/ NOP ; /* 0x0000000000007918 */
/* 0x000fcc0000000000 */
/*11c0*/ STG.E.STRONG.SYS [R8.64], R7 ; /* 0x0000000708007986 */
/* 0x0001e2000c115908 */
/*11d0*/ @!P0 BRA 0xff0 ; /* 0xfffffe1000008947 */
/* 0x000fea000383ffff */
/*11e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*11f0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fe200078e00ff */
/*1200*/ WARPSYNC R0 ; /* 0x0000000000007348 */
/* 0x000fe80003800000 */
/*1210*/ RET.REL.NODEC R6 0x0 ; /* 0xffffede006007950 */
/* 0x000fea0003c3ffff */
/*1220*/ BRA 0x1220; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*1230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*12a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*12b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*12c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*12d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*12e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*12f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //put C:/Users/molly/Desktop/289Q/project/main.cu
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <cooperative_groups.h>
#include <cooperative_groups.h>
// includes, project
#include <cuda.h>
#include <cuda_runtime.h>
using namespace cooperative_groups;
namespace cg = cooperative_groups;
// #define FILESIZE_CHAR 1048576
#define FILESIZE_CHAR 1048576
#define FILESIZE_INT FILESIZE_CHAR/4
__host__
void makeLUT(int N, int* LUT){
    // Fill the routing look-up table for an N-input Benes network.
    // The table holds (2*log2(N) - 2) stages of N entries each. The first
    // half of the stages is an even/odd interleave whose segment length
    // halves at every stage; the second half stores, for each forward
    // stage read back-to-front, the inverse permutation of that stage.
    const int tableSize = N * (log2((double)N) * 2 - 2);

    // Forward stages: within each segment, the first half of the slots
    // receives consecutive even values and the second half consecutive
    // odd values; counters run across all segments of one stage.
    int segment = N;
    for (int stage = 0; stage < tableSize / 2; stage += N) {
        int nextEven = 0;
        int nextOdd = 1;
        for (int base = 0; base < N; base += segment) {
            const int half = segment / 2;
            for (int k = 0; k < half; k++) {
                LUT[stage + base + k] = nextEven;
                nextEven += 2;
            }
            for (int k = half; k < segment; k++) {
                LUT[stage + base + k] = nextOdd;
                nextOdd += 2;
            }
        }
        segment = segment / 2;
    }

    // Inverse stages: pair the last forward stage with the first inverse
    // stage (and so on inward), scattering each forward mapping back.
    for (int src = tableSize - N, dst = tableSize / 2; dst < tableSize; dst += N, src -= N) {
        for (int j = 0; j < N; j++) {
            const int forward = LUT[src + j - tableSize / 2];
            LUT[forward + dst] = j;
        }
    }
}
// Returns an int with the lowest n bits set (e.g. n=3 -> 0b111, n=0 -> 0).
int createMask(int n)
{
    int mask = 0;
    int bit = 0;
    while (bit < n) {
        mask |= 1 << bit;
        ++bit;
    }
    return mask;
}
// Streams FILESIZE_INT ints through an N-input Benes network stage by stage.
// Each thread block is one network stage; each thread is one 2x2 switch.
// Blocks hand data to the next stage through `network` and synchronize via
// per-switch spin-wait flags in `valid` (valid[idx + stage*(N/2)] == 1 means
// that switch's pair in `network` for that stage is ready to consume).
//   N       - network width (inputs per chunk)
//   block   - number of stages in one network; blocks >= `block` form a
//             second network that processes the second half of the input
//   network - staging buffer, N chars per stage slot
//   LUT     - per-stage routing table from makeLUT()
//   valid   - producer/consumer handshake flags, N/2 per stage slot
//   mask    - low-bit mask used as the comparison key
//   data    - input stream; output - routed result
// NOTE(review): no grid-wide barrier is used; correctness relies on all
// blocks being co-resident so the spin loops can make progress — confirm
// the launch never exceeds concurrent-block capacity.
__global__
void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){
int idx = threadIdx.x;
int in1, in2, in1_index, in2_index;
int readOffset=0;
int fileSize = FILESIZE_INT/2;
int readOffsetSecondNet=fileSize;
// Pair each switch's two lanes so g.sync() orders the flag update after
// both outputs are written.
thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32?
// Block 0: input stage of the first network — reads from `data`.
if(blockIdx.x == 0){
while(readOffset < fileSize){
in1 = data[idx*2 + readOffset];
in2 = data[idx*2+1 + readOffset];
readOffset+=N;
// Wait until the next stage has consumed the previous pair.
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
// Route the smaller masked key to the upper output of the switch.
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
g.sync();
// __syncthreads();
// Publish: pair is ready for the next stage.
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
}
}
// Interior/output stages of the first network.
else if ( blockIdx.x < block) {
while(readOffset < fileSize){
// Wait for the previous stage to produce this switch's inputs.
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// LUT row (blockIdx.x-1) gives the wiring for this stage.
in1_index = LUT[idx*2 + (blockIdx.x-1)*N];
in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// Acknowledge consumption so the producer may overwrite its slot.
valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
// Interior stage: forward to the next stage; last stage of this
// network writes straight to `output` instead.
if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
g.sync();
// __syncthreads();
}
else {
output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffset += N;
}
}
// Block `block`: input stage of the second network — reads the second
// half of `data` (offsets starting at fileSize).
else if(blockIdx.x == block){
while(readOffsetSecondNet < FILESIZE_INT){
in1 = data[idx*2 + readOffsetSecondNet];
in2 = data[idx*2+1 + readOffsetSecondNet];
readOffsetSecondNet+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
}
}
// Interior/output stages of the second network (blockIdx.x > block).
else{
while(readOffsetSecondNet < FILESIZE_INT){
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
// blockIdx.x % block maps this block onto the same LUT rows the
// first network's stages use.
in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N];
in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2);
valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
//printf("waiting for next block %d to consume\n", blockIdx.x + 1);
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
// printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
//printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
// Last block in the grid is this network's output stage.
if (blockIdx.x != gridDim.x - 1){
valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
//printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N);
}
else {
output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffsetSecondNet += N;
}
}
}
// Host driver for the Benes routing demo.
// Usage: <prog> <input-file> <N>.  Reads FILESIZE_CHAR bytes of packed input,
// builds the stage-routing LUT, launches the `benes` kernel with one block per
// network stage, then checks that every N-wide output group is ordered by the
// masked key.
int main(int argc, char *argv[]){
    if (argc != 3){
        printf("Usage: %s <input.txt> <size>\n", argv[0]);
        return 1;
    }
    std::ifstream file(argv[1], std::ios::binary);
    if (!file) {
        printf("Could not open input file\n");
        return 1;
    }
    // Network width, clamped to the number of ints in the input file.
    int N = atoi(argv[2]);
    if (FILESIZE_INT < N)
        N = FILESIZE_INT;
    int blockSize = N/2;                         // one thread per 2x2 switch
    int blocks = 2*log2((double)N) - 1;          // stages in one Benes network
    int LUTsize = N*(log2((double)N)*2 - 2);
    // Two chained networks run when the file holds more than one N-int batch,
    // doubling the grid.
    int numBlocks = (FILESIZE_INT <= N) ? blocks : 2*blocks;

    char* network;
    cudaMallocManaged(&network, N*(numBlocks+1)*sizeof(char));
    memset(network, 0, N*(numBlocks+1)*sizeof(char));

    int* LUT;
    cudaMallocManaged(&LUT, LUTsize*sizeof(int));
    makeLUT(N, LUT);
    int mask = createMask(log2((double)N));

    // FIX: the kernel signals valid[idx + (blockIdx.x+1)*(N/2)] for
    // blockIdx.x up to numBlocks-1, so (numBlocks+1)*(N/2) flags are needed.
    // Previously only numBlocks*(N/2) ints were allocated while
    // (numBlocks+1)*(N/2) were memset -- a heap overflow.
    int *valid;
    cudaMallocManaged(&valid, (N/2)*(numBlocks+1)*sizeof(int));
    memset(valid, 0, (N/2)*(numBlocks+1)*sizeof(int));
    for (int i = 0; i < N/2; i++)
        valid[i] = 1;                            // stage 0 slots start "ready"

    char* data;
    cudaMallocManaged(&data, FILESIZE_CHAR*sizeof(char));
    memset(data, 0, FILESIZE_CHAR*sizeof(char));
    file.read(data, FILESIZE_CHAR*sizeof(char));
    file.close();

    // Same bytes, viewed as ints for the kernel's input.
    int* idata;
    cudaMallocManaged(&idata, FILESIZE_CHAR*sizeof(char));
    memcpy(idata, data, FILESIZE_CHAR*sizeof(char));

    char* output;
    cudaMallocManaged(&output, FILESIZE_CHAR*sizeof(char));
    memset(output, 0, FILESIZE_CHAR*sizeof(char));

    benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output);
    cudaDeviceSynchronize();

    // Each N-wide group must come out ordered by the masked key; zero bytes
    // are treated as unfilled and skipped.
    for (int i = 0; i < FILESIZE_INT-1; i++){
        if ((i%N != N-1) && (output[i+1] != 0)) {
            if ((mask & output[i+1]) < (mask & output[i])){
                printf("ERROR in routing at output %d %d %d\n", i, mask & output[i+1], mask & output[i]);
                return 1;
            }
        }
    }
    printf("Routing was successful!\n");

    cudaFree(valid);
    cudaFree(LUT);
    cudaFree(network);
    cudaFree(data);
    cudaFree(idata);
    cudaFree(output);
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB7002:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE7002:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7makeLUTiPi
.type _Z7makeLUTiPi, @function
_Z7makeLUTiPi:
.LFB6997:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movl %edi, %r15d
movq %rsi, 48(%rsp)
pxor %xmm1, %xmm1
cvtsi2sdl %edi, %xmm1
movsd %xmm1, 8(%rsp)
movapd %xmm1, %xmm0
call log2@PLT
addsd %xmm0, %xmm0
subsd .LC0(%rip), %xmm0
mulsd 8(%rsp), %xmm0
cvttsd2sil %xmm0, %ecx
movl %ecx, %eax
shrl $31, %eax
addl %ecx, %eax
sarl %eax
movl %eax, %r8d
movl %eax, %edx
cmpl $1, %ecx
jle .L4
movslq %r15d, %rax
movq %rax, 40(%rsp)
movl %r15d, %ebp
movl $0, %eax
movl $0, %edi
movl %ecx, 56(%rsp)
movl %r8d, 36(%rsp)
movq %rax, %rdx
movl %r8d, 60(%rsp)
jmp .L5
.L6:
movl %edx, (%rcx,%rax,4)
addl $2, %edx
addq $1, %rax
cmpl %eax, %esi
jg .L6
leal 2(%r14,%r10), %r10d
.L10:
cmpl %ebp, %esi
jge .L7
movq 16(%rsp), %rax
leaq (%rcx,%rax), %rdx
leal 0(%r13,%r8), %edi
movl %r8d, %eax
.L8:
movl %eax, (%rdx)
addl $2, %eax
addq $4, %rdx
cmpl %edi, %eax
jne .L8
leal 2(%r11,%r8), %r8d
.L7:
addl %r12d, %r9d
addq %rbx, %rcx
movl %r9d, %eax
movl 8(%rsp), %edi
subl %edi, %eax
cmpl %eax, %r15d
jle .L23
.L11:
movl %r10d, %edx
movl $0, %eax
cmpl $1, %ebp
jg .L6
jmp .L10
.L23:
movl 32(%rsp), %edi
movq 24(%rsp), %rdx
.L9:
movl %ebp, %eax
shrl $31, %eax
addl %ebp, %eax
sarl %eax
movl %eax, %ebp
addl %r15d, %edi
movq 40(%rsp), %rax
addq %rax, %rdx
movl 36(%rsp), %eax
cmpl %eax, %edi
jge .L25
.L5:
testl %r15d, %r15d
jle .L9
movl %ebp, %esi
shrl $31, %esi
addl %ebp, %esi
sarl %esi
movl %ebp, %r12d
movl %edi, 8(%rsp)
movslq %ebp, %rbx
salq $2, %rbx
movq 48(%rsp), %rax
leaq (%rax,%rdx,4), %rcx
leal -2(%rsi,%rsi), %r14d
cmpl $1, %ebp
movl $0, %eax
cmovle %eax, %r14d
movslq %esi, %rax
salq $2, %rax
movq %rax, 16(%rsp)
leal -1(%rbp), %r11d
subl %esi, %r11d
addl %r11d, %r11d
movl %edi, %r9d
movl $1, %r8d
movl $0, %r10d
movl %ebp, %r13d
subl %esi, %r13d
addl %r13d, %r13d
movl %edi, 32(%rsp)
movq %rdx, 24(%rsp)
jmp .L11
.L25:
movl 56(%rsp), %ecx
movl 36(%rsp), %edx
movl 60(%rsp), %r8d
.L4:
movl %ecx, %eax
subl %r15d, %eax
cmpl %r8d, %ecx
jle .L3
movl %r15d, %r9d
negl %r9d
movslq %r9d, %r9
salq $2, %r9
movslq %r8d, %r8
cltq
subq %r8, %rax
movq 48(%rsp), %r10
leaq (%r10,%rax,4), %rdi
movslq %r15d, %r8
jmp .L13
.L14:
movl %edx, %esi
addl (%rdi,%rax,4), %esi
movslq %esi, %rsi
movl %eax, (%r10,%rsi,4)
addq $1, %rax
cmpq %rax, %r8
jne .L14
.L16:
addl %r15d, %edx
addq %r9, %rdi
cmpl %edx, %ecx
jle .L3
.L13:
movl $0, %eax
testl %r15d, %r15d
jg .L14
jmp .L16
.L3:
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE6997:
.size _Z7makeLUTiPi, .-_Z7makeLUTiPi
.globl _Z10createMaski
.type _Z10createMaski, @function
_Z10createMaski:
.LFB6998:
.cfi_startproc
endbr64
testl %edi, %edi
jle .L30
movl $0, %ecx
movl $0, %eax
movl $1, %esi
.L29:
movl %esi, %edx
sall %cl, %edx
orl %edx, %eax
addl $1, %ecx
cmpl %ecx, %edi
jne .L29
ret
.L30:
movl $0, %eax
ret
.cfi_endproc
.LFE6998:
.size _Z10createMaski, .-_Z10createMaski
.globl _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_
.type _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_, @function
_Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_:
.LFB7024:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movq %rdx, 48(%rsp)
movq %rcx, 40(%rsp)
movq %r8, 32(%rsp)
movl %r9d, 28(%rsp)
movq 224(%rsp), %rax
movq %rax, 16(%rsp)
movq 232(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
leaq 28(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L36
.L32:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L37
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z5benesiiPcPiPViiS0_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L32
.L37:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE7024:
.size _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_, .-_Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_
.globl _Z5benesiiPcPiPViiS0_S_
.type _Z5benesiiPcPiPViiS0_S_, @function
_Z5benesiiPcPiPViiS0_S_:
.LFB7025:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE7025:
.size _Z5benesiiPcPiPViiS0_S_, .-_Z5benesiiPcPiPViiS0_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z5benesiiPcPiPViiS0_S_"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "_ZN37_INTERNAL_9a227ec7_7_main_cu_c848ebe24cuda3std3__419piecewise_constructE"
.align 8
.LC3:
.string "_ZN37_INTERNAL_9a227ec7_7_main_cu_c848ebe24cuda3std6ranges3__45__cpo4swapE"
.align 8
.LC4:
.string "_ZN37_INTERNAL_9a227ec7_7_main_cu_c848ebe24cuda3std6ranges3__45__cpo9iter_moveE"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB7027:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z5benesiiPcPiPViiS0_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std3__419piecewise_constructE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std6ranges3__45__cpo4swapE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std6ranges3__45__cpo9iter_moveE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE7027:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .rodata.str1.1
.LC5:
.string "Usage: %s <input.txt> <size>\n"
.LC6:
.string "Could not open input file\n"
.section .rodata.str1.8
.align 8
.LC8:
.string "ERROR in routing at output %d %d %d\n"
.section .rodata.str1.1
.LC9:
.string "Routing was successful!\n"
.text
.globl main
.type main, @function
main:
.LFB6999:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA6999
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $648, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
cmpl $3, %edi
je .L43
movq (%rsi), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
.LEHB0:
call __printf_chk@PLT
movl $1, %r13d
.L42:
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L65
movl %r13d, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L43:
.cfi_restore_state
movq 8(%rsi), %rsi
leaq -576(%rbp), %rdi
movl $4, %edx
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@PLT
.LEHE0:
movl -288(%rbp), %r13d
andl $5, %r13d
jne .L66
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movl $262144, %ebx
cmpl %ebx, %eax
cmovle %eax, %ebx
movl $2, %ecx
movl %ebx, %eax
cltd
idivl %ecx
movl %eax, %r12d
pxor %xmm1, %xmm1
cvtsi2sdl %ebx, %xmm1
movsd %xmm1, -664(%rbp)
movapd %xmm1, %xmm0
call log2@PLT
addsd %xmm0, %xmm0
subsd .LC7(%rip), %xmm0
cvttsd2sil %xmm0, %r15d
movl %r15d, -680(%rbp)
movsd -664(%rbp), %xmm0
call log2@PLT
movsd -664(%rbp), %xmm0
call log2@PLT
addsd %xmm0, %xmm0
subsd .LC0(%rip), %xmm0
mulsd -664(%rbp), %xmm0
cvttsd2sil %xmm0, %eax
movl %eax, -676(%rbp)
cmpl $262143, %r14d
jg .L47
leal (%r15,%r15), %r15d
.L47:
leal 1(%r15), %eax
imull %ebx, %eax
cltq
leaq -648(%rbp), %rdi
movl $1, %edx
movq %rax, -672(%rbp)
movq %rax, %rsi
.LEHB1:
call cudaMallocManaged@PLT
jmp .L67
.L66:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L68
.L67:
movq -672(%rbp), %rdx
movl $0, %esi
movq -648(%rbp), %rdi
call memset@PLT
movslq -676(%rbp), %rsi
salq $2, %rsi
leaq -640(%rbp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movq -640(%rbp), %rsi
movl %ebx, %edi
call _Z7makeLUTiPi
movsd -664(%rbp), %xmm0
call log2@PLT
cvttsd2sil %xmm0, %edi
call _Z10createMaski
movl %eax, -664(%rbp)
movl %r15d, %eax
imull %r12d, %eax
movl %eax, -672(%rbp)
movslq %eax, %rsi
salq $2, %rsi
leaq -632(%rbp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl -672(%rbp), %edx
addl %r12d, %edx
movslq %edx, %rdx
salq $2, %rdx
movl $0, %esi
movq -632(%rbp), %rdi
call memset@PLT
cmpl $1, %r14d
jle .L48
movl $0, %eax
.L49:
movq -632(%rbp), %rdx
movl $1, (%rdx,%rax,4)
addq $1, %rax
cmpl %eax, %r12d
jg .L49
.L48:
leaq -624(%rbp), %rdi
movl $1, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
movl $1048576, %edx
movl $0, %esi
movq -624(%rbp), %rdi
call memset@PLT
leaq -576(%rbp), %rdi
movl $1048576, %edx
movq -624(%rbp), %rsi
call _ZNSi4readEPcl@PLT
leaq -576(%rbp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv@PLT
leaq -616(%rbp), %rdi
movl $1, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
movl $1048576, %edx
movq -624(%rbp), %rsi
movq -616(%rbp), %rdi
call memcpy@PLT
leaq -608(%rbp), %rdi
movl $1, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
movl $1048576, %edx
movl $0, %esi
movq -608(%rbp), %rdi
call memset@PLT
movl %r12d, -588(%rbp)
movl $1, -584(%rbp)
movl $1, -580(%rbp)
movl %r15d, -600(%rbp)
movl $1, -596(%rbp)
movl $1, -592(%rbp)
movl $0, %r9d
movl $0, %r8d
movq -588(%rbp), %rdx
movl $1, %ecx
movq -600(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L50
pushq -608(%rbp)
pushq -616(%rbp)
movl -664(%rbp), %r9d
movq -632(%rbp), %r8
movq -640(%rbp), %rcx
movq -648(%rbp), %rdx
movl -680(%rbp), %esi
movl %ebx, %edi
.cfi_escape 0x2e,0x10
call _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_
addq $16, %rsp
.L50:
.cfi_escape 0x2e,0
call cudaDeviceSynchronize@PLT
movq -608(%rbp), %r10
movl $0, %esi
leal -1(%rbx), %r9d
jmp .L52
.L51:
addq $1, %rsi
cmpq $262143, %rsi
je .L69
.L52:
movl %esi, %eax
cltd
idivl %ebx
cmpl %r9d, %edx
je .L51
movzbl 1(%r10,%rsi), %ecx
testb %cl, %cl
je .L51
movsbl %cl, %ecx
movl -664(%rbp), %eax
andl %eax, %ecx
movsbl (%r10,%rsi), %r8d
andl %eax, %r8d
cmpl %r8d, %ecx
jge .L51
movl %esi, %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L70
.L69:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq -632(%rbp), %rdi
call cudaFree@PLT
movq -640(%rbp), %rdi
call cudaFree@PLT
movq -648(%rbp), %rdi
call cudaFree@PLT
movq -624(%rbp), %rdi
call cudaFree@PLT
movq -616(%rbp), %rdi
call cudaFree@PLT
movq -608(%rbp), %rdi
call cudaFree@PLT
.LEHE1:
jmp .L46
.L68:
movl $1, %r13d
jmp .L46
.L70:
movl $1, %r13d
.L46:
leaq -576(%rbp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
jmp .L42
.L59:
endbr64
movq %rax, %rbx
leaq -576(%rbp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
movq -56(%rbp), %rax
subq %fs:40, %rax
je .L54
call __stack_chk_fail@PLT
.L54:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L65:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE6999:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA6999:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE6999-.LLSDACSB6999
.LLSDACSB6999:
.uleb128 .LEHB0-.LFB6999
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB6999
.uleb128 .LEHE1-.LEHB1
.uleb128 .L59-.LFB6999
.uleb128 0
.uleb128 .LEHB2-.LFB6999
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE6999:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.weak _ZN4cuda3std3__419piecewise_constructE
.section .rodata._ZN4cuda3std3__419piecewise_constructE,"aG",@progbits,_ZN4cuda3std3__419piecewise_constructE,comdat
.type _ZN4cuda3std3__419piecewise_constructE, @gnu_unique_object
.size _ZN4cuda3std3__419piecewise_constructE, 1
_ZN4cuda3std3__419piecewise_constructE:
.zero 1
.weak _ZN4cuda3std6ranges3__45__cpo9iter_moveE
.section .rodata._ZN4cuda3std6ranges3__45__cpo9iter_moveE,"aG",@progbits,_ZN4cuda3std6ranges3__45__cpo9iter_moveE,comdat
.type _ZN4cuda3std6ranges3__45__cpo9iter_moveE, @gnu_unique_object
.size _ZN4cuda3std6ranges3__45__cpo9iter_moveE, 1
_ZN4cuda3std6ranges3__45__cpo9iter_moveE:
.zero 1
.weak _ZN4cuda3std6ranges3__45__cpo4swapE
.section .rodata._ZN4cuda3std6ranges3__45__cpo4swapE,"aG",@progbits,_ZN4cuda3std6ranges3__45__cpo4swapE,comdat
.type _ZN4cuda3std6ranges3__45__cpo4swapE, @gnu_unique_object
.size _ZN4cuda3std6ranges3__45__cpo4swapE, 1
_ZN4cuda3std6ranges3__45__cpo4swapE:
.zero 1
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1073741824
.align 8
.LC7:
.long 0
.long 1072693248
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //put C:/Users/molly/Desktop/289Q/project/main.cu
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <cooperative_groups.h>
#include <cooperative_groups.h>
// includes, project
#include <cuda.h>
#include <cuda_runtime.h>
using namespace cooperative_groups;
namespace cg = cooperative_groups;
// #define FILESIZE_CHAR 1048576
#define FILESIZE_CHAR 1048576
#define FILESIZE_INT FILESIZE_CHAR/4
// Builds the per-stage routing lookup table for an N-wide Benes network.
// LUT must hold N*(2*log2(N) - 2) ints (LUTsize below).
// First half: in each stage, groups of width M (M halves every stage) map
// their first M/2 slots to successive even source indices and their last M/2
// slots to successive odd source indices.
// Second half: the inverse permutation of the mirrored first half.
__host__
void makeLUT(int N, int* LUT){
int M = N; // current sub-network width; halves each stage
int even = 0;
int odd = 1;
int LUTsize = N*(log2((double)N)*2 - 2);
// Forward (butterfly) stages: even/odd counters carry across the j groups
// of one stage and reset between stages.
for (int i =0; i < LUTsize/2; i+=N){
for (int j=0; j<N; j+=M){
for (int k =0; k<M/2; k++){
LUT[i+j+k] = even;
even+=2;
}
for (int k =M/2; k<M; k++){
LUT[i+j+k] = odd;
odd+=2;
}
} even=0; odd=1; M = M/2;
}
// Mirror stages: stage i of the second half is the inverse of the
// reflected stage x from the first half (LUT[perm[j]] = j).
for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){
for(int j=0; j<N; j++){
int newIndex = LUT[x+j-LUTsize/2];
LUT[newIndex + i] = j;
}
}
return;
}
// Returns an int whose n least-significant bits are set (e.g. n=4 -> 0xF).
// For n <= 0 the result is 0.
int createMask(int n)
{
    int mask = 0;
    while (n-- > 0)
        mask = (mask << 1) | 1;
    return mask;
}
// Routes one file of data through a pipelined Benes network.
//
// Launch layout: one CUDA block per network stage, N/2 threads per block
// (each thread owns one 2x2 switch, reading inputs idx*2 and idx*2+1).
// Stages are chained producer/consumer style: each stage writes its switch
// outputs into `network` at the slot of the *next* stage and signals it via
// the volatile `valid` flags, spinning until the downstream slot has been
// drained.  Blocks [0, block) process the first half of the file; blocks
// [block, gridDim.x) form a second identical network for the second half.
// NOTE(review): the spin-wait handshake assumes all blocks are resident on
// the GPU simultaneously -- confirm occupancy on the target device.
__global__
void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){
int idx = threadIdx.x;
int in1, in2, in1_index, in2_index;
int readOffset=0;
int fileSize = FILESIZE_INT/2;
int readOffsetSecondNet=fileSize;
// 2-thread tiles used as a lightweight local sync around the signalling
thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32?
// Stage 0 of the first network: pull input pairs straight from `data`.
if(blockIdx.x == 0){
while(readOffset < fileSize){
in1 = data[idx*2 + readOffset];
in2 = data[idx*2+1 + readOffset];
readOffset+=N;
// spin until stage 1 has consumed the previous batch
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
// 2x2 switch: smaller masked key goes to the upper output
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
g.sync();
// __syncthreads();
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
}
}
// Interior and last stages of the first network.
else if ( blockIdx.x < block) {
while(readOffset < fileSize){
// wait for the upstream stage to publish this switch's inputs
while((valid[idx + (blockIdx.x)*(N/2)])==0);
in1_index = LUT[idx*2 + (blockIdx.x-1)*N];
in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
// wait for the downstream stage to drain its slot
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
// the last stage of this network writes results instead of signalling on
if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
g.sync();
// __syncthreads();
}
else {
output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffset += N;
}
}
// Stage 0 of the second network: second half of the file.
else if(blockIdx.x == block){
while(readOffsetSecondNet < FILESIZE_INT){
in1 = data[idx*2 + readOffsetSecondNet];
in2 = data[idx*2+1 + readOffsetSecondNet];
readOffsetSecondNet+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
}
}
// Interior and last stages of the second network (LUT index wraps via %block).
else{
while(readOffsetSecondNet < FILESIZE_INT){
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N];
in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2);
valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
//printf("waiting for next block %d to consume\n", blockIdx.x + 1);
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
// printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
//printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
if (blockIdx.x != gridDim.x - 1){
valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
//printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N);
}
else {
output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffsetSecondNet += N;
}
}
}
// Host driver for the Benes routing demo.
// Usage: <prog> <input-file> <N>.  Reads FILESIZE_CHAR bytes of packed input,
// builds the stage-routing LUT, launches the `benes` kernel with one block per
// network stage, then checks that every N-wide output group is ordered by the
// masked key.
int main(int argc, char *argv[]){
    if (argc != 3){
        printf("Usage: %s <input.txt> <size>\n", argv[0]);
        return 1;
    }
    std::ifstream file(argv[1], std::ios::binary);
    if (!file) {
        printf("Could not open input file\n");
        return 1;
    }
    // Network width, clamped to the number of ints in the input file.
    int N = atoi(argv[2]);
    if (FILESIZE_INT < N)
        N = FILESIZE_INT;
    int blockSize = N/2;                         // one thread per 2x2 switch
    int blocks = 2*log2((double)N) - 1;          // stages in one Benes network
    int LUTsize = N*(log2((double)N)*2 - 2);
    // Two chained networks run when the file holds more than one N-int batch,
    // doubling the grid.
    int numBlocks = (FILESIZE_INT <= N) ? blocks : 2*blocks;

    char* network;
    cudaMallocManaged(&network, N*(numBlocks+1)*sizeof(char));
    memset(network, 0, N*(numBlocks+1)*sizeof(char));

    int* LUT;
    cudaMallocManaged(&LUT, LUTsize*sizeof(int));
    makeLUT(N, LUT);
    int mask = createMask(log2((double)N));

    // FIX: the kernel signals valid[idx + (blockIdx.x+1)*(N/2)] for
    // blockIdx.x up to numBlocks-1, so (numBlocks+1)*(N/2) flags are needed.
    // Previously only numBlocks*(N/2) ints were allocated while
    // (numBlocks+1)*(N/2) were memset -- a heap overflow.
    int *valid;
    cudaMallocManaged(&valid, (N/2)*(numBlocks+1)*sizeof(int));
    memset(valid, 0, (N/2)*(numBlocks+1)*sizeof(int));
    for (int i = 0; i < N/2; i++)
        valid[i] = 1;                            // stage 0 slots start "ready"

    char* data;
    cudaMallocManaged(&data, FILESIZE_CHAR*sizeof(char));
    memset(data, 0, FILESIZE_CHAR*sizeof(char));
    file.read(data, FILESIZE_CHAR*sizeof(char));
    file.close();

    // Same bytes, viewed as ints for the kernel's input.
    int* idata;
    cudaMallocManaged(&idata, FILESIZE_CHAR*sizeof(char));
    memcpy(idata, data, FILESIZE_CHAR*sizeof(char));

    char* output;
    cudaMallocManaged(&output, FILESIZE_CHAR*sizeof(char));
    memset(output, 0, FILESIZE_CHAR*sizeof(char));

    benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output);
    cudaDeviceSynchronize();

    // Each N-wide group must come out ordered by the masked key; zero bytes
    // are treated as unfilled and skipped.
    for (int i = 0; i < FILESIZE_INT-1; i++){
        if ((i%N != N-1) && (output[i+1] != 0)) {
            if ((mask & output[i+1]) < (mask & output[i])){
                printf("ERROR in routing at output %d %d %d\n", i, mask & output[i+1], mask & output[i]);
                return 1;
            }
        }
    }
    printf("Routing was successful!\n");

    cudaFree(valid);
    cudaFree(LUT);
    cudaFree(network);
    cudaFree(data);
    cudaFree(idata);
    cudaFree(output);
}
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <hip/hip_cooperative_groups.h>
#include <hip/hip_cooperative_groups.h>
// includes, project
#include <hip/hip_runtime.h>
using namespace cooperative_groups;
namespace cg = cooperative_groups;
// #define FILESIZE_CHAR 1048576
#define FILESIZE_CHAR 1048576
#define FILESIZE_INT FILESIZE_CHAR/4
// Builds the per-stage routing lookup table for an N-wide Benes network.
// LUT must hold N*(2*log2(N) - 2) ints (LUTsize below).
// First half: in each stage, groups of width M (M halves every stage) map
// their first M/2 slots to successive even source indices and their last M/2
// slots to successive odd source indices.
// Second half: the inverse permutation of the mirrored first half.
__host__
void makeLUT(int N, int* LUT){
int M = N; // current sub-network width; halves each stage
int even = 0;
int odd = 1;
int LUTsize = N*(log2((double)N)*2 - 2);
// Forward (butterfly) stages: even/odd counters carry across the j groups
// of one stage and reset between stages.
for (int i =0; i < LUTsize/2; i+=N){
for (int j=0; j<N; j+=M){
for (int k =0; k<M/2; k++){
LUT[i+j+k] = even;
even+=2;
}
for (int k =M/2; k<M; k++){
LUT[i+j+k] = odd;
odd+=2;
}
} even=0; odd=1; M = M/2;
}
// Mirror stages: stage i of the second half is the inverse of the
// reflected stage x from the first half (LUT[perm[j]] = j).
for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){
for(int j=0; j<N; j++){
int newIndex = LUT[x+j-LUTsize/2];
LUT[newIndex + i] = j;
}
}
return;
}
// Returns an int whose n least-significant bits are set (e.g. n=4 -> 0xF).
// For n <= 0 the result is 0.
int createMask(int n)
{
    int mask = 0;
    while (n-- > 0)
        mask = (mask << 1) | 1;
    return mask;
}
// Routes one file of data through a pipelined Benes network.
//
// Launch layout: one block per network stage, N/2 threads per block
// (each thread owns one 2x2 switch, reading inputs idx*2 and idx*2+1).
// Stages are chained producer/consumer style: each stage writes its switch
// outputs into `network` at the slot of the *next* stage and signals it via
// the volatile `valid` flags, spinning until the downstream slot has been
// drained.  Blocks [0, block) process the first half of the file; blocks
// [block, gridDim.x) form a second identical network for the second half.
// NOTE(review): the spin-wait handshake assumes all blocks are resident on
// the GPU simultaneously -- confirm occupancy on the target device.
__global__
void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){
int idx = threadIdx.x;
int in1, in2, in1_index, in2_index;
int readOffset=0;
int fileSize = FILESIZE_INT/2;
int readOffsetSecondNet=fileSize;
// 2-thread tiles used as a lightweight local sync around the signalling
thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32?
// Stage 0 of the first network: pull input pairs straight from `data`.
if(blockIdx.x == 0){
while(readOffset < fileSize){
in1 = data[idx*2 + readOffset];
in2 = data[idx*2+1 + readOffset];
readOffset+=N;
// spin until stage 1 has consumed the previous batch
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
// 2x2 switch: smaller masked key goes to the upper output
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
g.sync();
// __syncthreads();
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
}
}
// Interior and last stages of the first network.
else if ( blockIdx.x < block) {
while(readOffset < fileSize){
// wait for the upstream stage to publish this switch's inputs
while((valid[idx + (blockIdx.x)*(N/2)])==0);
in1_index = LUT[idx*2 + (blockIdx.x-1)*N];
in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
// wait for the downstream stage to drain its slot
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
// the last stage of this network writes results instead of signalling on
if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
g.sync();
// __syncthreads();
}
else {
output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffset += N;
}
}
// Stage 0 of the second network: second half of the file.
else if(blockIdx.x == block){
while(readOffsetSecondNet < FILESIZE_INT){
in1 = data[idx*2 + readOffsetSecondNet];
in2 = data[idx*2+1 + readOffsetSecondNet];
readOffsetSecondNet+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
}
}
// Interior and last stages of the second network (LUT index wraps via %block).
else{
while(readOffsetSecondNet < FILESIZE_INT){
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N];
in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2);
valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
//printf("waiting for next block %d to consume\n", blockIdx.x + 1);
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
// printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
//printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
if (blockIdx.x != gridDim.x - 1){
valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
//printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N);
}
else {
output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffsetSecondNet += N;
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Usage: %s <input.txt> <size>\n", argv[0]);
return 1;
}
std::ifstream file(argv[1], std::ios::binary);
if (!file) {
printf("Could not open input file\n");
return 1;
}
int N = atoi(argv[2]);
if (FILESIZE_INT<N)
N = FILESIZE_INT;
int blockSize = N/2;
int blocks = 2*log2((double)N)-1;
int b = 2*log2((double)N)-1;
int LUTsize = N*(log2((double)N)*2 - 2);
int numBlocks;
if (FILESIZE_INT <= N)
numBlocks = blocks;
else
numBlocks = 2*blocks;
char* network;
hipMallocManaged(&network,N*(numBlocks+1)*sizeof(char));
memset(network,0,N*(numBlocks+1)*sizeof(char));
int* LUT;
hipMallocManaged(&LUT,LUTsize*sizeof(int));
makeLUT(N,LUT);
int mask = createMask(log2((double)N));
int *valid;
hipMallocManaged(&valid,(N/2)*(numBlocks)*sizeof(int));
memset(valid,0,(N/2)*(numBlocks+1)*sizeof(int));
for(int i = 0; i < N/2; i++)
valid[i] = 1;
char* data;
hipMallocManaged(&data,FILESIZE_CHAR*sizeof(char));
memset(data,0,FILESIZE_CHAR*sizeof(char));
file.read(data, FILESIZE_CHAR*sizeof(char));
file.close();
int* idata;
hipMallocManaged(&idata,FILESIZE_CHAR*sizeof(char));
memcpy(idata, data, FILESIZE_CHAR*sizeof(char));
char* output;
hipMallocManaged(&output,FILESIZE_CHAR*sizeof(char));
memset(output,0,FILESIZE_CHAR*sizeof(char));
benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output);
hipDeviceSynchronize();
// printf("The input is:");
// for (int i = 0; i < FILESIZE_INT; i++){
// if (i%N == 0) printf("\n");
// printf("%d ", idata[i]);
// }
// printf("\n\n");
for (int i = 0; i < FILESIZE_INT-1; i++){
if ((i%N != N-1) && (output[i+1]!=0)) {
if((mask & output[i+1]) < (mask & output[i])){
printf("ERROR in routing at output %d %d %d\n",i ,mask & output[i+1],mask &output[i] );
return 1;
}
}
}
printf("Routing was successful!\n");
hipFree(valid);
hipFree(LUT);
hipFree(network);
hipFree(data);
hipFree(idata);
hipFree(output);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //put C:/Users/molly/Desktop/289Q/project/main.cu
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <hip/hip_cooperative_groups.h>
#include <hip/hip_cooperative_groups.h>
// includes, project
#include <hip/hip_runtime.h>
using namespace cooperative_groups;
namespace cg = cooperative_groups;
// #define FILESIZE_CHAR 1048576
#define FILESIZE_CHAR 1048576
#define FILESIZE_INT FILESIZE_CHAR/4
__host__
void makeLUT(int N, int* LUT){
int M = N;
int even = 0;
int odd = 1;
int LUTsize = N*(log2((double)N)*2 - 2);
for (int i =0; i < LUTsize/2; i+=N){
for (int j=0; j<N; j+=M){
for (int k =0; k<M/2; k++){
LUT[i+j+k] = even;
even+=2;
}
for (int k =M/2; k<M; k++){
LUT[i+j+k] = odd;
odd+=2;
}
} even=0; odd=1; M = M/2;
}
for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){
for(int j=0; j<N; j++){
int newIndex = LUT[x+j-LUTsize/2];
LUT[newIndex + i] = j;
}
}
return;
}
int createMask(int n)
{
int r = 0;
for (int i=0; i<n; i++)
r |= 1 << i;
return r;
}
__global__
void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){
int idx = threadIdx.x;
int in1, in2, in1_index, in2_index;
int readOffset=0;
int fileSize = FILESIZE_INT/2;
int readOffsetSecondNet=fileSize;
thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32?
if(blockIdx.x == 0){
while(readOffset < fileSize){
in1 = data[idx*2 + readOffset];
in2 = data[idx*2+1 + readOffset];
readOffset+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
g.sync();
// __syncthreads();
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
}
}
else if ( blockIdx.x < block) {
while(readOffset < fileSize){
while((valid[idx + (blockIdx.x)*(N/2)])==0);
in1_index = LUT[idx*2 + (blockIdx.x-1)*N];
in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
g.sync();
// __syncthreads();
}
else {
output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffset += N;
}
}
else if(blockIdx.x == block){
while(readOffsetSecondNet < FILESIZE_INT){
in1 = data[idx*2 + readOffsetSecondNet];
in2 = data[idx*2+1 + readOffsetSecondNet];
readOffsetSecondNet+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
}
}
else{
while(readOffsetSecondNet < FILESIZE_INT){
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N];
in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2);
valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
//printf("waiting for next block %d to consume\n", blockIdx.x + 1);
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
// printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
//printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
if (blockIdx.x != gridDim.x - 1){
valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
//printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N);
}
else {
output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffsetSecondNet += N;
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Usage: %s <input.txt> <size>\n", argv[0]);
return 1;
}
std::ifstream file(argv[1], std::ios::binary);
if (!file) {
printf("Could not open input file\n");
return 1;
}
int N = atoi(argv[2]);
if (FILESIZE_INT<N)
N = FILESIZE_INT;
int blockSize = N/2;
int blocks = 2*log2((double)N)-1;
int b = 2*log2((double)N)-1;
int LUTsize = N*(log2((double)N)*2 - 2);
int numBlocks;
if (FILESIZE_INT <= N)
numBlocks = blocks;
else
numBlocks = 2*blocks;
char* network;
hipMallocManaged(&network,N*(numBlocks+1)*sizeof(char));
memset(network,0,N*(numBlocks+1)*sizeof(char));
int* LUT;
hipMallocManaged(&LUT,LUTsize*sizeof(int));
makeLUT(N,LUT);
int mask = createMask(log2((double)N));
int *valid;
hipMallocManaged(&valid,(N/2)*(numBlocks)*sizeof(int));
memset(valid,0,(N/2)*(numBlocks+1)*sizeof(int));
for(int i = 0; i < N/2; i++)
valid[i] = 1;
char* data;
hipMallocManaged(&data,FILESIZE_CHAR*sizeof(char));
memset(data,0,FILESIZE_CHAR*sizeof(char));
file.read(data, FILESIZE_CHAR*sizeof(char));
file.close();
int* idata;
hipMallocManaged(&idata,FILESIZE_CHAR*sizeof(char));
memcpy(idata, data, FILESIZE_CHAR*sizeof(char));
char* output;
hipMallocManaged(&output,FILESIZE_CHAR*sizeof(char));
memset(output,0,FILESIZE_CHAR*sizeof(char));
benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output);
hipDeviceSynchronize();
// printf("The input is:");
// for (int i = 0; i < FILESIZE_INT; i++){
// if (i%N == 0) printf("\n");
// printf("%d ", idata[i]);
// }
// printf("\n\n");
for (int i = 0; i < FILESIZE_INT-1; i++){
if ((i%N != N-1) && (output[i+1]!=0)) {
if((mask & output[i+1]) < (mask & output[i])){
printf("ERROR in routing at output %d %d %d\n",i ,mask & output[i+1],mask &output[i] );
return 1;
}
}
}
printf("Routing was successful!\n");
hipFree(valid);
hipFree(LUT);
hipFree(network);
hipFree(data);
hipFree(idata);
hipFree(output);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5benesiiPcPiPViiS0_S_
.globl _Z5benesiiPcPiPViiS0_S_
.p2align 8
.type _Z5benesiiPcPiPViiS0_S_,@function
_Z5benesiiPcPiPViiS0_S_:
s_clause 0x4
s_load_b32 s12, s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x8
s_load_b64 s[6:7], s[0:1], 0x18
s_load_b32 s13, s[0:1], 0x20
s_load_b64 s[4:5], s[0:1], 0x28
s_cmp_lg_u32 s15, 0
s_cbranch_scc0 .LBB0_17
s_clause 0x3
s_load_b32 s14, s[0:1], 0x4
s_load_b64 s[10:11], s[0:1], 0x10
s_load_b64 s[8:9], s[0:1], 0x30
s_load_b32 s0, s[0:1], 0x38
s_mov_b32 s1, -1
s_waitcnt lgkmcnt(0)
s_cmp_ge_u32 s15, s14
s_cbranch_scc0 .LBB0_25
s_cmp_eq_u32 s15, s14
s_cbranch_scc1 .LBB0_19
v_cvt_f32_u32_e32 v1, s14
s_sub_i32 s16, 0, s14
s_lshr_b32 s17, s12, 31
v_mov_b32_e32 v14, 0
s_add_i32 s17, s12, s17
v_rcp_iflag_f32_e32 v1, v1
s_ashr_i32 s17, s17, 1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v1, v1
v_readfirstlane_b32 s1, v1
v_mad_u64_u32 v[10:11], null, s17, s15, v[0:1]
v_mov_b32_e32 v11, 0
v_lshlrev_b32_e32 v1, 1, v0
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s16, s16, s1
s_mul_hi_u32 s16, s1, s16
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[10:11]
s_add_i32 s1, s1, s16
s_mul_hi_u32 s1, s15, s1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_mul_i32 s1, s1, s14
v_add_co_u32 v2, vcc_lo, s6, v2
s_sub_i32 s1, s15, s1
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
s_sub_i32 s16, s1, s14
s_cmp_ge_u32 s1, s14
s_cselect_b32 s1, s16, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_sub_i32 s16, s1, s14
s_cmp_ge_u32 s1, s14
s_cselect_b32 s1, s16, s1
s_add_i32 s16, s15, 1
s_add_i32 s1, s1, -1
v_mad_u64_u32 v[6:7], null, s16, s12, v[1:2]
v_mad_u64_u32 v[7:8], null, s1, s12, v[1:2]
s_add_i32 s18, s0, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
s_cmp_lg_u32 s15, s18
v_add_nc_u32_e32 v9, 1, v6
s_cselect_b32 s1, -1, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_add_nc_u32_e32 v10, 1, v7
v_mov_b32_e32 v8, v11
v_mad_u64_u32 v[4:5], null, s17, s16, v[0:1]
v_mov_b32_e32 v5, v11
v_or_b32_e32 v15, 1, v1
v_lshlrev_b64 v[16:17], 2, v[7:8]
s_mov_b32 s17, 0x20000
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], 2, v[4:5]
v_add_co_u32 v4, s16, s2, v6
v_add_co_ci_u32_e64 v5, null, s3, 0, s16
v_add_co_u32 v6, s16, s2, v9
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v8, vcc_lo, s6, v12
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v13, vcc_lo
v_lshlrev_b64 v[12:13], 2, v[10:11]
v_add_co_u32 v10, vcc_lo, s10, v16
v_mov_b32_e32 v16, 1
v_add_co_ci_u32_e32 v11, vcc_lo, s11, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v12, vcc_lo, s10, v12
v_add_co_ci_u32_e64 v7, null, s3, 0, s16
v_add_co_ci_u32_e32 v13, vcc_lo, s11, v13, vcc_lo
s_mul_i32 s16, s15, s12
s_branch .LBB0_5
.LBB0_4:
s_add_i32 s17, s17, s12
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_i32 s17, 0x40000
s_cbranch_scc0 .LBB0_18
.LBB0_5:
s_mov_b32 s18, 0
.LBB0_6:
flat_load_b32 v17, v[2:3] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, 0, v17
s_or_b32 s18, vcc_lo, s18
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s18
s_cbranch_execnz .LBB0_6
s_or_b32 exec_lo, exec_lo, s18
s_clause 0x1
global_load_b32 v17, v[10:11], off
global_load_b32 v18, v[12:13], off
s_mov_b32 s18, 0
s_waitcnt vmcnt(1)
v_add_nc_u32_e32 v17, s16, v17
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v18, s16, v18
s_clause 0x1
global_load_u8 v17, v17, s[2:3]
global_load_u8 v18, v18, s[2:3]
flat_store_b32 v[2:3], v14 dlc
s_waitcnt_vscnt null, 0x0
.LBB0_8:
flat_load_b32 v19, v[8:9] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, 1, v19
s_or_b32 s18, vcc_lo, s18
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s18
s_cbranch_execnz .LBB0_8
s_or_b32 exec_lo, exec_lo, s18
v_bfe_i32 v19, v17, 0, 8
v_bfe_i32 v20, v18, 0, 8
s_mov_b32 s18, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v19, s13, v19
v_and_b32_e32 v20, s13, v20
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ge_i32_e64 v19, v20
s_xor_b32 s18, exec_lo, s18
s_clause 0x1
global_store_b8 v[4:5], v18, off
global_store_b8 v[6:7], v17, off
s_and_not1_saveexec_b32 s18, s18
s_clause 0x1
global_store_b8 v[4:5], v17, off
global_store_b8 v[6:7], v18, off
s_or_b32 exec_lo, exec_lo, s18
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s1
s_mov_b32 s18, -1
s_cbranch_vccnz .LBB0_15
s_mov_b32 s18, 0
flat_store_b32 v[8:9], v16 dlc
s_waitcnt_vscnt null, 0x0
s_waitcnt lgkmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
.LBB0_15:
s_and_not1_b32 vcc_lo, exec_lo, s18
s_cbranch_vccnz .LBB0_4
global_load_u8 v19, v[4:5], off
v_add_nc_u32_e32 v17, s17, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v18, 31, v17
v_add_co_u32 v17, vcc_lo, s8, v17
v_add_co_ci_u32_e32 v18, vcc_lo, s9, v18, vcc_lo
s_waitcnt vmcnt(0)
global_store_b8 v[17:18], v19, off
global_load_u8 v19, v[6:7], off
v_add_nc_u32_e32 v17, s17, v15
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v18, 31, v17
v_add_co_u32 v17, vcc_lo, s8, v17
v_add_co_ci_u32_e32 v18, vcc_lo, s9, v18, vcc_lo
s_waitcnt vmcnt(0)
global_store_b8 v[17:18], v19, off
s_branch .LBB0_4
.LBB0_17:
s_branch .LBB0_41
.LBB0_18:
s_mov_b32 s1, 0
.LBB0_19:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s1
s_cbranch_vccz .LBB0_24
s_lshr_b32 s1, s12, 31
s_add_i32 s16, s15, 1
s_add_i32 s1, s12, s1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s1, s1, 1
v_mad_u64_u32 v[2:3], null, s1, s16, v[0:1]
v_mov_b32_e32 v3, 0
v_lshlrev_b32_e32 v1, 1, v0
s_mov_b32 s1, 0x20000
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_or_b32_e32 v5, 1, v1
v_lshlrev_b64 v[6:7], 2, v[2:3]
v_mad_u64_u32 v[2:3], null, s16, s12, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s6, v6
v_mov_b32_e32 v6, 1
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v7, vcc_lo
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_21:
v_add_nc_u32_e32 v7, s1, v1
v_add_nc_u32_e32 v9, s1, v5
s_mov_b32 s16, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v8, 31, v7
v_ashrrev_i32_e32 v10, 31, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[7:8], 2, v[7:8]
v_lshlrev_b64 v[9:10], 2, v[9:10]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s4, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v9, vcc_lo, s4, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v10, vcc_lo
s_clause 0x1
global_load_b32 v7, v[7:8], off
global_load_b32 v8, v[9:10], off
.LBB0_22:
flat_load_b32 v9, v[3:4] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, 1, v9
s_or_b32 s16, vcc_lo, s16
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s16
s_cbranch_execnz .LBB0_22
s_or_b32 exec_lo, exec_lo, s16
v_and_b32_e32 v9, s13, v7
v_and_b32_e32 v10, s13, v8
s_add_i32 s1, s1, s12
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_gt_i32 s1, 0x3ffff
v_cmp_lt_i32_e32 vcc_lo, v9, v10
v_dual_cndmask_b32 v10, v8, v7 :: v_dual_add_nc_u32 v9, 1, v2
v_cndmask_b32_e32 v7, v7, v8, vcc_lo
s_clause 0x1
global_store_b8 v2, v10, s[2:3]
global_store_b8 v9, v7, s[2:3]
flat_store_b32 v[3:4], v6 dlc
s_waitcnt_vscnt null, 0x0
s_waitcnt lgkmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_cbranch_scc0 .LBB0_21
.LBB0_24:
s_set_inst_prefetch_distance 0x2
s_mov_b32 s1, 0
.LBB0_25:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s1
s_cbranch_vccnz .LBB0_40
s_lshr_b32 s1, s12, 31
v_dual_mov_b32 v16, 1 :: v_dual_lshlrev_b32 v1, 1, v0
s_add_i32 s1, s12, s1
s_add_i32 s16, s15, -1
s_ashr_i32 s1, s1, 1
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, s16, s12, v[1:2]
v_mad_u64_u32 v[4:5], null, s1, s15, v[0:1]
v_mov_b32_e32 v5, 0
s_add_i32 s16, s15, 1
s_add_i32 s0, s0, -1
v_mad_u64_u32 v[8:9], null, s1, s16, v[0:1]
s_cmp_lg_u32 s15, s0
s_delay_alu instid0(VALU_DEP_3)
v_lshlrev_b64 v[6:7], 2, v[4:5]
v_dual_mov_b32 v3, v5 :: v_dual_add_nc_u32 v4, 1, v2
v_mov_b32_e32 v9, v5
s_cselect_b32 s0, -1, 0
s_add_i32 s14, s14, -1
v_or_b32_e32 v14, 1, v1
v_lshlrev_b64 v[10:11], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v7, vcc_lo
v_lshlrev_b64 v[6:7], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s10, v10
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v11, vcc_lo
v_mad_u64_u32 v[10:11], null, s16, s12, v[1:2]
v_lshlrev_b64 v[8:9], 2, v[8:9]
v_add_co_u32 v6, vcc_lo, s10, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s11, v7, vcc_lo
s_cmp_lg_u32 s15, s14
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
v_add_nc_u32_e32 v12, 1, v10
v_add_co_u32 v10, s1, s2, v10
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e64 v11, null, s3, 0, s1
v_add_co_u32 v12, s1, s2, v12
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
v_add_co_ci_u32_e64 v13, null, s3, 0, s1
v_mov_b32_e32 v15, 0
s_cselect_b32 s1, -1, 0
s_mul_i32 s15, s15, s12
s_and_b32 s0, s0, s1
s_mov_b32 s1, 0
s_branch .LBB0_28
.LBB0_27:
s_add_i32 s1, s1, s12
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_gt_i32 s1, 0x1ffff
s_cbranch_scc1 .LBB0_40
.LBB0_28:
s_mov_b32 s10, 0
.LBB0_29:
flat_load_b32 v17, v[2:3] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, 0, v17
s_or_b32 s10, vcc_lo, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execnz .LBB0_29
s_or_b32 exec_lo, exec_lo, s10
s_clause 0x1
global_load_b32 v17, v[4:5], off
global_load_b32 v18, v[6:7], off
s_mov_b32 s10, 0
s_waitcnt vmcnt(1)
v_add_nc_u32_e32 v17, s15, v17
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v18, s15, v18
s_clause 0x1
global_load_u8 v17, v17, s[2:3]
global_load_u8 v18, v18, s[2:3]
flat_store_b32 v[2:3], v15 dlc
s_waitcnt_vscnt null, 0x0
.LBB0_31:
flat_load_b32 v19, v[8:9] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, 1, v19
s_or_b32 s10, vcc_lo, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s10
s_cbranch_execnz .LBB0_31
s_or_b32 exec_lo, exec_lo, s10
v_bfe_i32 v19, v17, 0, 8
v_bfe_i32 v20, v18, 0, 8
s_mov_b32 s10, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v19, s13, v19
v_and_b32_e32 v20, s13, v20
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_ge_i32_e64 v19, v20
s_xor_b32 s10, exec_lo, s10
s_clause 0x1
global_store_b8 v[10:11], v18, off
global_store_b8 v[12:13], v17, off
s_and_not1_saveexec_b32 s10, s10
s_clause 0x1
global_store_b8 v[10:11], v17, off
global_store_b8 v[12:13], v18, off
s_or_b32 exec_lo, exec_lo, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 vcc_lo, exec_lo, s0
s_mov_b32 s10, -1
s_cbranch_vccnz .LBB0_38
s_mov_b32 s10, 0
flat_store_b32 v[8:9], v16 dlc
s_waitcnt_vscnt null, 0x0
s_waitcnt lgkmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
.LBB0_38:
s_and_not1_b32 vcc_lo, exec_lo, s10
s_cbranch_vccnz .LBB0_27
global_load_u8 v19, v[10:11], off
v_add_nc_u32_e32 v17, s1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v18, 31, v17
v_add_co_u32 v17, vcc_lo, s8, v17
v_add_co_ci_u32_e32 v18, vcc_lo, s9, v18, vcc_lo
s_waitcnt vmcnt(0)
global_store_b8 v[17:18], v19, off
global_load_u8 v19, v[12:13], off
v_add_nc_u32_e32 v17, s1, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v18, 31, v17
v_add_co_u32 v17, vcc_lo, s8, v17
v_add_co_ci_u32_e32 v18, vcc_lo, s9, v18, vcc_lo
s_waitcnt vmcnt(0)
global_store_b8 v[17:18], v19, off
s_branch .LBB0_27
.LBB0_40:
s_cbranch_execnz .LBB0_45
.LBB0_41:
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s12, 31
v_dual_mov_b32 v5, 1 :: v_dual_lshlrev_b32 v2, 1, v0
s_add_i32 s0, s12, s0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s0, s0, 1
v_dual_mov_b32 v4, 0 :: v_dual_add_nc_u32 v3, s0, v0
s_mov_b32 s0, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[0:1], 2, v[3:4]
v_or_b32_e32 v3, 1, v2
v_add_nc_u32_e32 v4, s12, v2
v_add_co_u32 v0, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_42:
v_add_nc_u32_e32 v6, s0, v2
v_add_nc_u32_e32 v8, s0, v3
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v7, 31, v6
v_ashrrev_i32_e32 v9, 31, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_lshlrev_b64 v[8:9], 2, v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, s4, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v8, vcc_lo, s4, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v9, vcc_lo
s_clause 0x1
global_load_b32 v6, v[6:7], off
global_load_b32 v7, v[8:9], off
.LBB0_43:
flat_load_b32 v8, v[0:1] glc dlc
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_ne_u32_e32 vcc_lo, 1, v8
s_or_b32 s1, vcc_lo, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_43
s_or_b32 exec_lo, exec_lo, s1
v_and_b32_e32 v8, s13, v6
v_and_b32_e32 v9, s13, v7
s_add_i32 s0, s0, s12
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_gt_i32 s0, 0x1ffff
v_cmp_lt_i32_e32 vcc_lo, v8, v9
v_dual_cndmask_b32 v9, v7, v6 :: v_dual_add_nc_u32 v8, 1, v4
v_cndmask_b32_e32 v6, v6, v7, vcc_lo
s_clause 0x1
global_store_b8 v4, v9, s[2:3]
global_store_b8 v8, v6, s[2:3]
s_waitcnt_vscnt null, 0x0
buffer_gl1_inv
buffer_gl0_inv
flat_store_b32 v[0:1], v5 dlc
s_waitcnt_vscnt null, 0x0
s_cbranch_scc0 .LBB0_42
.LBB0_45:
s_set_inst_prefetch_distance 0x2
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5benesiiPcPiPViiS0_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 21
.amdhsa_next_free_sgpr 19
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5benesiiPcPiPViiS0_S_, .Lfunc_end0-_Z5benesiiPcPiPViiS0_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5benesiiPcPiPViiS0_S_
.private_segment_fixed_size: 0
.sgpr_count: 21
.sgpr_spill_count: 0
.symbol: _Z5benesiiPcPiPViiS0_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 21
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //put C:/Users/molly/Desktop/289Q/project/main.cu
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <hip/hip_cooperative_groups.h>
#include <hip/hip_cooperative_groups.h>
// includes, project
#include <hip/hip_runtime.h>
using namespace cooperative_groups;
namespace cg = cooperative_groups;
// #define FILESIZE_CHAR 1048576
#define FILESIZE_CHAR 1048576
#define FILESIZE_INT FILESIZE_CHAR/4
__host__
void makeLUT(int N, int* LUT){
int M = N;
int even = 0;
int odd = 1;
int LUTsize = N*(log2((double)N)*2 - 2);
for (int i =0; i < LUTsize/2; i+=N){
for (int j=0; j<N; j+=M){
for (int k =0; k<M/2; k++){
LUT[i+j+k] = even;
even+=2;
}
for (int k =M/2; k<M; k++){
LUT[i+j+k] = odd;
odd+=2;
}
} even=0; odd=1; M = M/2;
}
for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){
for(int j=0; j<N; j++){
int newIndex = LUT[x+j-LUTsize/2];
LUT[newIndex + i] = j;
}
}
return;
}
int createMask(int n)
{
int r = 0;
for (int i=0; i<n; i++)
r |= 1 << i;
return r;
}
__global__
void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){
int idx = threadIdx.x;
int in1, in2, in1_index, in2_index;
int readOffset=0;
int fileSize = FILESIZE_INT/2;
int readOffsetSecondNet=fileSize;
thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32?
if(blockIdx.x == 0){
while(readOffset < fileSize){
in1 = data[idx*2 + readOffset];
in2 = data[idx*2+1 + readOffset];
readOffset+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
g.sync();
// __syncthreads();
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
}
}
else if ( blockIdx.x < block) {
while(readOffset < fileSize){
while((valid[idx + (blockIdx.x)*(N/2)])==0);
in1_index = LUT[idx*2 + (blockIdx.x-1)*N];
in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
g.sync();
// __syncthreads();
}
else {
output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffset += N;
}
}
else if(blockIdx.x == block){
while(readOffsetSecondNet < FILESIZE_INT){
in1 = data[idx*2 + readOffsetSecondNet];
in2 = data[idx*2+1 + readOffsetSecondNet];
readOffsetSecondNet+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
}
}
else{
while(readOffsetSecondNet < FILESIZE_INT){
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N];
in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2);
valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
//printf("waiting for next block %d to consume\n", blockIdx.x + 1);
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
// printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
//printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
if (blockIdx.x != gridDim.x - 1){
valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
//printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N);
}
else {
output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffsetSecondNet += N;
}
}
}
int main(int argc, char *argv[]){
    // Host driver: reads a binary input file, builds the routing LUT and bit
    // mask, launches the `benes` permutation-network kernel, then checks that
    // within every N-wide chunk the masked output values are non-decreasing.
    // Usage: <prog> <input.txt> <size>; returns 0 on success, 1 on error.
    if (argc != 3){
        printf("Usage: %s <input.txt> <size>\n", argv[0]);
        return 1;
    }
    std::ifstream file(argv[1], std::ios::binary);
    if (!file) {
        printf("Could not open input file\n");
        return 1;
    }
    // Network width: clamp the requested size to the file's element count.
    int N = atoi(argv[2]);
    if (FILESIZE_INT < N)
        N = FILESIZE_INT;
    int blockSize = N/2;                     // one thread per 2x2 switch per stage
    int blocks = 2*log2((double)N)-1;        // stage count of an N-input Benes network
    int LUTsize = N*(log2((double)N)*2 - 2);
    int numBlocks;
    if (FILESIZE_INT <= N)
        numBlocks = blocks;
    else
        numBlocks = 2*blocks;                // second network pipelined when file > N
    char* network;
    hipMallocManaged(&network,N*(numBlocks+1)*sizeof(char));
    memset(network,0,N*(numBlocks+1)*sizeof(char));
    int* LUT;
    hipMallocManaged(&LUT,LUTsize*sizeof(int));
    makeLUT(N,LUT);
    int mask = createMask(log2((double)N));
    // BUG FIX: the kernel writes valid[idx + (blockIdx.x+1)*(N/2)] with
    // blockIdx.x up to numBlocks-1, i.e. indices up to (numBlocks+1)*(N/2)-1,
    // and the old code memset that full range while allocating only
    // (N/2)*numBlocks ints (heap overflow).  Allocate numBlocks+1 slabs so
    // the allocation matches both the memset and the kernel's writes.
    int *valid;
    hipMallocManaged(&valid,(N/2)*(numBlocks+1)*sizeof(int));
    memset(valid,0,(N/2)*(numBlocks+1)*sizeof(int));
    for(int i = 0; i < N/2; i++)
        valid[i] = 1;                        // stage-0 inputs start out ready
    char* data;
    hipMallocManaged(&data,FILESIZE_CHAR*sizeof(char));
    memset(data,0,FILESIZE_CHAR*sizeof(char));
    file.read(data, FILESIZE_CHAR*sizeof(char));
    file.close();
    // NOTE(review): idata is declared int* yet allocated/copied in bytes
    // (FILESIZE_CHAR), exactly like the char buffers the kernel consumes —
    // confirm the intended element type against the kernel signature.
    int* idata;
    hipMallocManaged(&idata,FILESIZE_CHAR*sizeof(char));
    memcpy(idata, data, FILESIZE_CHAR*sizeof(char));
    char* output;
    hipMallocManaged(&output,FILESIZE_CHAR*sizeof(char));
    memset(output,0,FILESIZE_CHAR*sizeof(char));
    benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output);
    hipDeviceSynchronize();
    // Verify routing: inside each N-wide chunk (chunk boundaries excluded),
    // and skipping positions whose successor byte is 0, masked values must
    // be non-decreasing.
    for (int i = 0; i < FILESIZE_INT-1; i++){
        if ((i%N != N-1) && (output[i+1]!=0)) {
            if((mask & output[i+1]) < (mask & output[i])){
                printf("ERROR in routing at output %d %d %d\n",i ,mask & output[i+1],mask &output[i] );
                return 1;
            }
        }
    }
    printf("Routing was successful!\n");
    hipFree(valid);
    hipFree(LUT);
    hipFree(network);
    hipFree(data);
    hipFree(idata);
    hipFree(output);
    return 0;
}
.file "main.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7makeLUTiPi
.LCPI0_0:
.quad 0xc000000000000000 # double -2
.text
.globl _Z7makeLUTiPi
.p2align 4, 0x90
.type _Z7makeLUTiPi,@function
_Z7makeLUTiPi: # @_Z7makeLUTiPi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
cvtsi2sd %edi, %xmm0
movsd %xmm0, (%rsp) # 8-byte Spill
callq log2
addsd %xmm0, %xmm0
addsd .LCPI0_0(%rip), %xmm0
mulsd (%rsp), %xmm0 # 8-byte Folded Reload
cvttsd2si %xmm0, %eax
movl %eax, %ecx
shrl $31, %ecx
addl %eax, %ecx
sarl %ecx
movl %eax, 8(%rsp) # 4-byte Spill
cmpl $2, %eax
jl .LBB0_10
# %bb.1: # %.preheader68.lr.ph
movslq %ebp, %rdx
movl %ecx, %eax
movq %rax, (%rsp) # 8-byte Spill
leaq (,%rdx,4), %rax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r8d, %r8d
movq %rbx, %rsi
movl %ebp, %r10d
movl %ebp, 12(%rsp) # 4-byte Spill
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_9: # %._crit_edge80
# in Loop: Header=BB0_2 Depth=1
movl %r10d, %eax
shrl $31, %eax
addl %r10d, %eax
sarl %eax
addq %rdx, %r8
addq 16(%rsp), %rsi # 8-byte Folded Reload
movl %eax, %r10d
cmpq (%rsp), %r8 # 8-byte Folded Reload
movl 12(%rsp), %ebp # 4-byte Reload
jge .LBB0_10
.LBB0_2: # %.preheader68
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
# Child Loop BB0_6 Depth 3
# Child Loop BB0_17 Depth 3
testl %ebp, %ebp
jle .LBB0_9
# %bb.3: # %.preheader67.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r10d, %r11d
shrl $31, %r11d
addl %r10d, %r11d
sarl %r11d
movslq %r11d, %r14
movslq %r10d, %r15
leaq (,%r15,4), %r12
movl $1, %r13d
xorl %eax, %eax
movq %rsi, %r9
xorl %edi, %edi
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_8: # %._crit_edge
# in Loop: Header=BB0_4 Depth=2
addq %r15, %rax
addq %r12, %r9
cmpq %rdx, %rax
jge .LBB0_9
.LBB0_4: # %.preheader67
# Parent Loop BB0_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_6 Depth 3
# Child Loop BB0_17 Depth 3
cmpl $2, %r10d
jl .LBB0_7
# %bb.5: # %.lr.ph
# in Loop: Header=BB0_4 Depth=2
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_6: # Parent Loop BB0_2 Depth=1
# Parent Loop BB0_4 Depth=2
# => This Inner Loop Header: Depth=3
movl %edi, (%r9,%rbp,4)
addl $2, %edi
incq %rbp
cmpq %rbp, %r11
jne .LBB0_6
.LBB0_7: # %.preheader66
# in Loop: Header=BB0_4 Depth=2
movq %r14, %rbp
cmpl %r10d, %r11d
jge .LBB0_8
.p2align 4, 0x90
.LBB0_17: # Parent Loop BB0_2 Depth=1
# Parent Loop BB0_4 Depth=2
# => This Inner Loop Header: Depth=3
movl %r13d, (%r9,%rbp,4)
addl $2, %r13d
incq %rbp
cmpq %rbp, %r15
jne .LBB0_17
jmp .LBB0_8
.LBB0_10: # %.preheader65
movl 8(%rsp), %r10d # 4-byte Reload
movl %r10d, %eax
subl %ecx, %eax
jle .LBB0_16
# %bb.11: # %.preheader.lr.ph
movl %ebp, %edx
subl %ebp, %eax
movl %ebp, %esi
negl %esi
jmp .LBB0_12
.p2align 4, 0x90
.LBB0_15: # %._crit_edge86
# in Loop: Header=BB0_12 Depth=1
addl %ebp, %ecx
addq %rsi, %rax
cmpl %r10d, %ecx
jge .LBB0_16
.LBB0_12: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_14 Depth 2
testl %ebp, %ebp
jle .LBB0_15
# %bb.13: # %.lr.ph85
# in Loop: Header=BB0_12 Depth=1
movslq %ecx, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB0_14: # Parent Loop BB0_12 Depth=1
# => This Inner Loop Header: Depth=2
leal (%rax,%r8), %r9d
movslq %r9d, %r9
movslq (%rbx,%r9,4), %r9
addq %rdi, %r9
movl %r8d, (%rbx,%r9,4)
incq %r8
cmpq %r8, %rdx
jne .LBB0_14
jmp .LBB0_15
.LBB0_16: # %._crit_edge90
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7makeLUTiPi, .Lfunc_end0-_Z7makeLUTiPi
.cfi_endproc
# -- End function
	.globl	_Z10createMaski                 # -- Begin function _Z10createMaski
	.p2align	4, 0x90
	.type	_Z10createMaski,@function
# Host (clang/HIP) build of createMask(int n): sets bits 0..n-1 of the result
# one at a time with btsl; returns 0 for n <= 0.  Argument in %edi, result in
# %eax (SysV ABI).
_Z10createMaski:                        # @_Z10createMaski
	.cfi_startproc
# %bb.0:
	testl	%edi, %edi
	jle	.LBB1_1
# %bb.3:                                # %.lr.ph.preheader
	xorl	%ecx, %ecx
	xorl	%eax, %eax
	.p2align	4, 0x90
.LBB1_4:                                # %.lr.ph
                                        # =>This Inner Loop Header: Depth=1
	btsl	%ecx, %eax                      # set bit %ecx of the accumulator
	incl	%ecx
	cmpl	%ecx, %edi
	jne	.LBB1_4
# %bb.2:                                # %._crit_edge
	retq
.LBB1_1:
	xorl	%eax, %eax                      # n <= 0: empty mask
	retq
.Lfunc_end1:
	.size	_Z10createMaski, .Lfunc_end1-_Z10createMaski
	.cfi_endproc
                                        # -- End function
.globl _Z20__device_stub__benesiiPcPiPViiS0_S_ # -- Begin function _Z20__device_stub__benesiiPcPiPViiS0_S_
.p2align 4, 0x90
.type _Z20__device_stub__benesiiPcPiPViiS0_S_,@function
_Z20__device_stub__benesiiPcPiPViiS0_S_: # @_Z20__device_stub__benesiiPcPiPViiS0_S_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5benesiiPcPiPViiS0_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end2:
.size _Z20__device_stub__benesiiPcPiPViiS0_S_, .Lfunc_end2-_Z20__device_stub__benesiiPcPiPViiS0_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0xbff0000000000000 # double -1
.LCPI3_1:
.quad 0xc000000000000000 # double -2
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $760, %rsp # imm = 0x2F8
.cfi_def_cfa_offset 816
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $3, %edi
jne .LBB3_1
# %bb.2:
movq %rsi, %rbx
movq 8(%rsi), %rsi
.cfi_escape 0x2e, 0x00
leaq 240(%rsp), %rdi
movl $4, %edx
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode
movq 240(%rsp), %rax
movq -24(%rax), %rax
testb $5, 272(%rsp,%rax)
je .LBB3_4
# %bb.3:
.cfi_escape 0x2e, 0x00
movl $.Lstr.1, %edi
callq puts@PLT
movl $1, %ebx
jmp .LBB3_46
.LBB3_1:
movq (%rsi), %rsi
.cfi_escape 0x2e, 0x00
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $1, %ebx
jmp .LBB3_47
.LBB3_4:
movq 16(%rbx), %rdi
.cfi_escape 0x2e, 0x00
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
cmpl $262144, %r14d # imm = 0x40000
movl $262144, %ebx # imm = 0x40000
cmovll %r14d, %ebx
cvtsi2sd %ebx, %xmm0
.cfi_escape 0x2e, 0x00
movsd %xmm0, (%rsp) # 8-byte Spill
callq log2
addsd %xmm0, %xmm0
addsd .LCPI3_0(%rip), %xmm0
cvttsd2si %xmm0, %ebp
testl %r14d, %r14d
jle .LBB3_5
.LBB3_6: # %cdce.end
.cfi_escape 0x2e, 0x00
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq log2
movsd %xmm0, 80(%rsp) # 8-byte Spill
cmpl $262144, %r14d # imm = 0x40000
setl %cl
movl %ebp, 56(%rsp) # 4-byte Spill
movl %ebp, %eax
shll %cl, %eax
movq %rax, 72(%rsp) # 8-byte Spill
leal 1(%rax), %r12d
movl %r12d, %eax
imull %ebx, %eax
movslq %eax, %r15
.Ltmp0:
.cfi_escape 0x2e, 0x00
leaq 48(%rsp), %rdi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
.Ltmp1:
# %bb.7: # %_ZL16hipMallocManagedIcE10hipError_tPPT_mj.exit
movsd 80(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
addsd %xmm0, %xmm0
addsd .LCPI3_1(%rip), %xmm0
mulsd (%rsp), %xmm0 # 8-byte Folded Reload
cvttsd2si %xmm0, %r13d
movq 48(%rsp), %rdi
.cfi_escape 0x2e, 0x00
xorl %ebp, %ebp
xorl %esi, %esi
movq %r15, %rdx
callq memset@PLT
movslq %r13d, %rsi
shlq $2, %rsi
.Ltmp3:
.cfi_escape 0x2e, 0x00
leaq 40(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
.Ltmp4:
# %bb.8: # %_ZL16hipMallocManagedIiE10hipError_tPPT_mj.exit
movl %ebx, %r15d
shrl $31, %r15d
addl %ebx, %r15d
sarl %r15d
movq 40(%rsp), %rsi
.cfi_escape 0x2e, 0x00
movl %ebx, %edi
callq _Z7makeLUTiPi
.cfi_escape 0x2e, 0x00
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq log2
cvttsd2si %xmm0, %eax
testl %eax, %eax
movq 72(%rsp), %r13 # 8-byte Reload
jle .LBB3_11
# %bb.9: # %.lr.ph.i.preheader
xorl %ecx, %ecx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB3_10: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
btsl %ecx, %ebp
incl %ecx
cmpl %ecx, %eax
jne .LBB3_10
.LBB3_11: # %_Z10createMaski.exit
movl %r13d, %eax
imull %r15d, %eax
movslq %eax, %rsi
shlq $2, %rsi
.Ltmp6:
.cfi_escape 0x2e, 0x00
leaq 16(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
.Ltmp7:
# %bb.12: # %_ZL16hipMallocManagedIiE10hipError_tPPT_mj.exit80
movq 16(%rsp), %rdi
imull %r15d, %r12d
movslq %r12d, %rdx
shlq $2, %rdx
.cfi_escape 0x2e, 0x00
xorl %esi, %esi
callq memset@PLT
cmpl $2, %r14d
jl .LBB3_15
# %bb.13: # %.lr.ph
movq 16(%rsp), %rax
cmpl $2, %r15d
movl $1, %ecx
cmovgel %r15d, %ecx
xorl %edx, %edx
.p2align 4, 0x90
.LBB3_14: # =>This Inner Loop Header: Depth=1
movl $1, (%rax,%rdx,4)
incq %rdx
cmpq %rdx, %rcx
jne .LBB3_14
.LBB3_15: # %._crit_edge
.Ltmp9:
.cfi_escape 0x2e, 0x00
leaq 32(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $1, %edx
callq hipMallocManaged
.Ltmp10:
# %bb.16: # %_ZL16hipMallocManagedIcE10hipError_tPPT_mj.exit81
movq 32(%rsp), %r14
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
movq %r14, %rdi
xorl %esi, %esi
callq memset@PLT
.Ltmp11:
.cfi_escape 0x2e, 0x00
leaq 240(%rsp), %rdi
movl $1048576, %edx # imm = 0x100000
movq %r14, %rsi
callq _ZNSi4readEPcl
.Ltmp12:
# %bb.17:
leaq 256(%rsp), %rdi
.Ltmp13:
.cfi_escape 0x2e, 0x00
callq _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv
.Ltmp14:
# %bb.18: # %.noexc
testq %rax, %rax
jne .LBB3_20
# %bb.19:
movq 240(%rsp), %rax
movq -24(%rax), %rax
leaq (%rsp,%rax), %rdi
addq $240, %rdi
movl 272(%rsp,%rax), %esi
orl $4, %esi
.Ltmp15:
.cfi_escape 0x2e, 0x00
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.Ltmp16:
.LBB3_20: # %_ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv.exit
.Ltmp18:
.cfi_escape 0x2e, 0x00
leaq 24(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $1, %edx
callq hipMallocManaged
.Ltmp19:
# %bb.21: # %_ZL16hipMallocManagedIiE10hipError_tPPT_mj.exit84
movq 24(%rsp), %rdi
movq 32(%rsp), %rsi
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
callq memcpy@PLT
.Ltmp21:
.cfi_escape 0x2e, 0x00
leaq 8(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $1, %edx
callq hipMallocManaged
.Ltmp22:
# %bb.22: # %_ZL16hipMallocManagedIcE10hipError_tPPT_mj.exit86
movq 8(%rsp), %rdi
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
xorl %esi, %esi
callq memset@PLT
movl %r13d, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl %r15d, %edx
orq %rax, %rdx
.Ltmp23:
.cfi_escape 0x2e, 0x00
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
.Ltmp24:
# %bb.23:
testl %eax, %eax
jne .LBB3_26
# %bb.24:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 16(%rsp), %rdx
movq 24(%rsp), %rsi
movq 8(%rsp), %rdi
movl %ebx, 68(%rsp)
movl 56(%rsp), %r8d # 4-byte Reload
movl %r8d, 64(%rsp)
movq %rax, 168(%rsp)
movq %rcx, 160(%rsp)
movq %rdx, 152(%rsp)
movl %ebp, 60(%rsp)
movq %rsi, 144(%rsp)
movq %rdi, 136(%rsp)
leaq 68(%rsp), %rax
movq %rax, 176(%rsp)
leaq 64(%rsp), %rax
movq %rax, 184(%rsp)
leaq 168(%rsp), %rax
movq %rax, 192(%rsp)
leaq 160(%rsp), %rax
movq %rax, 200(%rsp)
leaq 152(%rsp), %rax
movq %rax, 208(%rsp)
leaq 60(%rsp), %rax
movq %rax, 216(%rsp)
leaq 144(%rsp), %rax
movq %rax, 224(%rsp)
leaq 136(%rsp), %rax
movq %rax, 232(%rsp)
.Ltmp25:
.cfi_escape 0x2e, 0x00
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp26:
# %bb.25: # %.noexc87
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
.Ltmp27:
.cfi_escape 0x2e, 0x10
leaq 176(%rsp), %r9
movl $_Z5benesiiPcPiPViiS0_S_, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp28:
.LBB3_26:
.Ltmp29:
.cfi_escape 0x2e, 0x00
callq hipDeviceSynchronize
.Ltmp30:
# %bb.27: # %.preheader
leal -1(%rbx), %edi
movq 8(%rsp), %r8
xorl %esi, %esi
xorl %r14d, %r14d
jmp .LBB3_28
.p2align 4, 0x90
.LBB3_37: # in Loop: Header=BB3_28 Depth=1
cmpq $262142, %rsi # imm = 0x3FFFE
leaq 1(%rsi), %rax
setae %r14b
movq %rax, %rsi
cmpq $262143, %rax # imm = 0x3FFFF
je .LBB3_38
.LBB3_28: # =>This Inner Loop Header: Depth=1
movl %esi, %eax
cltd
idivl %ebx
cmpl %edi, %edx
je .LBB3_37
# %bb.29: # in Loop: Header=BB3_28 Depth=1
movsbl 1(%r8,%rsi), %edx
testl %edx, %edx
je .LBB3_37
# %bb.30: # in Loop: Header=BB3_28 Depth=1
andl %ebp, %edx
movsbl (%r8,%rsi), %ecx
andl %ebp, %ecx
cmpl %ecx, %edx
jge .LBB3_37
# %bb.31:
.cfi_escape 0x2e, 0x00
movl $.L.str.2, %edi
# kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq printf
movl $1, %ebx
testb $1, %r14b
jne .LBB3_40
jmp .LBB3_46
.LBB3_38:
xorl %ebx, %ebx
testb $1, %r14b
je .LBB3_46
.LBB3_40:
.cfi_escape 0x2e, 0x00
movl $.Lstr, %edi
callq puts@PLT
movq 16(%rsp), %rdi
.Ltmp31:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp32:
# %bb.41:
movq 40(%rsp), %rdi
.Ltmp33:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp34:
# %bb.42:
movq 48(%rsp), %rdi
.Ltmp35:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp36:
# %bb.43:
movq 32(%rsp), %rdi
.Ltmp37:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp38:
# %bb.44:
movq 24(%rsp), %rdi
.Ltmp39:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp40:
# %bb.45:
movq 8(%rsp), %rdi
.Ltmp41:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp42:
.LBB3_46:
.cfi_escape 0x2e, 0x00
leaq 240(%rsp), %rdi
movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
leaq 496(%rsp), %rdi
.cfi_escape 0x2e, 0x00
callq _ZNSt8ios_baseD2Ev
.LBB3_47:
movl %ebx, %eax
addq $760, %rsp # imm = 0x2F8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_5: # %cdce.call
.cfi_def_cfa_offset 816
.cfi_escape 0x2e, 0x00
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq log2
jmp .LBB3_6
.LBB3_48:
.Ltmp20:
jmp .LBB3_49
.LBB3_34:
.Ltmp8:
jmp .LBB3_49
.LBB3_33:
.Ltmp5:
jmp .LBB3_49
.LBB3_32:
.Ltmp2:
jmp .LBB3_49
.LBB3_35:
.Ltmp17:
jmp .LBB3_49
.LBB3_36:
.Ltmp43:
.LBB3_49:
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
leaq 240(%rsp), %rdi
movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
leaq 496(%rsp), %rdi
.cfi_escape 0x2e, 0x00
callq _ZNSt8ios_baseD2Ev
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table3:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp1-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp3-.Ltmp1 # Call between .Ltmp1 and .Ltmp3
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp4-.Ltmp3 # Call between .Ltmp3 and .Ltmp4
.uleb128 .Ltmp5-.Lfunc_begin0 # jumps to .Ltmp5
.byte 0 # On action: cleanup
.uleb128 .Ltmp6-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp7-.Ltmp6 # Call between .Ltmp6 and .Ltmp7
.uleb128 .Ltmp8-.Lfunc_begin0 # jumps to .Ltmp8
.byte 0 # On action: cleanup
.uleb128 .Ltmp7-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp9-.Ltmp7 # Call between .Ltmp7 and .Ltmp9
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp9-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Ltmp10-.Ltmp9 # Call between .Ltmp9 and .Ltmp10
.uleb128 .Ltmp17-.Lfunc_begin0 # jumps to .Ltmp17
.byte 0 # On action: cleanup
.uleb128 .Ltmp10-.Lfunc_begin0 # >> Call Site 8 <<
.uleb128 .Ltmp11-.Ltmp10 # Call between .Ltmp10 and .Ltmp11
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp11-.Lfunc_begin0 # >> Call Site 9 <<
.uleb128 .Ltmp16-.Ltmp11 # Call between .Ltmp11 and .Ltmp16
.uleb128 .Ltmp17-.Lfunc_begin0 # jumps to .Ltmp17
.byte 0 # On action: cleanup
.uleb128 .Ltmp18-.Lfunc_begin0 # >> Call Site 10 <<
.uleb128 .Ltmp19-.Ltmp18 # Call between .Ltmp18 and .Ltmp19
.uleb128 .Ltmp20-.Lfunc_begin0 # jumps to .Ltmp20
.byte 0 # On action: cleanup
.uleb128 .Ltmp19-.Lfunc_begin0 # >> Call Site 11 <<
.uleb128 .Ltmp21-.Ltmp19 # Call between .Ltmp19 and .Ltmp21
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp21-.Lfunc_begin0 # >> Call Site 12 <<
.uleb128 .Ltmp22-.Ltmp21 # Call between .Ltmp21 and .Ltmp22
.uleb128 .Ltmp43-.Lfunc_begin0 # jumps to .Ltmp43
.byte 0 # On action: cleanup
.uleb128 .Ltmp22-.Lfunc_begin0 # >> Call Site 13 <<
.uleb128 .Ltmp23-.Ltmp22 # Call between .Ltmp22 and .Ltmp23
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp23-.Lfunc_begin0 # >> Call Site 14 <<
.uleb128 .Ltmp42-.Ltmp23 # Call between .Ltmp23 and .Ltmp42
.uleb128 .Ltmp43-.Lfunc_begin0 # jumps to .Ltmp43
.byte 0 # On action: cleanup
.uleb128 .Ltmp42-.Lfunc_begin0 # >> Call Site 15 <<
.uleb128 .Lfunc_end3-.Ltmp42 # Call between .Ltmp42 and .Lfunc_end3
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
# HIP module constructor (runs from .init_array): registers the embedded fat
# binary once (guarded by __hip_gpubin_handle), registers the benes kernel
# stub under its mangled name, and installs __hip_module_dtor via atexit.
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB4_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB4_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z5benesiiPcPiPViiS0_S_, %esi
	movl	$.L__unnamed_1, %edx
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end4:
	.size	__hip_module_ctor, .Lfunc_end4-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
# HIP module destructor (atexit handler): unregisters the fat binary if it
# was registered and clears the cached handle so the teardown is idempotent.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB5_2
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB5_2:
	retq
.Lfunc_end5:
	.size	__hip_module_dtor, .Lfunc_end5-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z5benesiiPcPiPViiS0_S_,@object # @_Z5benesiiPcPiPViiS0_S_
.section .rodata,"a",@progbits
.globl _Z5benesiiPcPiPViiS0_S_
.p2align 3, 0x0
_Z5benesiiPcPiPViiS0_S_:
.quad _Z20__device_stub__benesiiPcPiPViiS0_S_
.size _Z5benesiiPcPiPViiS0_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: %s <input.txt> <size>\n"
.size .L.str, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "ERROR in routing at output %d %d %d\n"
.size .L.str.2, 37
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5benesiiPcPiPViiS0_S_"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Routing was successful!"
.size .Lstr, 24
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Could not open input file"
.size .Lstr.1, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__benesiiPcPiPViiS0_S_
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z5benesiiPcPiPViiS0_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00085769_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
# CUDA teardown helper (registered with atexit by __sti____cudaRegisterAll):
# unregisters the fat binary whose handle is cached in __cudaFatCubinHandle.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB7002:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE7002:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7makeLUTiPi
.type _Z7makeLUTiPi, @function
_Z7makeLUTiPi:
.LFB6997:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movl %edi, %r15d
movq %rsi, 48(%rsp)
pxor %xmm1, %xmm1
cvtsi2sdl %edi, %xmm1
movsd %xmm1, 8(%rsp)
movapd %xmm1, %xmm0
call log2@PLT
addsd %xmm0, %xmm0
subsd .LC0(%rip), %xmm0
mulsd 8(%rsp), %xmm0
cvttsd2sil %xmm0, %ecx
movl %ecx, %eax
shrl $31, %eax
addl %ecx, %eax
sarl %eax
movl %eax, %r8d
movl %eax, %edx
cmpl $1, %ecx
jle .L4
movslq %r15d, %rax
movq %rax, 40(%rsp)
movl %r15d, %ebp
movl $0, %eax
movl $0, %edi
movl %ecx, 56(%rsp)
movl %r8d, 36(%rsp)
movq %rax, %rdx
movl %r8d, 60(%rsp)
jmp .L5
.L6:
movl %edx, (%rcx,%rax,4)
addl $2, %edx
addq $1, %rax
cmpl %eax, %esi
jg .L6
leal 2(%r14,%r10), %r10d
.L10:
cmpl %ebp, %esi
jge .L7
movq 16(%rsp), %rax
leaq (%rcx,%rax), %rdx
leal 0(%r13,%r8), %edi
movl %r8d, %eax
.L8:
movl %eax, (%rdx)
addl $2, %eax
addq $4, %rdx
cmpl %edi, %eax
jne .L8
leal 2(%r11,%r8), %r8d
.L7:
addl %r12d, %r9d
addq %rbx, %rcx
movl %r9d, %eax
movl 8(%rsp), %edi
subl %edi, %eax
cmpl %eax, %r15d
jle .L23
.L11:
movl %r10d, %edx
movl $0, %eax
cmpl $1, %ebp
jg .L6
jmp .L10
.L23:
movl 32(%rsp), %edi
movq 24(%rsp), %rdx
.L9:
movl %ebp, %eax
shrl $31, %eax
addl %ebp, %eax
sarl %eax
movl %eax, %ebp
addl %r15d, %edi
movq 40(%rsp), %rax
addq %rax, %rdx
movl 36(%rsp), %eax
cmpl %eax, %edi
jge .L25
.L5:
testl %r15d, %r15d
jle .L9
movl %ebp, %esi
shrl $31, %esi
addl %ebp, %esi
sarl %esi
movl %ebp, %r12d
movl %edi, 8(%rsp)
movslq %ebp, %rbx
salq $2, %rbx
movq 48(%rsp), %rax
leaq (%rax,%rdx,4), %rcx
leal -2(%rsi,%rsi), %r14d
cmpl $1, %ebp
movl $0, %eax
cmovle %eax, %r14d
movslq %esi, %rax
salq $2, %rax
movq %rax, 16(%rsp)
leal -1(%rbp), %r11d
subl %esi, %r11d
addl %r11d, %r11d
movl %edi, %r9d
movl $1, %r8d
movl $0, %r10d
movl %ebp, %r13d
subl %esi, %r13d
addl %r13d, %r13d
movl %edi, 32(%rsp)
movq %rdx, 24(%rsp)
jmp .L11
.L25:
movl 56(%rsp), %ecx
movl 36(%rsp), %edx
movl 60(%rsp), %r8d
.L4:
movl %ecx, %eax
subl %r15d, %eax
cmpl %r8d, %ecx
jle .L3
movl %r15d, %r9d
negl %r9d
movslq %r9d, %r9
salq $2, %r9
movslq %r8d, %r8
cltq
subq %r8, %rax
movq 48(%rsp), %r10
leaq (%r10,%rax,4), %rdi
movslq %r15d, %r8
jmp .L13
.L14:
movl %edx, %esi
addl (%rdi,%rax,4), %esi
movslq %esi, %rsi
movl %eax, (%r10,%rsi,4)
addq $1, %rax
cmpq %rax, %r8
jne .L14
.L16:
addl %r15d, %edx
addq %r9, %rdi
cmpl %edx, %ecx
jle .L3
.L13:
movl $0, %eax
testl %r15d, %r15d
jg .L14
jmp .L16
.L3:
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE6997:
.size _Z7makeLUTiPi, .-_Z7makeLUTiPi
	.globl	_Z10createMaski
	.type	_Z10createMaski, @function
# Host (gcc/CUDA) build of createMask(int n): ORs (1 << k) into %eax for
# k = 0..n-1; returns 0 when n <= 0.  Argument in %edi, result in %eax.
_Z10createMaski:
.LFB6998:
	.cfi_startproc
	endbr64
	testl	%edi, %edi
	jle	.L30
	movl	$0, %ecx
	movl	$0, %eax
	movl	$1, %esi
.L29:
	movl	%esi, %edx
	sall	%cl, %edx                       # 1 << k
	orl	%edx, %eax                      # accumulate into the mask
	addl	$1, %ecx
	cmpl	%ecx, %edi
	jne	.L29
	ret
.L30:
	movl	$0, %eax                        # n <= 0: empty mask
	ret
	.cfi_endproc
.LFE6998:
	.size	_Z10createMaski, .-_Z10createMaski
.globl _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_
.type _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_, @function
_Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_:
.LFB7024:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movq %rdx, 48(%rsp)
movq %rcx, 40(%rsp)
movq %r8, 32(%rsp)
movl %r9d, 28(%rsp)
movq 224(%rsp), %rax
movq %rax, 16(%rsp)
movq 232(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
leaq 28(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L36
.L32:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L37
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 232
pushq 72(%rsp)
.cfi_def_cfa_offset 240
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z5benesiiPcPiPViiS0_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L32
.L37:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE7024:
.size _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_, .-_Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_
	.globl	_Z5benesiiPcPiPViiS0_S_
	.type	_Z5benesiiPcPiPViiS0_S_, @function
# Host-side entry for the benes kernel: re-pushes the two stack-passed
# arguments and tail-delegates to the generated __device_stub, which performs
# the actual cudaLaunchKernel call.
_Z5benesiiPcPiPViiS0_S_:
.LFB7025:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	pushq	24(%rsp)
	.cfi_def_cfa_offset 24
	pushq	24(%rsp)
	.cfi_def_cfa_offset 32
	call	_Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_
	addq	$24, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE7025:
	.size	_Z5benesiiPcPiPViiS0_S_, .-_Z5benesiiPcPiPViiS0_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z5benesiiPcPiPViiS0_S_"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "_ZN37_INTERNAL_9a227ec7_7_main_cu_c848ebe24cuda3std3__419piecewise_constructE"
.align 8
.LC3:
.string "_ZN37_INTERNAL_9a227ec7_7_main_cu_c848ebe24cuda3std6ranges3__45__cpo4swapE"
.align 8
.LC4:
.string "_ZN37_INTERNAL_9a227ec7_7_main_cu_c848ebe24cuda3std6ranges3__45__cpo9iter_moveE"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB7027:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z5benesiiPcPiPViiS0_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std3__419piecewise_constructE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std6ranges3__45__cpo4swapE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std6ranges3__45__cpo9iter_moveE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE7027:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .rodata.str1.1
.LC5:
.string "Usage: %s <input.txt> <size>\n"
.LC6:
.string "Could not open input file\n"
.section .rodata.str1.8
.align 8
.LC8:
.string "ERROR in routing at output %d %d %d\n"
.section .rodata.str1.1
.LC9:
.string "Routing was successful!\n"
.text
.globl main
.type main, @function
main:
.LFB6999:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA6999
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $648, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
cmpl $3, %edi
je .L43
movq (%rsi), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
.LEHB0:
call __printf_chk@PLT
movl $1, %r13d
.L42:
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L65
movl %r13d, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L43:
.cfi_restore_state
movq 8(%rsi), %rsi
leaq -576(%rbp), %rdi
movl $4, %edx
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@PLT
.LEHE0:
movl -288(%rbp), %r13d
andl $5, %r13d
jne .L66
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r14
movl $262144, %ebx
cmpl %ebx, %eax
cmovle %eax, %ebx
movl $2, %ecx
movl %ebx, %eax
cltd
idivl %ecx
movl %eax, %r12d
pxor %xmm1, %xmm1
cvtsi2sdl %ebx, %xmm1
movsd %xmm1, -664(%rbp)
movapd %xmm1, %xmm0
call log2@PLT
addsd %xmm0, %xmm0
subsd .LC7(%rip), %xmm0
cvttsd2sil %xmm0, %r15d
movl %r15d, -680(%rbp)
movsd -664(%rbp), %xmm0
call log2@PLT
movsd -664(%rbp), %xmm0
call log2@PLT
addsd %xmm0, %xmm0
subsd .LC0(%rip), %xmm0
mulsd -664(%rbp), %xmm0
cvttsd2sil %xmm0, %eax
movl %eax, -676(%rbp)
cmpl $262143, %r14d
jg .L47
leal (%r15,%r15), %r15d
.L47:
leal 1(%r15), %eax
imull %ebx, %eax
cltq
leaq -648(%rbp), %rdi
movl $1, %edx
movq %rax, -672(%rbp)
movq %rax, %rsi
.LEHB1:
call cudaMallocManaged@PLT
jmp .L67
.L66:
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L68
.L67:
movq -672(%rbp), %rdx
movl $0, %esi
movq -648(%rbp), %rdi
call memset@PLT
movslq -676(%rbp), %rsi
salq $2, %rsi
leaq -640(%rbp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movq -640(%rbp), %rsi
movl %ebx, %edi
call _Z7makeLUTiPi
movsd -664(%rbp), %xmm0
call log2@PLT
cvttsd2sil %xmm0, %edi
call _Z10createMaski
movl %eax, -664(%rbp)
movl %r15d, %eax
imull %r12d, %eax
movl %eax, -672(%rbp)
movslq %eax, %rsi
salq $2, %rsi
leaq -632(%rbp), %rdi
movl $1, %edx
call cudaMallocManaged@PLT
movl -672(%rbp), %edx
addl %r12d, %edx
movslq %edx, %rdx
salq $2, %rdx
movl $0, %esi
movq -632(%rbp), %rdi
call memset@PLT
cmpl $1, %r14d
jle .L48
movl $0, %eax
.L49:
movq -632(%rbp), %rdx
movl $1, (%rdx,%rax,4)
addq $1, %rax
cmpl %eax, %r12d
jg .L49
.L48:
leaq -624(%rbp), %rdi
movl $1, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
movl $1048576, %edx
movl $0, %esi
movq -624(%rbp), %rdi
call memset@PLT
leaq -576(%rbp), %rdi
movl $1048576, %edx
movq -624(%rbp), %rsi
call _ZNSi4readEPcl@PLT
leaq -576(%rbp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv@PLT
leaq -616(%rbp), %rdi
movl $1, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
movl $1048576, %edx
movq -624(%rbp), %rsi
movq -616(%rbp), %rdi
call memcpy@PLT
leaq -608(%rbp), %rdi
movl $1, %edx
movl $1048576, %esi
call cudaMallocManaged@PLT
movl $1048576, %edx
movl $0, %esi
movq -608(%rbp), %rdi
call memset@PLT
movl %r12d, -588(%rbp)
movl $1, -584(%rbp)
movl $1, -580(%rbp)
movl %r15d, -600(%rbp)
movl $1, -596(%rbp)
movl $1, -592(%rbp)
movl $0, %r9d
movl $0, %r8d
movq -588(%rbp), %rdx
movl $1, %ecx
movq -600(%rbp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L50
pushq -608(%rbp)
pushq -616(%rbp)
movl -664(%rbp), %r9d
movq -632(%rbp), %r8
movq -640(%rbp), %rcx
movq -648(%rbp), %rdx
movl -680(%rbp), %esi
movl %ebx, %edi
.cfi_escape 0x2e,0x10
call _Z37__device_stub__Z5benesiiPcPiPViiS0_S_iiPcPiPViiS0_S_
addq $16, %rsp
.L50:
.cfi_escape 0x2e,0
call cudaDeviceSynchronize@PLT
movq -608(%rbp), %r10
movl $0, %esi
leal -1(%rbx), %r9d
jmp .L52
.L51:
addq $1, %rsi
cmpq $262143, %rsi
je .L69
.L52:
movl %esi, %eax
cltd
idivl %ebx
cmpl %r9d, %edx
je .L51
movzbl 1(%r10,%rsi), %ecx
testb %cl, %cl
je .L51
movsbl %cl, %ecx
movl -664(%rbp), %eax
andl %eax, %ecx
movsbl (%r10,%rsi), %r8d
andl %eax, %r8d
cmpl %r8d, %ecx
jge .L51
movl %esi, %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L70
.L69:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq -632(%rbp), %rdi
call cudaFree@PLT
movq -640(%rbp), %rdi
call cudaFree@PLT
movq -648(%rbp), %rdi
call cudaFree@PLT
movq -624(%rbp), %rdi
call cudaFree@PLT
movq -616(%rbp), %rdi
call cudaFree@PLT
movq -608(%rbp), %rdi
call cudaFree@PLT
.LEHE1:
jmp .L46
.L68:
movl $1, %r13d
jmp .L46
.L70:
movl $1, %r13d
.L46:
leaq -576(%rbp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
jmp .L42
.L59:
endbr64
movq %rax, %rbx
leaq -576(%rbp), %rdi
call _ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@PLT
movq -56(%rbp), %rax
subq %fs:40, %rax
je .L54
call __stack_chk_fail@PLT
.L54:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L65:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE6999:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA6999:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE6999-.LLSDACSB6999
.LLSDACSB6999:
.uleb128 .LEHB0-.LFB6999
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB6999
.uleb128 .LEHE1-.LEHB1
.uleb128 .L59-.LFB6999
.uleb128 0
.uleb128 .LEHB2-.LFB6999
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE6999:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.weak _ZN4cuda3std3__419piecewise_constructE
.section .rodata._ZN4cuda3std3__419piecewise_constructE,"aG",@progbits,_ZN4cuda3std3__419piecewise_constructE,comdat
.type _ZN4cuda3std3__419piecewise_constructE, @gnu_unique_object
.size _ZN4cuda3std3__419piecewise_constructE, 1
_ZN4cuda3std3__419piecewise_constructE:
.zero 1
.weak _ZN4cuda3std6ranges3__45__cpo9iter_moveE
.section .rodata._ZN4cuda3std6ranges3__45__cpo9iter_moveE,"aG",@progbits,_ZN4cuda3std6ranges3__45__cpo9iter_moveE,comdat
.type _ZN4cuda3std6ranges3__45__cpo9iter_moveE, @gnu_unique_object
.size _ZN4cuda3std6ranges3__45__cpo9iter_moveE, 1
_ZN4cuda3std6ranges3__45__cpo9iter_moveE:
.zero 1
.weak _ZN4cuda3std6ranges3__45__cpo4swapE
.section .rodata._ZN4cuda3std6ranges3__45__cpo4swapE,"aG",@progbits,_ZN4cuda3std6ranges3__45__cpo4swapE,comdat
.type _ZN4cuda3std6ranges3__45__cpo4swapE, @gnu_unique_object
.size _ZN4cuda3std6ranges3__45__cpo4swapE, 1
_ZN4cuda3std6ranges3__45__cpo4swapE:
.zero 1
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1073741824
.align 8
.LC7:
.long 0
.long 1072693248
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "main.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7makeLUTiPi
.LCPI0_0:
.quad 0xc000000000000000 # double -2
.text
.globl _Z7makeLUTiPi
.p2align 4, 0x90
.type _Z7makeLUTiPi,@function
_Z7makeLUTiPi: # @_Z7makeLUTiPi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
cvtsi2sd %edi, %xmm0
movsd %xmm0, (%rsp) # 8-byte Spill
callq log2
addsd %xmm0, %xmm0
addsd .LCPI0_0(%rip), %xmm0
mulsd (%rsp), %xmm0 # 8-byte Folded Reload
cvttsd2si %xmm0, %eax
movl %eax, %ecx
shrl $31, %ecx
addl %eax, %ecx
sarl %ecx
movl %eax, 8(%rsp) # 4-byte Spill
cmpl $2, %eax
jl .LBB0_10
# %bb.1: # %.preheader68.lr.ph
movslq %ebp, %rdx
movl %ecx, %eax
movq %rax, (%rsp) # 8-byte Spill
leaq (,%rdx,4), %rax
movq %rax, 16(%rsp) # 8-byte Spill
xorl %r8d, %r8d
movq %rbx, %rsi
movl %ebp, %r10d
movl %ebp, 12(%rsp) # 4-byte Spill
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_9: # %._crit_edge80
# in Loop: Header=BB0_2 Depth=1
movl %r10d, %eax
shrl $31, %eax
addl %r10d, %eax
sarl %eax
addq %rdx, %r8
addq 16(%rsp), %rsi # 8-byte Folded Reload
movl %eax, %r10d
cmpq (%rsp), %r8 # 8-byte Folded Reload
movl 12(%rsp), %ebp # 4-byte Reload
jge .LBB0_10
.LBB0_2: # %.preheader68
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
# Child Loop BB0_6 Depth 3
# Child Loop BB0_17 Depth 3
testl %ebp, %ebp
jle .LBB0_9
# %bb.3: # %.preheader67.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r10d, %r11d
shrl $31, %r11d
addl %r10d, %r11d
sarl %r11d
movslq %r11d, %r14
movslq %r10d, %r15
leaq (,%r15,4), %r12
movl $1, %r13d
xorl %eax, %eax
movq %rsi, %r9
xorl %edi, %edi
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_8: # %._crit_edge
# in Loop: Header=BB0_4 Depth=2
addq %r15, %rax
addq %r12, %r9
cmpq %rdx, %rax
jge .LBB0_9
.LBB0_4: # %.preheader67
# Parent Loop BB0_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_6 Depth 3
# Child Loop BB0_17 Depth 3
cmpl $2, %r10d
jl .LBB0_7
# %bb.5: # %.lr.ph
# in Loop: Header=BB0_4 Depth=2
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB0_6: # Parent Loop BB0_2 Depth=1
# Parent Loop BB0_4 Depth=2
# => This Inner Loop Header: Depth=3
movl %edi, (%r9,%rbp,4)
addl $2, %edi
incq %rbp
cmpq %rbp, %r11
jne .LBB0_6
.LBB0_7: # %.preheader66
# in Loop: Header=BB0_4 Depth=2
movq %r14, %rbp
cmpl %r10d, %r11d
jge .LBB0_8
.p2align 4, 0x90
.LBB0_17: # Parent Loop BB0_2 Depth=1
# Parent Loop BB0_4 Depth=2
# => This Inner Loop Header: Depth=3
movl %r13d, (%r9,%rbp,4)
addl $2, %r13d
incq %rbp
cmpq %rbp, %r15
jne .LBB0_17
jmp .LBB0_8
.LBB0_10: # %.preheader65
movl 8(%rsp), %r10d # 4-byte Reload
movl %r10d, %eax
subl %ecx, %eax
jle .LBB0_16
# %bb.11: # %.preheader.lr.ph
movl %ebp, %edx
subl %ebp, %eax
movl %ebp, %esi
negl %esi
jmp .LBB0_12
.p2align 4, 0x90
.LBB0_15: # %._crit_edge86
# in Loop: Header=BB0_12 Depth=1
addl %ebp, %ecx
addq %rsi, %rax
cmpl %r10d, %ecx
jge .LBB0_16
.LBB0_12: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_14 Depth 2
testl %ebp, %ebp
jle .LBB0_15
# %bb.13: # %.lr.ph85
# in Loop: Header=BB0_12 Depth=1
movslq %ecx, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB0_14: # Parent Loop BB0_12 Depth=1
# => This Inner Loop Header: Depth=2
leal (%rax,%r8), %r9d
movslq %r9d, %r9
movslq (%rbx,%r9,4), %r9
addq %rdi, %r9
movl %r8d, (%rbx,%r9,4)
incq %r8
cmpq %r8, %rdx
jne .LBB0_14
jmp .LBB0_15
.LBB0_16: # %._crit_edge90
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7makeLUTiPi, .Lfunc_end0-_Z7makeLUTiPi
.cfi_endproc
# -- End function
.globl _Z10createMaski # -- Begin function _Z10createMaski
.p2align 4, 0x90
.type _Z10createMaski,@function
_Z10createMaski: # @_Z10createMaski
.cfi_startproc
# %bb.0:
testl %edi, %edi
jle .LBB1_1
# %bb.3: # %.lr.ph.preheader
xorl %ecx, %ecx
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
btsl %ecx, %eax
incl %ecx
cmpl %ecx, %edi
jne .LBB1_4
# %bb.2: # %._crit_edge
retq
.LBB1_1:
xorl %eax, %eax
retq
.Lfunc_end1:
.size _Z10createMaski, .Lfunc_end1-_Z10createMaski
.cfi_endproc
# -- End function
.globl _Z20__device_stub__benesiiPcPiPViiS0_S_ # -- Begin function _Z20__device_stub__benesiiPcPiPViiS0_S_
.p2align 4, 0x90
.type _Z20__device_stub__benesiiPcPiPViiS0_S_,@function
_Z20__device_stub__benesiiPcPiPViiS0_S_: # @_Z20__device_stub__benesiiPcPiPViiS0_S_
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 20(%rsp)
movl %esi, 16(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movl %r9d, 12(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
leaq 184(%rsp), %rax
movq %rax, 152(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z5benesiiPcPiPViiS0_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end2:
.size _Z20__device_stub__benesiiPcPiPViiS0_S_, .Lfunc_end2-_Z20__device_stub__benesiiPcPiPViiS0_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0xbff0000000000000 # double -1
.LCPI3_1:
.quad 0xc000000000000000 # double -2
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $760, %rsp # imm = 0x2F8
.cfi_def_cfa_offset 816
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $3, %edi
jne .LBB3_1
# %bb.2:
movq %rsi, %rbx
movq 8(%rsi), %rsi
.cfi_escape 0x2e, 0x00
leaq 240(%rsp), %rdi
movl $4, %edx
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode
movq 240(%rsp), %rax
movq -24(%rax), %rax
testb $5, 272(%rsp,%rax)
je .LBB3_4
# %bb.3:
.cfi_escape 0x2e, 0x00
movl $.Lstr.1, %edi
callq puts@PLT
movl $1, %ebx
jmp .LBB3_46
.LBB3_1:
movq (%rsi), %rsi
.cfi_escape 0x2e, 0x00
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $1, %ebx
jmp .LBB3_47
.LBB3_4:
movq 16(%rbx), %rdi
.cfi_escape 0x2e, 0x00
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
cmpl $262144, %r14d # imm = 0x40000
movl $262144, %ebx # imm = 0x40000
cmovll %r14d, %ebx
cvtsi2sd %ebx, %xmm0
.cfi_escape 0x2e, 0x00
movsd %xmm0, (%rsp) # 8-byte Spill
callq log2
addsd %xmm0, %xmm0
addsd .LCPI3_0(%rip), %xmm0
cvttsd2si %xmm0, %ebp
testl %r14d, %r14d
jle .LBB3_5
.LBB3_6: # %cdce.end
.cfi_escape 0x2e, 0x00
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq log2
movsd %xmm0, 80(%rsp) # 8-byte Spill
cmpl $262144, %r14d # imm = 0x40000
setl %cl
movl %ebp, 56(%rsp) # 4-byte Spill
movl %ebp, %eax
shll %cl, %eax
movq %rax, 72(%rsp) # 8-byte Spill
leal 1(%rax), %r12d
movl %r12d, %eax
imull %ebx, %eax
movslq %eax, %r15
.Ltmp0:
.cfi_escape 0x2e, 0x00
leaq 48(%rsp), %rdi
movq %r15, %rsi
movl $1, %edx
callq hipMallocManaged
.Ltmp1:
# %bb.7: # %_ZL16hipMallocManagedIcE10hipError_tPPT_mj.exit
movsd 80(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
addsd %xmm0, %xmm0
addsd .LCPI3_1(%rip), %xmm0
mulsd (%rsp), %xmm0 # 8-byte Folded Reload
cvttsd2si %xmm0, %r13d
movq 48(%rsp), %rdi
.cfi_escape 0x2e, 0x00
xorl %ebp, %ebp
xorl %esi, %esi
movq %r15, %rdx
callq memset@PLT
movslq %r13d, %rsi
shlq $2, %rsi
.Ltmp3:
.cfi_escape 0x2e, 0x00
leaq 40(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
.Ltmp4:
# %bb.8: # %_ZL16hipMallocManagedIiE10hipError_tPPT_mj.exit
movl %ebx, %r15d
shrl $31, %r15d
addl %ebx, %r15d
sarl %r15d
movq 40(%rsp), %rsi
.cfi_escape 0x2e, 0x00
movl %ebx, %edi
callq _Z7makeLUTiPi
.cfi_escape 0x2e, 0x00
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq log2
cvttsd2si %xmm0, %eax
testl %eax, %eax
movq 72(%rsp), %r13 # 8-byte Reload
jle .LBB3_11
# %bb.9: # %.lr.ph.i.preheader
xorl %ecx, %ecx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB3_10: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
btsl %ecx, %ebp
incl %ecx
cmpl %ecx, %eax
jne .LBB3_10
.LBB3_11: # %_Z10createMaski.exit
movl %r13d, %eax
imull %r15d, %eax
movslq %eax, %rsi
shlq $2, %rsi
.Ltmp6:
.cfi_escape 0x2e, 0x00
leaq 16(%rsp), %rdi
movl $1, %edx
callq hipMallocManaged
.Ltmp7:
# %bb.12: # %_ZL16hipMallocManagedIiE10hipError_tPPT_mj.exit80
movq 16(%rsp), %rdi
imull %r15d, %r12d
movslq %r12d, %rdx
shlq $2, %rdx
.cfi_escape 0x2e, 0x00
xorl %esi, %esi
callq memset@PLT
cmpl $2, %r14d
jl .LBB3_15
# %bb.13: # %.lr.ph
movq 16(%rsp), %rax
cmpl $2, %r15d
movl $1, %ecx
cmovgel %r15d, %ecx
xorl %edx, %edx
.p2align 4, 0x90
.LBB3_14: # =>This Inner Loop Header: Depth=1
movl $1, (%rax,%rdx,4)
incq %rdx
cmpq %rdx, %rcx
jne .LBB3_14
.LBB3_15: # %._crit_edge
.Ltmp9:
.cfi_escape 0x2e, 0x00
leaq 32(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $1, %edx
callq hipMallocManaged
.Ltmp10:
# %bb.16: # %_ZL16hipMallocManagedIcE10hipError_tPPT_mj.exit81
movq 32(%rsp), %r14
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
movq %r14, %rdi
xorl %esi, %esi
callq memset@PLT
.Ltmp11:
.cfi_escape 0x2e, 0x00
leaq 240(%rsp), %rdi
movl $1048576, %edx # imm = 0x100000
movq %r14, %rsi
callq _ZNSi4readEPcl
.Ltmp12:
# %bb.17:
leaq 256(%rsp), %rdi
.Ltmp13:
.cfi_escape 0x2e, 0x00
callq _ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv
.Ltmp14:
# %bb.18: # %.noexc
testq %rax, %rax
jne .LBB3_20
# %bb.19:
movq 240(%rsp), %rax
movq -24(%rax), %rax
leaq (%rsp,%rax), %rdi
addq $240, %rdi
movl 272(%rsp,%rax), %esi
orl $4, %esi
.Ltmp15:
.cfi_escape 0x2e, 0x00
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.Ltmp16:
.LBB3_20: # %_ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv.exit
.Ltmp18:
.cfi_escape 0x2e, 0x00
leaq 24(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $1, %edx
callq hipMallocManaged
.Ltmp19:
# %bb.21: # %_ZL16hipMallocManagedIiE10hipError_tPPT_mj.exit84
movq 24(%rsp), %rdi
movq 32(%rsp), %rsi
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
callq memcpy@PLT
.Ltmp21:
.cfi_escape 0x2e, 0x00
leaq 8(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
movl $1, %edx
callq hipMallocManaged
.Ltmp22:
# %bb.22: # %_ZL16hipMallocManagedIcE10hipError_tPPT_mj.exit86
movq 8(%rsp), %rdi
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
xorl %esi, %esi
callq memset@PLT
movl %r13d, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl %r15d, %edx
orq %rax, %rdx
.Ltmp23:
.cfi_escape 0x2e, 0x00
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
.Ltmp24:
# %bb.23:
testl %eax, %eax
jne .LBB3_26
# %bb.24:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 16(%rsp), %rdx
movq 24(%rsp), %rsi
movq 8(%rsp), %rdi
movl %ebx, 68(%rsp)
movl 56(%rsp), %r8d # 4-byte Reload
movl %r8d, 64(%rsp)
movq %rax, 168(%rsp)
movq %rcx, 160(%rsp)
movq %rdx, 152(%rsp)
movl %ebp, 60(%rsp)
movq %rsi, 144(%rsp)
movq %rdi, 136(%rsp)
leaq 68(%rsp), %rax
movq %rax, 176(%rsp)
leaq 64(%rsp), %rax
movq %rax, 184(%rsp)
leaq 168(%rsp), %rax
movq %rax, 192(%rsp)
leaq 160(%rsp), %rax
movq %rax, 200(%rsp)
leaq 152(%rsp), %rax
movq %rax, 208(%rsp)
leaq 60(%rsp), %rax
movq %rax, 216(%rsp)
leaq 144(%rsp), %rax
movq %rax, 224(%rsp)
leaq 136(%rsp), %rax
movq %rax, 232(%rsp)
.Ltmp25:
.cfi_escape 0x2e, 0x00
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp26:
# %bb.25: # %.noexc87
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
.Ltmp27:
.cfi_escape 0x2e, 0x10
leaq 176(%rsp), %r9
movl $_Z5benesiiPcPiPViiS0_S_, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp28:
.LBB3_26:
.Ltmp29:
.cfi_escape 0x2e, 0x00
callq hipDeviceSynchronize
.Ltmp30:
# %bb.27: # %.preheader
leal -1(%rbx), %edi
movq 8(%rsp), %r8
xorl %esi, %esi
xorl %r14d, %r14d
jmp .LBB3_28
.p2align 4, 0x90
.LBB3_37: # in Loop: Header=BB3_28 Depth=1
cmpq $262142, %rsi # imm = 0x3FFFE
leaq 1(%rsi), %rax
setae %r14b
movq %rax, %rsi
cmpq $262143, %rax # imm = 0x3FFFF
je .LBB3_38
.LBB3_28: # =>This Inner Loop Header: Depth=1
movl %esi, %eax
cltd
idivl %ebx
cmpl %edi, %edx
je .LBB3_37
# %bb.29: # in Loop: Header=BB3_28 Depth=1
movsbl 1(%r8,%rsi), %edx
testl %edx, %edx
je .LBB3_37
# %bb.30: # in Loop: Header=BB3_28 Depth=1
andl %ebp, %edx
movsbl (%r8,%rsi), %ecx
andl %ebp, %ecx
cmpl %ecx, %edx
jge .LBB3_37
# %bb.31:
.cfi_escape 0x2e, 0x00
movl $.L.str.2, %edi
# kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq printf
movl $1, %ebx
testb $1, %r14b
jne .LBB3_40
jmp .LBB3_46
.LBB3_38:
xorl %ebx, %ebx
testb $1, %r14b
je .LBB3_46
.LBB3_40:
.cfi_escape 0x2e, 0x00
movl $.Lstr, %edi
callq puts@PLT
movq 16(%rsp), %rdi
.Ltmp31:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp32:
# %bb.41:
movq 40(%rsp), %rdi
.Ltmp33:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp34:
# %bb.42:
movq 48(%rsp), %rdi
.Ltmp35:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp36:
# %bb.43:
movq 32(%rsp), %rdi
.Ltmp37:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp38:
# %bb.44:
movq 24(%rsp), %rdi
.Ltmp39:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp40:
# %bb.45:
movq 8(%rsp), %rdi
.Ltmp41:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp42:
.LBB3_46:
.cfi_escape 0x2e, 0x00
leaq 240(%rsp), %rdi
movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
leaq 496(%rsp), %rdi
.cfi_escape 0x2e, 0x00
callq _ZNSt8ios_baseD2Ev
.LBB3_47:
movl %ebx, %eax
addq $760, %rsp # imm = 0x2F8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_5: # %cdce.call
.cfi_def_cfa_offset 816
.cfi_escape 0x2e, 0x00
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq log2
jmp .LBB3_6
.LBB3_48:
.Ltmp20:
jmp .LBB3_49
.LBB3_34:
.Ltmp8:
jmp .LBB3_49
.LBB3_33:
.Ltmp5:
jmp .LBB3_49
.LBB3_32:
.Ltmp2:
jmp .LBB3_49
.LBB3_35:
.Ltmp17:
jmp .LBB3_49
.LBB3_36:
.Ltmp43:
.LBB3_49:
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
leaq 240(%rsp), %rdi
movl $_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE, %esi
callq _ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev
leaq 496(%rsp), %rdi
.cfi_escape 0x2e, 0x00
callq _ZNSt8ios_baseD2Ev
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table3:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp1-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp3-.Ltmp1 # Call between .Ltmp1 and .Ltmp3
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp4-.Ltmp3 # Call between .Ltmp3 and .Ltmp4
.uleb128 .Ltmp5-.Lfunc_begin0 # jumps to .Ltmp5
.byte 0 # On action: cleanup
.uleb128 .Ltmp6-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp7-.Ltmp6 # Call between .Ltmp6 and .Ltmp7
.uleb128 .Ltmp8-.Lfunc_begin0 # jumps to .Ltmp8
.byte 0 # On action: cleanup
.uleb128 .Ltmp7-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp9-.Ltmp7 # Call between .Ltmp7 and .Ltmp9
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp9-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Ltmp10-.Ltmp9 # Call between .Ltmp9 and .Ltmp10
.uleb128 .Ltmp17-.Lfunc_begin0 # jumps to .Ltmp17
.byte 0 # On action: cleanup
.uleb128 .Ltmp10-.Lfunc_begin0 # >> Call Site 8 <<
.uleb128 .Ltmp11-.Ltmp10 # Call between .Ltmp10 and .Ltmp11
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp11-.Lfunc_begin0 # >> Call Site 9 <<
.uleb128 .Ltmp16-.Ltmp11 # Call between .Ltmp11 and .Ltmp16
.uleb128 .Ltmp17-.Lfunc_begin0 # jumps to .Ltmp17
.byte 0 # On action: cleanup
.uleb128 .Ltmp18-.Lfunc_begin0 # >> Call Site 10 <<
.uleb128 .Ltmp19-.Ltmp18 # Call between .Ltmp18 and .Ltmp19
.uleb128 .Ltmp20-.Lfunc_begin0 # jumps to .Ltmp20
.byte 0 # On action: cleanup
.uleb128 .Ltmp19-.Lfunc_begin0 # >> Call Site 11 <<
.uleb128 .Ltmp21-.Ltmp19 # Call between .Ltmp19 and .Ltmp21
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp21-.Lfunc_begin0 # >> Call Site 12 <<
.uleb128 .Ltmp22-.Ltmp21 # Call between .Ltmp21 and .Ltmp22
.uleb128 .Ltmp43-.Lfunc_begin0 # jumps to .Ltmp43
.byte 0 # On action: cleanup
.uleb128 .Ltmp22-.Lfunc_begin0 # >> Call Site 13 <<
.uleb128 .Ltmp23-.Ltmp22 # Call between .Ltmp22 and .Ltmp23
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp23-.Lfunc_begin0 # >> Call Site 14 <<
.uleb128 .Ltmp42-.Ltmp23 # Call between .Ltmp23 and .Ltmp42
.uleb128 .Ltmp43-.Lfunc_begin0 # jumps to .Ltmp43
.byte 0 # On action: cleanup
.uleb128 .Ltmp42-.Lfunc_begin0 # >> Call Site 15 <<
.uleb128 .Lfunc_end3-.Ltmp42 # Call between .Ltmp42 and .Lfunc_end3
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z5benesiiPcPiPViiS0_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z5benesiiPcPiPViiS0_S_,@object # @_Z5benesiiPcPiPViiS0_S_
.section .rodata,"a",@progbits
.globl _Z5benesiiPcPiPViiS0_S_
.p2align 3, 0x0
_Z5benesiiPcPiPViiS0_S_:
.quad _Z20__device_stub__benesiiPcPiPViiS0_S_
.size _Z5benesiiPcPiPViiS0_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: %s <input.txt> <size>\n"
.size .L.str, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "ERROR in routing at output %d %d %d\n"
.size .L.str.2, 37
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z5benesiiPcPiPViiS0_S_"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Routing was successful!"
.size .Lstr, 24
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Could not open input file"
.size .Lstr.1, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z20__device_stub__benesiiPcPiPViiS0_S_
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z5benesiiPcPiPViiS0_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // Esempio collective operations: reduce_sum
#include <cooperative_groups.h>
//using namespace cooperative_groups;
namespace cg = cooperative_groups;
#include <locale>
#include <stdlib.h>
#include <iostream>
#include <experimental/random>
#include <time.h>
#define RNG_MAX_VAL 3 // 5 // 50 // max rng val for array elems
// Abort the program with a readable diagnostic when a CUDA runtime call
// fails; called through the HANDLE_ERROR macro so file/line point at the
// failing call site. No-op on cudaSuccess.
static void HandleError( cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    std::cout << cudaGetErrorString( err ) << " in " << file << " line " << line << std::endl;
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
// Fill v[0..n) with uniform random integers in [0, RNG_MAX_VAL] drawn
// from the global <experimental/random> engine (seeded via reseed()).
void init_vec(int *v, int n) {
    for (int *p = v, *end = v + n; p != end; ++p) {
        *p = std::experimental::randint(0,RNG_MAX_VAL);
        //*p = p - v;
    }
}
// Print the vector as "v0, v1, ..., v(n-1)" framed by blank lines.
// NOTE(review): reads v[0] unconditionally, so it assumes n >= 1 —
// callers in this file only invoke it for small non-empty vectors.
void show_vec(int *v, int n) {
    std::cout << "\n" << v[0];
    int idx = 1;
    while (idx < n) {
        std::cout << ", " << v[idx];
        ++idx;
    }
    std::cout << "\n" << std::endl;
}
// Sequential CPU reference: sum of v[0..n); returns 0 when n <= 0.
// Used to validate the GPU reduction result in main().
int cpu_sum(int *v, int n) {
    int total = 0;
    for (int *p = v, *end = v + n; p < end; ++p) {
        total += *p;
    }
    return total;
}
// Codice Prof
// Tree reduction over thread group `g`: returns the sum of `val` across
// all threads of the group. `temp` is scratch shared memory that must
// hold at least g.size() ints. Both g.sync() barriers are required: the
// first publishes every partial before any thread reads it, the second
// keeps slower threads from overwriting temp[] on the next iteration.
// NOTE(review): the halving loop assumes g.size() is a power of two —
// confirm against the launch configuration.
__device__ int reduce_sum(cg::thread_group g, int *temp, int val) {
    int lane = g.thread_rank();
    // each iteration halves the number of active threads;
    // each thread adds the partial temp[lane+i] into its own val
    for (int i=g.size()/2; i>0; i/=2) {
        temp[lane] = val;
        g.sync(); // wait for all threads of the group
        if (lane < i) val += temp[lane+i];
        g.sync();
    }
    return val; // only thread 0 holds the complete sum
}
// Per-thread partial sum of input[0..n) using a grid-stride loop.
// The main loop does 16-byte int4 vector loads (input must be 16-byte
// aligned, which cudaMalloc guarantees). A scalar tail loop now covers
// the last n % 4 elements, which the original silently dropped, so n
// no longer has to be a multiple of 4; behavior is unchanged when it is.
__device__ int thread_sum(int *input, int n) {
    int sum = 0;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = tid; i < n/4; i += stride) { // strided, coalesced access
        int4 in = ((int4*)input)[i];          // vector load is more efficient
        sum += in.x + in.y + in.z + in.w;
    }
    for (int i = (n/4)*4 + tid; i < n; i += stride) { // remainder elements
        sum += input[i];
    }
    return sum;
}
// Each thread accumulates a strided partial sum over `input`, the block
// reduces those partials in dynamically sized shared memory, and thread 0
// publishes the block total into *sum with a single atomicAdd.
__global__ void sum_kernel_block(int *sum, int *input, int n) {
    extern __shared__ int temp[]; // extern: size is the 3rd <<<...>>> launch argument
    auto g = cg::this_thread_block();
    int partial = thread_sum(input, n);
    int block_sum = reduce_sum(g, temp, partial);
    if (g.thread_rank() == 0)
        atomicAdd(sum, block_sum);
}
// END // Codice Prof
// WARNING!! Only works for n = 2^k with k > 1
//int n = 1<<24; // array len = 16M // n=2^24 // bit shift operation
//int blockSize = 256;
////int nBlocks = (n+blockSize-1) / blockSize; // work as ceiling
//int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
//int sharedBytes = blockSize * sizeof(int);
// toy example
int n = 16;       // number of array elements (must be 2^k with k > 1, per the note above)
int blockSize = 2; // threads per block
int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // ceiling division; the block count can be reduced because each thread consumes 4 elements via int4 loads
int sharedBytes = blockSize * sizeof(int); // dynamic shared memory: one int per thread for reduce_sum's temp[]
// Driver: fills a host vector with random ints, reduces it on the GPU via
// sum_kernel_block, and validates the result against a CPU reference sum.
// Fixes over the original: d_sum is zeroed before the kernel atomicAdds
// into it (it was previously uninitialized device memory), launch errors
// are checked via cudaGetLastError(), cudaFree results are checked, host
// mallocs are checked, and host buffers are freed before returning.
int main( void ) {
    //int seed = (int)time(NULL);
    int seed = 1619508961; // fixed seed for reproducible runs
    std::experimental::reseed(seed);
    std::cout << "seed = " << seed << std::endl;
    std::cout << "\nn = " << n << std::endl;
    std::cout << "blockSize = " << blockSize << std::endl;
    std::cout << "nBlocks = " << nBlocks << std::endl;
    std::cout << "sharedBytes = " << sharedBytes << "\n" << std::endl;

    size_t data_size = (size_t)n*sizeof(int);
    int *sum, *data;
    sum = (int*)malloc(sizeof(int));
    data = (int*)malloc(data_size);
    if (sum == NULL || data == NULL) { // malloc can fail
        std::cout << "host allocation failed" << std::endl;
        exit(EXIT_FAILURE);
    }

    int *d_sum, *d_data;
    HANDLE_ERROR(cudaMalloc((void**)&d_sum, sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&d_data, data_size));
    // the kernel accumulates with atomicAdd, so the target must start at 0
    HANDLE_ERROR(cudaMemset(d_sum, 0, sizeof(int)));

    init_vec(data,n);
    if (n < 32) // show the vector only when it is small
        show_vec(data,n);

    HANDLE_ERROR(cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice));

    sum_kernel_block<<<nBlocks, blockSize, sharedBytes>>>(d_sum, d_data, n);
    HANDLE_ERROR(cudaGetLastError());      // catch launch-configuration errors
    HANDLE_ERROR(cudaDeviceSynchronize()); // catch async execution errors
    HANDLE_ERROR(cudaMemcpy(sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost));

    int c_sum = cpu_sum(data,n);
    std::cout << "c_sum = " << c_sum << std::endl;
    std::cout << "g_sum = " << *sum << std::endl;
    if (c_sum == *sum)
        std::cout << "\nCorrect" << std::endl;
    else
        std::cout << "\nWRONG!" << std::endl;

    HANDLE_ERROR(cudaFree(d_data));
    HANDLE_ERROR(cudaFree(d_sum));
    free(data); // fix host-memory leak
    free(sum);
    return 0;
}
Function : _Z16sum_kernel_blockPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR5, c[0x0][0x170] ; /* 0x00005c0000057ab9 */
/* 0x000fe20000000800 */
/*0030*/ BSSY B0, 0x1a0 ; /* 0x0000016000007945 */
/* 0x000fe20003800000 */
/*0040*/ USHF.R.S32.HI UR4, URZ, 0x1f, UR5 ; /* 0x0000001f3f047899 */
/* 0x000fe20008011405 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0070*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fe200078e00ff */
/*0080*/ ULEA.HI UR4, UR4, UR5, URZ, 0x2 ; /* 0x0000000504047291 */
/* 0x000fe2000f8f103f */
/*0090*/ S2R R2, SR_TID.Z ; /* 0x0000000000027919 */
/* 0x000e660000002300 */
/*00a0*/ USHF.R.S32.HI UR4, URZ, 0x2, UR4 ; /* 0x000000023f047899 */
/* 0x000fe20008011404 */
/*00b0*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */
/* 0x000ea20000002200 */
/*00c0*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*00d0*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06270 */
/*00e0*/ @P0 BRA 0x190 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*00f0*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */
/* 0x006fe400000001ff */
/*0100*/ IMAD.MOV.U32 R5, RZ, RZ, 0x10 ; /* 0x00000010ff057424 */
/* 0x000fc800078e00ff */
/*0110*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fcc00078e0205 */
/*0120*/ LDG.E.128 R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea2000c1e1d00 */
/*0130*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0b7624 */
/* 0x000fc800078e00ff */
/*0140*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */
/* 0x000fca00078e0200 */
/*0150*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe4000bf06270 */
/*0160*/ IADD3 R8, R4, R5, R8 ; /* 0x0000000504087210 */
/* 0x004fc80007ffe008 */
/*0170*/ IADD3 R8, R7, R8, R6 ; /* 0x0000000807087210 */
/* 0x000fce0007ffe006 */
/*0180*/ @!P0 BRA 0x100 ; /* 0xffffff7000008947 */
/* 0x000fea000383ffff */
/*0190*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x006fea0003800000 */
/*01a0*/ ULDC.64 UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000a00 */
/*01b0*/ IMAD R0, R2, c[0x0][0x4], R9 ; /* 0x0000010002007a24 */
/* 0x000fe200078e0209 */
/*01c0*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fe2000f8e023f */
/*01d0*/ IADD3 R5, -R3, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x000fe40007ffe1ff */
/*01e0*/ ULDC UR5, c[0x0][0x8] ; /* 0x0000020000057ab9 */
/* 0x000fe20000000800 */
/*01f0*/ IMAD R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a24 */
/* 0x000fe200078e02ff */
/*0200*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc8000f8e023f */
/*0210*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe20008011604 */
/*0220*/ ISETP.NE.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x000fca0003f05270 */
/*0230*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf25270 */
/*0240*/ @!P1 BRA 0x330 ; /* 0x000000e000009947 */
/* 0x000fea0003800000 */
/*0250*/ IMAD.IADD R3, R0, 0x1, R3 ; /* 0x0000000100037824 */
/* 0x000fe400078e0203 */
/*0260*/ IMAD.U32 R0, RZ, RZ, UR4 ; /* 0x00000004ff007e24 */
/* 0x000fc6000f8e00ff */
/*0270*/ SHF.L.U32 R5, R3, 0x2, RZ ; /* 0x0000000203057819 */
/* 0x000fe400000006ff */
/*0280*/ ISETP.GT.AND P1, PT, R0.reuse, R3, PT ; /* 0x000000030000720c */
/* 0x040fe20003f24270 */
/*0290*/ STS [R3.X4], R8 ; /* 0x0000000803007388 */
/* 0x000fe80000004800 */
/*02a0*/ BAR.SYNC 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000000000 */
/*02b0*/ ISETP.GT.AND P2, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fc40003f44270 */
/*02c0*/ @P1 IMAD R2, R0.reuse, 0x4, R5 ; /* 0x0000000400021824 */
/* 0x040fe200078e0205 */
/*02d0*/ LEA.HI R4, R0, R0, RZ, 0x1 ; /* 0x0000000000047211 */
/* 0x000fc800078f08ff */
/*02e0*/ @P1 LDS R7, [R2] ; /* 0x0000000002071984 */
/* 0x000e220000000800 */
/*02f0*/ SHF.R.S32.HI R0, RZ, 0x1, R4 ; /* 0x00000001ff007819 */
/* 0x000fe20000011404 */
/*0300*/ @P1 IMAD.IADD R8, R8, 0x1, R7 ; /* 0x0000000108081824 */
/* 0x001fe400078e0207 */
/*0310*/ BAR.SYNC 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000000000 */
/*0320*/ @P2 BRA 0x280 ; /* 0xffffff5000002947 */
/* 0x000fea000383ffff */
/*0330*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0340*/ S2R R0, SR_LANEID ; /* 0x0000000000007919 */
/* 0x000e220000000000 */
/*0350*/ REDUX.SUM UR5, R8 ; /* 0x00000000080573c4 */
/* 0x000e62000000c000 */
/*0360*/ VOTEU.ANY UR4, UPT, PT ; /* 0x0000000000047886 */
/* 0x000fe200038e0100 */
/*0370*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0380*/ UFLO.U32 UR4, UR4 ; /* 0x00000004000472bd */
/* 0x000fe200080e0000 */
/*0390*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fca00078e00ff */
/*03a0*/ ISETP.EQ.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x001fe4000bf02070 */
/*03b0*/ MOV R5, UR5 ; /* 0x0000000500057c02 */
/* 0x002fd60008000f00 */
/*03c0*/ @P0 RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200098e */
/* 0x000fe2000c10e186 */
/*03d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03e0*/ BRA 0x3e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Esempio collective operations: reduce_sum
#include <cooperative_groups.h>
//using namespace cooperative_groups;
namespace cg = cooperative_groups;
#include <locale>
#include <stdlib.h>
#include <iostream>
#include <experimental/random>
#include <time.h>
#define RNG_MAX_VAL 3 // 5 // 50 // max rng val for array elems
// Abort the program with file/line context when a CUDA API call fails.
static void HandleError( cudaError_t err, const char *file, int line) {
  if (err != cudaSuccess) {
    std::cout << cudaGetErrorString( err ) << " in " << file << " line " << line << std::endl;
    exit(EXIT_FAILURE);
  }
}
// Wraps a CUDA call so failures automatically report the call site.
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
// Fill v[0..n) with uniform random ints in [0, RNG_MAX_VAL].
void init_vec(int *v, int n) {
  for (int i=0; i<n; i++) {
    v[i] = std::experimental::randint(0,RNG_MAX_VAL);
    //v[i] = i;  // deterministic alternative kept for debugging
  }
}
// Print the n elements of v on one comma-separated line, framed by blank
// lines. Reads v[0] unconditionally, so callers must pass n >= 1.
void show_vec(int *v, int n) {
  std::cout << "\n" << v[0];
  int k = 1;
  while (k < n) {
    std::cout << ", " << v[k];
    ++k;
  }
  std::cout << "\n" << std::endl;
}
// Sequential reference sum of v[0..n); used to validate the GPU result.
int cpu_sum(int *v, int n) {
  int total = 0;
  for (int idx = 0; idx < n; ++idx)
    total += v[idx];
  return total;
}
// Professor's code
// Tree reduction of val over the thread group g using the shared buffer
// temp (must hold at least g.size() ints). Both sync() barriers are
// required: one before reading partials, one before overwriting temp.
__device__ int reduce_sum(cg::thread_group g, int *temp, int val) {
  int lane = g.thread_rank();
  // each iteration halves the number of active threads;
  // each active thread adds the partial temp[lane+i] into its own val
  for (int i=g.size()/2; i>0; i/=2) {
    temp[lane] = val;
    g.sync(); // wait for all threads in the group
    if (lane < i) val += temp[lane+i];
    g.sync();
  }
  return val; // only thread 0 ends up holding the complete sum
}
// Per-thread partial sum of input, read 4 ints at a time via int4.
// assumes n is a multiple of 4 — elements past 4*(n/4) are skipped; TODO confirm
__device__ int thread_sum(int *input, int n) {
  int sum=0;
  for (int i=blockIdx.x * blockDim.x + threadIdx.x;
      i<n/4;
      i+=blockDim.x * gridDim.x) { // grid-stride access
    int4 in = ((int4*)input)[i]; // vector load is more efficient
    sum += in.x + in.y + in.z + in.w;
  }
  return sum;
}
// Block-level sum kernel: per-thread partial sums, block-wide reduction in
// dynamic shared memory, then thread 0 contributes one atomicAdd to *sum.
// NOTE(review): *sum is assumed to be zero-initialized by the host — confirm.
__global__ void sum_kernel_block(int *sum, int *input, int n) {
  int my_sum = thread_sum(input, n);  // per-thread partial sum (int4 loads)
  extern __shared__ int temp[]; // extern because the buffer is sized
                                // dynamically by the third kernel-launch
                                // argument of <<< ... >>>
  //auto g = cg::this_thread_block();
  cg::thread_block g = cg::this_thread_block();
  int block_sum = reduce_sum(g, temp, my_sum);  // shared-memory tree reduction
  if(g.thread_rank() == 0)
    atomicAdd(sum, block_sum);  // one atomic per block into the global total
}
// END // Professor's code
// WARNING!! Works only with n = 2^k, k > 1
//int n = 1<<24; // array len = 16M // n=2^24 // bit shift operation
//int blockSize = 256;
////int nBlocks = (n+blockSize-1) / blockSize; // work as ceiling
//int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // fewer blocks are needed because the line above does not account for the int4 vector loads
//int sharedBytes = blockSize * sizeof(int);
// toy example
int n = 16;        // array length (power of two, see warning above)
int blockSize = 2; // threads per block
int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // ceiling division; each thread consumes 4 ints per iteration (int4)
int sharedBytes = blockSize * sizeof(int); // dynamic shared memory: one int per thread
// Host driver: fills a random vector, sums it on the GPU with
// sum_kernel_block, and checks the result against a sequential CPU sum.
int main( void ) {
  // Fixed seed for reproducible runs; the time-based variant is kept below.
  //int seed = (int)time(NULL);
  int seed = 1619508961;
  std::experimental::reseed(seed);
  std::cout << "seed = " << seed << std::endl;
  std::cout << "\nn = " << n << std::endl;
  std::cout << "blockSize = " << blockSize << std::endl;
  std::cout << "nBlocks = " << nBlocks << std::endl;
  std::cout << "sharedBytes = " << sharedBytes << "\n" << std::endl;
  size_t data_size = (size_t)n*sizeof(int);
  int *sum, *data;   // host-side result scalar and input buffer
  sum = (int*)malloc(sizeof(int));
  data = (int*)malloc(data_size);
  int *d_sum, *d_data;  // device-side counterparts
  HANDLE_ERROR(cudaMalloc((void**)&d_sum, sizeof(int)));
  HANDLE_ERROR(cudaMalloc((void**)&d_data, data_size));
  // BUG FIX: d_sum was never initialized, so the kernel's atomicAdd
  // accumulated into whatever garbage the allocation contained. Zero it
  // explicitly before launching.
  HANDLE_ERROR(cudaMemset(d_sum, 0, sizeof(int)));
  init_vec(data,n);
  if (n < 32) // show the vector only when it is small
    show_vec(data,n);
  HANDLE_ERROR(cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice));
  sum_kernel_block<<<nBlocks, blockSize, sharedBytes>>>(d_sum, d_data, n);
  // Catch launch-configuration errors (kernel launches return no status).
  HANDLE_ERROR(cudaGetLastError());
  HANDLE_ERROR(cudaDeviceSynchronize());
  HANDLE_ERROR(cudaMemcpy(sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost));
  int c_sum = cpu_sum(data,n);  // CPU reference result
  std::cout << "c_sum = " << c_sum << std::endl;
  std::cout << "g_sum = " << *sum << std::endl;
  if (c_sum == *sum)
    std::cout << "\nCorrect" << std::endl;
  else
    std::cout << "\nWRONG!" << std::endl;
  cudaFree(d_data);
  cudaFree(d_sum);
  // Release the host buffers too (previously leaked).
  free(data);
  free(sum);
  return 0;
}
.text
#APP
.globl _ZSt21ios_base_library_initv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string " in "
.LC1:
.string " line "
#NO_APP
.text
.type _ZL11HandleError9cudaErrorPKci, @function
_ZL11HandleError9cudaErrorPKci:
.LFB8141:
.cfi_startproc
testl %edi, %edi
jne .L6
ret
.L6:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbp
movl %edx, %ebx
call cudaGetErrorString@PLT
movq %rax, %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC0(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbp, %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE8141:
.size _ZL11HandleError9cudaErrorPKci, .-_ZL11HandleError9cudaErrorPKci
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB8150:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE8150:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .text._ZNSt12experimental15fundamentals_v217_S_randint_engineEv,"axG",@progbits,_ZNSt12experimental15fundamentals_v217_S_randint_engineEv,comdat
.weak _ZNSt12experimental15fundamentals_v217_S_randint_engineEv
.type _ZNSt12experimental15fundamentals_v217_S_randint_engineEv, @function
_ZNSt12experimental15fundamentals_v217_S_randint_engineEv:
.LFB8137:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA8137
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $4096, %rsp
.cfi_def_cfa_offset 4112
orq $0, (%rsp)
subq $944, %rsp
.cfi_def_cfa_offset 5056
movq %fs:40, %rax
movq %rax, 5032(%rsp)
xorl %eax, %eax
cmpb $0, %fs:_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@tpoff
je .L22
.L10:
movq %fs:0, %rax
addq $_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@tpoff, %rax
movq 5032(%rsp), %rdx
subq %fs:40, %rdx
jne .L23
addq $5040, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
movq %rsp, %rsi
leaq 16(%rsp), %rax
movq %rax, (%rsp)
movl $1634100580, 16(%rsp)
movl $1953264993, 19(%rsp)
movq $7, 8(%rsp)
movb $0, 23(%rsp)
leaq 32(%rsp), %rdi
.LEHB0:
call _ZNSt13random_device7_M_initERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@PLT
.LEHE0:
movq (%rsp), %rdi
leaq 16(%rsp), %rax
cmpq %rax, %rdi
je .L11
movq 16(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L11:
leaq 32(%rsp), %rdi
.LEHB1:
call _ZNSt13random_device9_M_getvalEv@PLT
.LEHE1:
movl %eax, %ecx
movabsq $8589934597, %rdx
movq %rcx, %rax
mulq %rdx
movq %rcx, %rax
subq %rdx, %rax
shrq %rax
addq %rax, %rdx
shrq $30, %rdx
movq %rdx, %rax
salq $31, %rax
subq %rdx, %rax
subq %rax, %rcx
movl $1, %eax
cmove %rax, %rcx
movq %rcx, %fs:_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@tpoff
movb $1, %fs:_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@tpoff
leaq 32(%rsp), %rdi
call _ZNSt13random_device7_M_finiEv@PLT
jmp .L10
.L19:
endbr64
movq %rax, %rbx
movq %rsp, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 5032(%rsp), %rax
subq %fs:40, %rax
je .L14
call __stack_chk_fail@PLT
.L14:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L18:
endbr64
movq %rax, %rbx
leaq 32(%rsp), %rdi
call _ZNSt13random_device7_M_finiEv@PLT
movq 5032(%rsp), %rax
subq %fs:40, %rax
je .L16
call __stack_chk_fail@PLT
.L16:
movq %rbx, %rdi
.LEHB3:
call _Unwind_Resume@PLT
.LEHE3:
.L23:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE8137:
.globl __gxx_personality_v0
.section .gcc_except_table._ZNSt12experimental15fundamentals_v217_S_randint_engineEv,"aG",@progbits,_ZNSt12experimental15fundamentals_v217_S_randint_engineEv,comdat
.LLSDA8137:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE8137-.LLSDACSB8137
.LLSDACSB8137:
.uleb128 .LEHB0-.LFB8137
.uleb128 .LEHE0-.LEHB0
.uleb128 .L19-.LFB8137
.uleb128 0
.uleb128 .LEHB1-.LFB8137
.uleb128 .LEHE1-.LEHB1
.uleb128 .L18-.LFB8137
.uleb128 0
.uleb128 .LEHB2-.LFB8137
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.uleb128 .LEHB3-.LFB8137
.uleb128 .LEHE3-.LEHB3
.uleb128 0
.uleb128 0
.LLSDACSE8137:
.section .text._ZNSt12experimental15fundamentals_v217_S_randint_engineEv,"axG",@progbits,_ZNSt12experimental15fundamentals_v217_S_randint_engineEv,comdat
.size _ZNSt12experimental15fundamentals_v217_S_randint_engineEv, .-_ZNSt12experimental15fundamentals_v217_S_randint_engineEv
.section .rodata.str1.1
.LC2:
.string "\n"
.LC3:
.string ", "
.text
.globl _Z8show_vecPii
.type _Z8show_vecPii, @function
_Z8show_vecPii:
.LFB8143:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbp
movl %esi, %r12d
movl $1, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 0(%rbp), %esi
movq %rbx, %rdi
call _ZNSolsEi@PLT
cmpl $1, %r12d
jle .L25
leaq 4(%rbp), %rbx
leal -2(%r12), %eax
leaq 8(%rbp,%rax,4), %r13
leaq .LC3(%rip), %r12
leaq _ZSt4cout(%rip), %rbp
.L26:
movl $2, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl (%rbx), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
addq $4, %rbx
cmpq %r13, %rbx
jne .L26
.L25:
movl $1, %edx
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbx
testq %rbx, %rbx
je .L32
cmpb $0, 56(%rbx)
je .L28
movzbl 67(%rbx), %esi
.L29:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
call _ZSt16__throw_bad_castv@PLT
.L28:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L29
.cfi_endproc
.LFE8143:
.size _Z8show_vecPii, .-_Z8show_vecPii
.globl _Z7cpu_sumPii
.type _Z7cpu_sumPii, @function
_Z7cpu_sumPii:
.LFB8144:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L36
movq %rdi, %rax
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rcx
movl $0, %edx
.L35:
addl (%rax), %edx
addq $4, %rax
cmpq %rcx, %rax
jne .L35
.L33:
movl %edx, %eax
ret
.L36:
movl $0, %edx
jmp .L33
.cfi_endproc
.LFE8144:
.size _Z7cpu_sumPii, .-_Z7cpu_sumPii
.globl _Z10reduce_sumN18cooperative_groups4__v112thread_groupEPii
.type _Z10reduce_sumN18cooperative_groups4__v112thread_groupEPii, @function
_Z10reduce_sumN18cooperative_groups4__v112thread_groupEPii:
.LFB8145:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE8145:
.size _Z10reduce_sumN18cooperative_groups4__v112thread_groupEPii, .-_Z10reduce_sumN18cooperative_groups4__v112thread_groupEPii
.globl _Z10thread_sumPii
.type _Z10thread_sumPii, @function
_Z10thread_sumPii:
.LFB8146:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE8146:
.size _Z10thread_sumPii, .-_Z10thread_sumPii
.globl _Z39__device_stub__Z16sum_kernel_blockPiS_iPiS_i
.type _Z39__device_stub__Z16sum_kernel_blockPiS_iPiS_i, @function
_Z39__device_stub__Z16sum_kernel_blockPiS_iPiS_i:
.LFB8172:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L46
.L42:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L47
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L46:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16sum_kernel_blockPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L42
.L47:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE8172:
.size _Z39__device_stub__Z16sum_kernel_blockPiS_iPiS_i, .-_Z39__device_stub__Z16sum_kernel_blockPiS_iPiS_i
.globl _Z16sum_kernel_blockPiS_i
.type _Z16sum_kernel_blockPiS_i, @function
_Z16sum_kernel_blockPiS_i:
.LFB8173:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z16sum_kernel_blockPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE8173:
.size _Z16sum_kernel_blockPiS_i, .-_Z16sum_kernel_blockPiS_i
.section .rodata.str1.1
.LC4:
.string "_Z16sum_kernel_blockPiS_i"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "_ZN37_INTERNAL_487d48ef_7_main_cu_21f3d31a4cuda3std3__419piecewise_constructE"
.align 8
.LC6:
.string "_ZN37_INTERNAL_487d48ef_7_main_cu_21f3d31a4cuda3std6ranges3__45__cpo4swapE"
.align 8
.LC7:
.string "_ZN37_INTERNAL_487d48ef_7_main_cu_21f3d31a4cuda3std6ranges3__45__cpo9iter_moveE"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB8175:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z16sum_kernel_blockPiS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std3__419piecewise_constructE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std6ranges3__45__cpo4swapE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $1, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZN4cuda3std6ranges3__45__cpo9iter_moveE(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE8175:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE,"axG",@progbits,_ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE,comdat
.align 2
.weak _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
.type _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE, @function
_ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE:
.LFB8960:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, %rbx
movq %rdx, %rbp
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movslq 4(%rdx), %r12
movslq (%rdx), %rax
subq %rax, %r12
cmpq $2147483644, %r12
ja .L53
addq $1, %r12
movl $2147483645, %eax
movl $0, %edx
divq %r12
movq %rax, %r8
imulq %rax, %r12
movq (%rsi), %rdx
movabsq $8589934597, %rdi
.L54:
imulq $16807, %rdx, %rsi
movq %rsi, %rax
mulq %rdi
movq %rsi, %rcx
subq %rdx, %rcx
shrq %rcx
addq %rcx, %rdx
shrq $30, %rdx
movq %rdx, %rcx
salq $31, %rcx
subq %rdx, %rcx
subq %rcx, %rsi
movq %rsi, %rdx
leaq -1(%rsi), %rax
cmpq %r12, %rax
jnb .L54
movq %rsi, (%rbx)
movl $0, %edx
divq %r8
.L55:
addl 0(%rbp), %eax
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L62
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movq %rdi, %r13
cmpq $2147483645, %r12
jbe .L56
movq %r12, %rdx
shrq %rdx
movabsq $-9223372028264841207, %rcx
movq %rdx, %rax
mulq %rcx
shrq $29, %rdx
movl %edx, %r14d
movabsq $8589934597, %r15
.L60:
movl $0, (%rsp)
movl %r14d, 4(%rsp)
movq %rsp, %rdx
movq %rbx, %rsi
movq %r13, %rdi
call _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
cltq
movq %rax, %rcx
salq $30, %rcx
subq %rax, %rcx
addq %rcx, %rcx
imulq $16807, (%rbx), %rsi
movq %rsi, %rax
mulq %r15
movq %rsi, %rax
subq %rdx, %rax
shrq %rax
addq %rax, %rdx
shrq $30, %rdx
movq %rdx, %rax
salq $31, %rax
subq %rdx, %rax
subq %rax, %rsi
movq %rsi, (%rbx)
leaq -1(%rsi,%rcx), %rax
cmpq %rax, %r12
jb .L60
cmpq %rcx, %rax
jb .L60
jmp .L55
.L56:
imulq $16807, (%rsi), %rcx
movabsq $8589934597, %rdx
movq %rcx, %rax
mulq %rdx
movq %rcx, %rax
subq %rdx, %rax
shrq %rax
addq %rdx, %rax
shrq $30, %rax
movq %rax, %rdx
salq $31, %rdx
subq %rax, %rdx
movq %rcx, %rax
subq %rdx, %rax
movq %rax, (%rsi)
subq $1, %rax
jmp .L55
.L62:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE8960:
.size _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE, .-_ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
.text
.globl _Z8init_vecPii
.type _Z8init_vecPii, @function
_Z8init_vecPii:
.LFB8142:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
testl %esi, %esi
jle .L63
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %r12
movq %rsp, %rbp
.L65:
movl $0, (%rsp)
movl $3, 4(%rsp)
call _ZNSt12experimental15fundamentals_v217_S_randint_engineEv
movq %rax, %rsi
movq %rbp, %rdx
movq %rbp, %rdi
call _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
movl %eax, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L65
.L63:
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L69
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L69:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE8142:
.size _Z8init_vecPii, .-_Z8init_vecPii
.section .rodata.str1.1
.LC8:
.string "seed = "
.LC9:
.string "\nn = "
.LC10:
.string "blockSize = "
.LC11:
.string "nBlocks = "
.LC12:
.string "sharedBytes = "
.section .rodata.str1.8
.align 8
.LC13:
.string "/home/ubuntu/Datasets/stackv2/train-structured/Nyriu/Esercizi_Esempi_CUDA/main/l08_s12_reducesum_my/main.cu"
.section .rodata.str1.1
.LC14:
.string "c_sum = "
.LC15:
.string "g_sum = "
.LC16:
.string "\nCorrect"
.LC17:
.string "\nWRONG!"
.text
.globl main
.type main, @function
main:
.LFB8147:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
call _ZNSt12experimental15fundamentals_v217_S_randint_engineEv
movq $1619508961, (%rax)
leaq .LC8(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $1619508961, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl n(%rip), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC10(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl blockSize(%rip), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC11(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl nBlocks(%rip), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC12(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl sharedBytes(%rip), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC2(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movslq n(%rip), %r12
salq $2, %r12
movl $4, %edi
call malloc@PLT
movq %rax, %rbp
movq %r12, %rdi
call malloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $118, %edx
leaq .LC13(%rip), %r13
movq %r13, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 8(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl %eax, %edi
movl $119, %edx
movq %r13, %rsi
call _ZL11HandleError9cudaErrorPKci
movl n(%rip), %esi
movq %rbx, %rdi
call _Z8init_vecPii
movl n(%rip), %esi
cmpl $31, %esi
jle .L77
.L71:
movl $1, %ecx
movq %r12, %rdx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $125, %edx
leaq .LC13(%rip), %rsi
call _ZL11HandleError9cudaErrorPKci
movl blockSize(%rip), %eax
movl %eax, 28(%rsp)
movl $1, 32(%rsp)
movl nBlocks(%rip), %eax
movl %eax, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movslq sharedBytes(%rip), %r8
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L78
.L72:
call cudaDeviceSynchronize@PLT
movl %eax, %edi
movl $129, %edx
leaq .LC13(%rip), %r12
movq %r12, %rsi
call _ZL11HandleError9cudaErrorPKci
movl $2, %ecx
movl $4, %edx
movq (%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $130, %edx
movq %r12, %rsi
call _ZL11HandleError9cudaErrorPKci
movl n(%rip), %esi
movq %rbx, %rdi
call _Z7cpu_sumPii
movl %eax, %ebx
leaq .LC14(%rip), %rsi
leaq _ZSt4cout(%rip), %r12
movq %r12, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl %ebx, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC15(%rip), %rsi
movq %r12, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 0(%rbp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
cmpl %ebx, 0(%rbp)
je .L79
leaq .LC17(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
.L74:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L80
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L77:
.cfi_restore_state
movq %rbx, %rdi
call _Z8show_vecPii
jmp .L71
.L78:
movl n(%rip), %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z39__device_stub__Z16sum_kernel_blockPiS_iPiS_i
jmp .L72
.L79:
leaq .LC16(%rip), %rsi
movq %r12, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
jmp .L74
.L80:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE8147:
.size main, .-main
.type _GLOBAL__sub_I__Z8init_vecPii, @function
_GLOBAL__sub_I__Z8init_vecPii:
.LFB9103:
.cfi_startproc
endbr64
movl blockSize(%rip), %ecx
leal 0(,%rcx,4), %esi
movl %esi, %eax
addl n(%rip), %eax
subl $1, %eax
cltd
idivl %esi
movl %eax, nBlocks(%rip)
movl %esi, sharedBytes(%rip)
ret
.cfi_endproc
.LFE9103:
.size _GLOBAL__sub_I__Z8init_vecPii, .-_GLOBAL__sub_I__Z8init_vecPii
.section .init_array
.align 8
.quad _GLOBAL__sub_I__Z8init_vecPii
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl sharedBytes
.bss
.align 4
.type sharedBytes, @object
.size sharedBytes, 4
sharedBytes:
.zero 4
.globl nBlocks
.align 4
.type nBlocks, @object
.size nBlocks, 4
nBlocks:
.zero 4
.globl blockSize
.data
.align 4
.type blockSize, @object
.size blockSize, 4
blockSize:
.long 2
.globl n
.align 4
.type n, @object
.size n, 4
n:
.long 16
.weak _ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng
.section .tbss._ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,"awTG",@nobits,_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,comdat
.align 8
.type _ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng, @gnu_unique_object
.size _ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng, 8
_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng:
.zero 8
.weak _ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng
.section .tbss._ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,"awTG",@nobits,_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,comdat
.align 8
.type _ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng, @gnu_unique_object
.size _ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng, 8
_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng:
.zero 8
.weak _ZN4cuda3std3__419piecewise_constructE
.section .rodata._ZN4cuda3std3__419piecewise_constructE,"aG",@progbits,_ZN4cuda3std3__419piecewise_constructE,comdat
.type _ZN4cuda3std3__419piecewise_constructE, @gnu_unique_object
.size _ZN4cuda3std3__419piecewise_constructE, 1
_ZN4cuda3std3__419piecewise_constructE:
.zero 1
.weak _ZN4cuda3std6ranges3__45__cpo9iter_moveE
.section .rodata._ZN4cuda3std6ranges3__45__cpo9iter_moveE,"aG",@progbits,_ZN4cuda3std6ranges3__45__cpo9iter_moveE,comdat
.type _ZN4cuda3std6ranges3__45__cpo9iter_moveE, @gnu_unique_object
.size _ZN4cuda3std6ranges3__45__cpo9iter_moveE, 1
_ZN4cuda3std6ranges3__45__cpo9iter_moveE:
.zero 1
.weak _ZN4cuda3std6ranges3__45__cpo4swapE
.section .rodata._ZN4cuda3std6ranges3__45__cpo4swapE,"aG",@progbits,_ZN4cuda3std6ranges3__45__cpo4swapE,comdat
.type _ZN4cuda3std6ranges3__45__cpo4swapE, @gnu_unique_object
.size _ZN4cuda3std6ranges3__45__cpo4swapE, 1
_ZN4cuda3std6ranges3__45__cpo4swapE:
.zero 1
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Esempio collective operations: reduce_sum
#include <cooperative_groups.h>
//using namespace cooperative_groups;
namespace cg = cooperative_groups;
#include <locale>
#include <stdlib.h>
#include <iostream>
#include <experimental/random>
#include <time.h>
#define RNG_MAX_VAL 3 // 5 // 50 // max rng val for array elems
// Abort the program with a diagnostic message when a CUDA API call fails.
// Invoked through the HANDLE_ERROR macro, which supplies __FILE__/__LINE__.
static void HandleError( cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    std::cout << cudaGetErrorString( err ) << " in " << file << " line " << line << std::endl;
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
// Fill v[0..n-1] with uniform random integers in [0, RNG_MAX_VAL].
void init_vec(int *v, int n) {
    for (int idx = 0; idx < n; ++idx)
        v[idx] = std::experimental::randint(0, RNG_MAX_VAL);
}
// Print the n elements of v as a comma-separated list to stdout,
// framed by blank lines.  Assumes n >= 1 (v[0] is printed unconditionally).
void show_vec(int *v, int n) {
    std::cout << "\n" << v[0];
    int idx = 1;
    while (idx < n) {
        std::cout << ", " << v[idx];
        ++idx;
    }
    std::cout << "\n" << std::endl;
}
// Sequential reference sum of v[0..n-1]; used to validate the GPU result.
int cpu_sum(int *v, int n) {
    int total = 0;
    for (int *p = v; p != v + n; ++p)
        total += *p;
    return total;
}
// Codice Prof
// Tree reduction across thread group g using the caller-provided scratch
// buffer temp (one int per thread, shared memory).
// NOTE(review): the halving loop assumes g.size() is a power of two
// (matches the file's "works only with n=2^k" caveat) — confirm before
// reusing with other group sizes.
__device__ int reduce_sum(cg::thread_group g, int *temp, int val) {
int lane = g.thread_rank();
// each iteration halves the number of active threads:
// every active thread adds the partial temp[lane+i] into its own val
for (int i=g.size()/2; i>0; i/=2) {
temp[lane] = val;
g.sync(); // wait for the whole group before reading temp
if (lane < i) val += temp[lane+i];
g.sync(); // ensure reads complete before temp is overwritten next pass
}
return val; // only thread 0 ends up holding the complete group sum
}
// Per-thread partial sum: each thread walks the n/4 int4 chunks of input
// with a grid-stride loop and accumulates four elements per iteration.
// NOTE(review): elements past the last multiple of 4 are skipped, and the
// int4 cast assumes 16-byte alignment of input — holds for the
// power-of-two-sized cudaMalloc'd buffer used by this program.
__device__ int thread_sum(int *input, int n) {
int sum=0;
for (int i=blockIdx.x * blockDim.x + threadIdx.x;
i<n/4;
i+=blockDim.x * gridDim.x) { // grid-stride access
int4 in = ((int4*)input)[i]; // vector load is more efficient
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
// Reduce input[0..n-1] into *sum.  Launch with dynamic shared memory of
// blockDim.x * sizeof(int) bytes (third <<<...>>> argument); temp holds one
// int per thread.  Thread 0 of each block atomically adds the block total
// into *sum.
// NOTE(review): *sum is not zeroed here — the host must initialize it
// before the launch.
__global__ void sum_kernel_block(int *sum, int *input, int n) {
int my_sum = thread_sum(input, n);
extern __shared__ int temp[]; // extern: size is supplied dynamically via
// the third kernel-launch argument <<< ... >>>
//auto g = cg::this_thread_block();
cg::thread_block g = cg::this_thread_block();
int block_sum = reduce_sum(g, temp, my_sum);
if(g.thread_rank() == 0)
atomicAdd(sum, block_sum);
}
// END // Codice Prof
// ATTENZIONE!! Funziona solo con n=2^k con k>1
//int n = 1<<24; // array len = 16M // n=2^24 // bit shift operation
//int blockSize = 256;
////int nBlocks = (n+blockSize-1) / blockSize; // work as ceiling
//int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
//int sharedBytes = blockSize * sizeof(int);
// toy example
int n = 16;
int blockSize = 2;
int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
int sharedBytes = blockSize * sizeof(int);
// Driver: fills a host vector with random ints, reduces it on the GPU via
// sum_kernel_block, and checks the result against a CPU reference sum.
int main( void ) {
    //int seed = (int)time(NULL);
    int seed = 1619508961; // fixed seed for reproducible runs
    std::experimental::reseed(seed);
    std::cout << "seed = " << seed << std::endl;
    std::cout << "\nn = " << n << std::endl;
    std::cout << "blockSize = " << blockSize << std::endl;
    std::cout << "nBlocks = " << nBlocks << std::endl;
    std::cout << "sharedBytes = " << sharedBytes << "\n" << std::endl;

    size_t data_size = (size_t)n*sizeof(int);
    int *sum, *data;
    sum = (int*)malloc(sizeof(int));
    data = (int*)malloc(data_size);

    int *d_sum, *d_data;
    HANDLE_ERROR(cudaMalloc((void**)&d_sum, sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&d_data, data_size));
    // BUG FIX: the kernel accumulates into *d_sum with atomicAdd, so the
    // accumulator must start at zero; it was previously left uninitialized.
    HANDLE_ERROR(cudaMemset(d_sum, 0, sizeof(int)));

    init_vec(data,n);
    if (n < 32) // show the vector only when it is small
        show_vec(data,n);

    HANDLE_ERROR(cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice));
    sum_kernel_block<<<nBlocks, blockSize, sharedBytes>>>(d_sum, d_data, n);
    HANDLE_ERROR(cudaGetLastError()); // catch launch-configuration errors
    HANDLE_ERROR(cudaDeviceSynchronize());
    HANDLE_ERROR(cudaMemcpy(sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost));

    int c_sum = cpu_sum(data,n);
    std::cout << "c_sum = " << c_sum << std::endl;
    std::cout << "g_sum = " << *sum << std::endl;
    if (c_sum == *sum)
        std::cout << "\nCorrect" << std::endl;
    else
        std::cout << "\nWRONG!" << std::endl;

    cudaFree(d_data);
    cudaFree(d_sum);
    free(data); // release host buffers as well (previously leaked)
    free(sum);
    return 0;
}
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
//using namespace cooperative_groups;
namespace cg = cooperative_groups;
#include <locale>
#include <stdlib.h>
#include <iostream>
#include <experimental/random>
#include <time.h>
#define RNG_MAX_VAL 3 // 5 // 50 // max rng val for array elems
// Abort with a diagnostic when a HIP API call fails; invoked through the
// HANDLE_ERROR macro, which supplies the call site's file and line.
static void HandleError( hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
std::cout << hipGetErrorString( err ) << " in " << file << " line " << line << std::endl;
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
// Fill v[0..n-1] with uniform random integers in [0, RNG_MAX_VAL].
void init_vec(int *v, int n) {
for (int i=0; i<n; i++) {
v[i] = std::experimental::randint(0,RNG_MAX_VAL);
//v[i] = i;
}
}
// Print the n elements of v as a comma-separated list to stdout, framed by
// blank lines.  Assumes n >= 1 (v[0] is printed unconditionally).
void show_vec(int *v, int n) {
std::cout << "\n" << v[0];
for (int i=1; i<n; i++) {
std::cout << ", " << v[i];
}
std::cout << "\n" << std::endl;
}
// Sequential reference sum of v[0..n-1]; used to validate the GPU result.
int cpu_sum(int *v, int n) {
int s=0;
for (int i=0; i<n; i++) {
s += v[i];
}
return s;
}
// Codice Prof
// Tree reduction across thread group g using the caller-provided scratch
// buffer temp (one int per thread, shared memory).
// NOTE(review): the halving loop assumes g.size() is a power of two
// (matches the file's "works only with n=2^k" caveat) — confirm before
// reusing with other group sizes.
__device__ int reduce_sum(cg::thread_group g, int *temp, int val) {
int lane = g.thread_rank();
// each iteration halves the number of active threads:
// every active thread adds the partial temp[lane+i] into its own val
for (int i=g.size()/2; i>0; i/=2) {
temp[lane] = val;
g.sync(); // wait for the whole group before reading temp
if (lane < i) val += temp[lane+i];
g.sync(); // ensure reads complete before temp is overwritten next pass
}
return val; // only thread 0 ends up holding the complete group sum
}
// Per-thread partial sum: each thread walks the n/4 int4 chunks of input
// with a grid-stride loop and accumulates four elements per iteration.
// NOTE(review): elements past the last multiple of 4 are skipped, and the
// int4 cast assumes 16-byte alignment of input — holds for the
// power-of-two-sized hipMalloc'd buffer used by this program.
__device__ int thread_sum(int *input, int n) {
int sum=0;
for (int i=blockIdx.x * blockDim.x + threadIdx.x;
i<n/4;
i+=blockDim.x * gridDim.x) { // grid-stride access
int4 in = ((int4*)input)[i]; // vector load is more efficient
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
// Reduce input[0..n-1] into *sum.  Launch with dynamic shared memory of
// blockDim.x * sizeof(int) bytes (third <<<...>>> argument); temp holds one
// int per thread.  Thread 0 of each block atomically adds the block total
// into *sum.
// NOTE(review): *sum is not zeroed here — the host must initialize it
// before the launch.
__global__ void sum_kernel_block(int *sum, int *input, int n) {
int my_sum = thread_sum(input, n);
extern __shared__ int temp[]; // extern: size is supplied dynamically via
// the third kernel-launch argument <<< ... >>>
//auto g = cg::this_thread_block();
cg::thread_block g = cg::this_thread_block();
int block_sum = reduce_sum(g, temp, my_sum);
if(g.thread_rank() == 0)
atomicAdd(sum, block_sum);
}
// END // Codice Prof
// ATTENZIONE!! Funziona solo con n=2^k con k>1
//int n = 1<<24; // array len = 16M // n=2^24 // bit shift operation
//int blockSize = 256;
////int nBlocks = (n+blockSize-1) / blockSize; // work as ceiling
//int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
//int sharedBytes = blockSize * sizeof(int);
// toy example
int n = 16;
int blockSize = 2;
int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
int sharedBytes = blockSize * sizeof(int);
int main( void ) {
//int seed = (int)time(NULL);
int seed = 1619508961;
std::experimental::reseed(seed);
std::cout << "seed = " << seed << std::endl;
std::cout << "\nn = " << n << std::endl;
std::cout << "blockSize = " << blockSize << std::endl;
std::cout << "nBlocks = " << nBlocks << std::endl;
std::cout << "sharedBytes = " << sharedBytes << "\n" << std::endl;
size_t data_size = (size_t)n*sizeof(int);
int *sum, *data;
sum = (int*)malloc(sizeof(int));
data = (int*)malloc(data_size);
int *d_sum, *d_data;
HANDLE_ERROR(hipMalloc((void**)&d_sum, sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&d_data, data_size));
init_vec(data,n);
if (n < 32) // mostra il vettore solo se e' piccolo
show_vec(data,n);
HANDLE_ERROR(hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice));
sum_kernel_block<<<nBlocks, blockSize, sharedBytes>>>(d_sum, d_data, n);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(sum, d_sum, sizeof(int), hipMemcpyDeviceToHost));
int c_sum = cpu_sum(data,n);
std::cout << "c_sum = " << c_sum << std::endl;
std::cout << "g_sum = " << *sum << std::endl;
if (c_sum == *sum)
std::cout << "\nCorrect" << std::endl;
else
std::cout << "\nWRONG!" << std::endl;
hipFree(d_data);
hipFree(d_sum);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // Esempio collective operations: reduce_sum
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
//using namespace cooperative_groups;
namespace cg = cooperative_groups;
#include <locale>
#include <stdlib.h>
#include <iostream>
#include <experimental/random>
#include <time.h>
#define RNG_MAX_VAL 3 // 5 // 50 // max rng val for array elems
// Abort with a diagnostic when a HIP API call fails; invoked through the
// HANDLE_ERROR macro, which supplies the call site's file and line.
static void HandleError( hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
std::cout << hipGetErrorString( err ) << " in " << file << " line " << line << std::endl;
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
// Fill v[0..n-1] with uniform random integers in [0, RNG_MAX_VAL].
void init_vec(int *v, int n) {
for (int i=0; i<n; i++) {
v[i] = std::experimental::randint(0,RNG_MAX_VAL);
//v[i] = i;
}
}
// Print the n elements of v as a comma-separated list to stdout, framed by
// blank lines.  Assumes n >= 1 (v[0] is printed unconditionally).
void show_vec(int *v, int n) {
std::cout << "\n" << v[0];
for (int i=1; i<n; i++) {
std::cout << ", " << v[i];
}
std::cout << "\n" << std::endl;
}
// Sequential reference sum of v[0..n-1]; used to validate the GPU result.
int cpu_sum(int *v, int n) {
int s=0;
for (int i=0; i<n; i++) {
s += v[i];
}
return s;
}
// Codice Prof
// Tree reduction across thread group g using the caller-provided scratch
// buffer temp (one int per thread, shared memory).
// NOTE(review): the halving loop assumes g.size() is a power of two
// (matches the file's "works only with n=2^k" caveat) — confirm before
// reusing with other group sizes.
__device__ int reduce_sum(cg::thread_group g, int *temp, int val) {
int lane = g.thread_rank();
// each iteration halves the number of active threads:
// every active thread adds the partial temp[lane+i] into its own val
for (int i=g.size()/2; i>0; i/=2) {
temp[lane] = val;
g.sync(); // wait for the whole group before reading temp
if (lane < i) val += temp[lane+i];
g.sync(); // ensure reads complete before temp is overwritten next pass
}
return val; // only thread 0 ends up holding the complete group sum
}
// Per-thread partial sum: each thread walks the n/4 int4 chunks of input
// with a grid-stride loop and accumulates four elements per iteration.
// NOTE(review): elements past the last multiple of 4 are skipped, and the
// int4 cast assumes 16-byte alignment of input — holds for the
// power-of-two-sized hipMalloc'd buffer used by this program.
__device__ int thread_sum(int *input, int n) {
int sum=0;
for (int i=blockIdx.x * blockDim.x + threadIdx.x;
i<n/4;
i+=blockDim.x * gridDim.x) { // grid-stride access
int4 in = ((int4*)input)[i]; // vector load is more efficient
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
// Reduce input[0..n-1] into *sum.  Launch with dynamic shared memory of
// blockDim.x * sizeof(int) bytes (third <<<...>>> argument); temp holds one
// int per thread.  Thread 0 of each block atomically adds the block total
// into *sum.
// NOTE(review): *sum is not zeroed here — the host must initialize it
// before the launch.
__global__ void sum_kernel_block(int *sum, int *input, int n) {
int my_sum = thread_sum(input, n);
extern __shared__ int temp[]; // extern: size is supplied dynamically via
// the third kernel-launch argument <<< ... >>>
//auto g = cg::this_thread_block();
cg::thread_block g = cg::this_thread_block();
int block_sum = reduce_sum(g, temp, my_sum);
if(g.thread_rank() == 0)
atomicAdd(sum, block_sum);
}
// END // Codice Prof
// ATTENZIONE!! Funziona solo con n=2^k con k>1
//int n = 1<<24; // array len = 16M // n=2^24 // bit shift operation
//int blockSize = 256;
////int nBlocks = (n+blockSize-1) / blockSize; // work as ceiling
//int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
//int sharedBytes = blockSize * sizeof(int);
// toy example
int n = 16;
int blockSize = 2;
int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
int sharedBytes = blockSize * sizeof(int);
int main( void ) {
//int seed = (int)time(NULL);
int seed = 1619508961;
std::experimental::reseed(seed);
std::cout << "seed = " << seed << std::endl;
std::cout << "\nn = " << n << std::endl;
std::cout << "blockSize = " << blockSize << std::endl;
std::cout << "nBlocks = " << nBlocks << std::endl;
std::cout << "sharedBytes = " << sharedBytes << "\n" << std::endl;
size_t data_size = (size_t)n*sizeof(int);
int *sum, *data;
sum = (int*)malloc(sizeof(int));
data = (int*)malloc(data_size);
int *d_sum, *d_data;
HANDLE_ERROR(hipMalloc((void**)&d_sum, sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&d_data, data_size));
init_vec(data,n);
if (n < 32) // mostra il vettore solo se e' piccolo
show_vec(data,n);
HANDLE_ERROR(hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice));
sum_kernel_block<<<nBlocks, blockSize, sharedBytes>>>(d_sum, d_data, n);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(sum, d_sum, sizeof(int), hipMemcpyDeviceToHost));
int c_sum = cpu_sum(data,n);
std::cout << "c_sum = " << c_sum << std::endl;
std::cout << "g_sum = " << *sum << std::endl;
if (c_sum == *sum)
std::cout << "\nCorrect" << std::endl;
else
std::cout << "\nWRONG!" << std::endl;
hipFree(d_data);
hipFree(d_sum);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16sum_kernel_blockPiS_i
.globl _Z16sum_kernel_blockPiS_i
.p2align 8
.type _Z16sum_kernel_blockPiS_i,@function
_Z16sum_kernel_blockPiS_i:
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_mov_b32 s6, exec_lo
v_mov_b32_e32 v4, 0
s_waitcnt lgkmcnt(0)
s_cmp_lt_u32 s15, s8
s_cselect_b32 s4, 12, 18
s_delay_alu instid0(SALU_CYCLE_1)
v_mov_b32_e32 v1, s4
s_load_b32 s4, s[0:1], 0x10
global_load_u16 v5, v1, s[2:3]
v_and_b32_e32 v1, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshr_b32 s5, s5, 30
s_add_i32 s4, s4, s5
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_ashr_i32 s7, s4, 2
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[2:3], null, s15, v5, v[1:2]
v_cmpx_gt_i32_e64 s7, v2
s_cbranch_execz .LBB0_4
s_load_b64 s[4:5], s[0:1], 0x8
v_mul_lo_u32 v5, s8, v5
v_mov_b32_e32 v4, 0
s_mov_b32 s8, 0
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[6:7], 4, v[2:3]
v_add_nc_u32_e32 v2, v2, v5
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, s4, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_cmp_le_i32_e32 vcc_lo, s7, v2
global_load_b128 v[6:9], v[6:7], off
s_or_b32 s8, vcc_lo, s8
s_waitcnt vmcnt(0)
v_add3_u32 v3, v6, v4, v7
v_add3_u32 v4, v3, v8, v9
s_and_not1_b32 exec_lo, exec_lo, s8
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s8
.LBB0_4:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s6
s_load_b64 s[4:5], s[2:3], 0xc
v_bfe_u32 v5, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s4, 0xffff
s_lshr_b32 s3, s4, 16
s_and_b32 s4, s5, 0xffff
s_mul_i32 s5, s3, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s4, s5, s4
s_cmp_lt_u32 s4, 2
s_cbranch_scc1 .LBB0_10
v_mad_u32_u24 v6, v0, s3, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v6, s2, v[1:2]
v_lshl_add_u32 v3, v2, 2, 0
.p2align 6
.LBB0_6:
s_lshr_b32 s5, s4, 1
s_mov_b32 s6, exec_lo
ds_store_b32 v3, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_i32_e64 s5, v2
s_cbranch_execz .LBB0_8
v_lshl_add_u32 v6, s5, 2, v3
ds_load_b32 v6, v6
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v4, v6, v4
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s6
s_cmp_gt_u32 s4, 3
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_10
s_mov_b32 s4, s5
s_branch .LBB0_6
.LBB0_10:
v_mad_u32_u24 v0, v0, s3, v5
v_sub_nc_u32_e32 v1, 0, v1
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v0, s2
s_mov_b32 s2, 0
v_cmpx_eq_u32_e64 v0, v1
s_cbranch_execz .LBB0_15
s_mov_b32 s3, exec_lo
.LBB0_12:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ctz_i32_b32 s4, s3
v_readlane_b32 s5, v4, s4
s_lshl_b32 s4, 1, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_and_not1_b32 s3, s3, s4
s_add_i32 s2, s2, s5
s_cmp_lg_u32 s3, 0
s_cbranch_scc1 .LBB0_12
v_mbcnt_lo_u32_b32 v0, exec_lo, 0
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v0
s_xor_b32 s3, exec_lo, s3
s_cbranch_execz .LBB0_15
s_load_b64 s[0:1], s[0:1], 0x0
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
s_waitcnt lgkmcnt(0)
global_atomic_add_u32 v0, v1, s[0:1]
.LBB0_15:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16sum_kernel_blockPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16sum_kernel_blockPiS_i, .Lfunc_end0-_Z16sum_kernel_blockPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16sum_kernel_blockPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16sum_kernel_blockPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // Esempio collective operations: reduce_sum
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
//using namespace cooperative_groups;
namespace cg = cooperative_groups;
#include <locale>
#include <stdlib.h>
#include <iostream>
#include <experimental/random>
#include <time.h>
#define RNG_MAX_VAL 3 // 5 // 50 // max rng val for array elems
// Abort with a diagnostic when a HIP API call fails; invoked through the
// HANDLE_ERROR macro, which supplies the call site's file and line.
static void HandleError( hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
std::cout << hipGetErrorString( err ) << " in " << file << " line " << line << std::endl;
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
// Fill v[0..n-1] with uniform random integers in [0, RNG_MAX_VAL].
void init_vec(int *v, int n) {
for (int i=0; i<n; i++) {
v[i] = std::experimental::randint(0,RNG_MAX_VAL);
//v[i] = i;
}
}
// Print the n elements of v as a comma-separated list to stdout, framed by
// blank lines.  Assumes n >= 1 (v[0] is printed unconditionally).
void show_vec(int *v, int n) {
std::cout << "\n" << v[0];
for (int i=1; i<n; i++) {
std::cout << ", " << v[i];
}
std::cout << "\n" << std::endl;
}
// Sequential reference sum of v[0..n-1]; used to validate the GPU result.
int cpu_sum(int *v, int n) {
int s=0;
for (int i=0; i<n; i++) {
s += v[i];
}
return s;
}
// Codice Prof
// Tree reduction across thread group g using the caller-provided scratch
// buffer temp (one int per thread, shared memory).
// NOTE(review): the halving loop assumes g.size() is a power of two
// (matches the file's "works only with n=2^k" caveat) — confirm before
// reusing with other group sizes.
__device__ int reduce_sum(cg::thread_group g, int *temp, int val) {
int lane = g.thread_rank();
// each iteration halves the number of active threads:
// every active thread adds the partial temp[lane+i] into its own val
for (int i=g.size()/2; i>0; i/=2) {
temp[lane] = val;
g.sync(); // wait for the whole group before reading temp
if (lane < i) val += temp[lane+i];
g.sync(); // ensure reads complete before temp is overwritten next pass
}
return val; // only thread 0 ends up holding the complete group sum
}
// Per-thread partial sum: each thread walks the n/4 int4 chunks of input
// with a grid-stride loop and accumulates four elements per iteration.
// NOTE(review): elements past the last multiple of 4 are skipped, and the
// int4 cast assumes 16-byte alignment of input — holds for the
// power-of-two-sized hipMalloc'd buffer used by this program.
__device__ int thread_sum(int *input, int n) {
int sum=0;
for (int i=blockIdx.x * blockDim.x + threadIdx.x;
i<n/4;
i+=blockDim.x * gridDim.x) { // grid-stride access
int4 in = ((int4*)input)[i]; // vector load is more efficient
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
// Reduce input[0..n-1] into *sum.  Launch with dynamic shared memory of
// blockDim.x * sizeof(int) bytes (third <<<...>>> argument); temp holds one
// int per thread.  Thread 0 of each block atomically adds the block total
// into *sum.
// NOTE(review): *sum is not zeroed here — the host must initialize it
// before the launch.
__global__ void sum_kernel_block(int *sum, int *input, int n) {
int my_sum = thread_sum(input, n);
extern __shared__ int temp[]; // extern: size is supplied dynamically via
// the third kernel-launch argument <<< ... >>>
//auto g = cg::this_thread_block();
cg::thread_block g = cg::this_thread_block();
int block_sum = reduce_sum(g, temp, my_sum);
if(g.thread_rank() == 0)
atomicAdd(sum, block_sum);
}
// END // Codice Prof
// ATTENZIONE!! Funziona solo con n=2^k con k>1
//int n = 1<<24; // array len = 16M // n=2^24 // bit shift operation
//int blockSize = 256;
////int nBlocks = (n+blockSize-1) / blockSize; // work as ceiling
//int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
//int sharedBytes = blockSize * sizeof(int);
// toy example
int n = 16;
int blockSize = 2;
int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // il numero di blocchi si puo' ridurre perche' sopra non si tiene conto degli int4
int sharedBytes = blockSize * sizeof(int);
int main( void ) {
//int seed = (int)time(NULL);
int seed = 1619508961;
std::experimental::reseed(seed);
std::cout << "seed = " << seed << std::endl;
std::cout << "\nn = " << n << std::endl;
std::cout << "blockSize = " << blockSize << std::endl;
std::cout << "nBlocks = " << nBlocks << std::endl;
std::cout << "sharedBytes = " << sharedBytes << "\n" << std::endl;
size_t data_size = (size_t)n*sizeof(int);
int *sum, *data;
sum = (int*)malloc(sizeof(int));
data = (int*)malloc(data_size);
int *d_sum, *d_data;
HANDLE_ERROR(hipMalloc((void**)&d_sum, sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&d_data, data_size));
init_vec(data,n);
if (n < 32) // mostra il vettore solo se e' piccolo
show_vec(data,n);
HANDLE_ERROR(hipMemcpy(d_data, data, data_size, hipMemcpyHostToDevice));
sum_kernel_block<<<nBlocks, blockSize, sharedBytes>>>(d_sum, d_data, n);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(sum, d_sum, sizeof(int), hipMemcpyDeviceToHost));
int c_sum = cpu_sum(data,n);
std::cout << "c_sum = " << c_sum << std::endl;
std::cout << "g_sum = " << *sum << std::endl;
if (c_sum == *sum)
std::cout << "\nCorrect" << std::endl;
else
std::cout << "\nWRONG!" << std::endl;
hipFree(d_data);
hipFree(d_sum);
return 0;
} | .text
.file "main.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z8init_vecPii # -- Begin function _Z8init_vecPii
.p2align 4, 0x90
.type _Z8init_vecPii,@function
_Z8init_vecPii: # @_Z8init_vecPii
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $5048, %rsp # imm = 0x13B8
.cfi_def_cfa_offset 5104
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
testl %esi, %esi
jle .LBB0_10
# %bb.1: # %.lr.ph
movq %rdi, %rbx
leaq 24(%rsp), %rcx
movq %fs:0, %rax
leaq _ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@TPOFF(%rax), %r14
movl %esi, %ebp
xorl %r12d, %r12d
leaq 48(%rsp), %rdi
leaq 40(%rsp), %r13
movq %rdi, %r15
.p2align 4, 0x90
.LBB0_2: # =>This Inner Loop Header: Depth=1
movabsq $12884901888, %rax # imm = 0x300000000
movq %rax, 40(%rsp)
cmpb $0, %fs:_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@TPOFF
je .LBB0_3
.LBB0_9: # %_ZNSt12experimental15fundamentals_v27randintIiEET_S2_S2_.exit
# in Loop: Header=BB0_2 Depth=1
movq %r13, %rdi
movq %r14, %rsi
movq %r13, %rdx
callq _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
movq %r15, %rdi
leaq 24(%rsp), %rcx
movl %eax, (%rbx,%r12,4)
incq %r12
cmpq %r12, %rbp
jne .LBB0_2
jmp .LBB0_10
.LBB0_3: # in Loop: Header=BB0_2 Depth=1
movq %rcx, 8(%rsp)
movl $1953264993, 3(%rcx) # imm = 0x746C7561
movl $1634100580, (%rcx) # imm = 0x61666564
movq $7, 16(%rsp)
movb $0, 31(%rsp)
.Ltmp0:
leaq 8(%rsp), %rsi
callq _ZNSt13random_device7_M_initERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.Ltmp1:
# %bb.4: # in Loop: Header=BB0_2 Depth=1
movq 8(%rsp), %rdi
leaq 24(%rsp), %rax
cmpq %rax, %rdi
je .LBB0_6
# %bb.5: # %.critedge.i.i.i
# in Loop: Header=BB0_2 Depth=1
callq _ZdlPv
.LBB0_6: # %_ZNSt13random_deviceC2Ev.exit
# in Loop: Header=BB0_2 Depth=1
.Ltmp3:
movq %r15, %rdi
callq _ZNSt13random_device9_M_getvalEv
# kill: def $eax killed $eax def $rax
.Ltmp4:
# %bb.7: # %_ZNSt13random_deviceclEv.exit.i.i
# in Loop: Header=BB0_2 Depth=1
movl %eax, %ecx
leaq (%rcx,%rcx,2), %rcx
shrq $32, %rcx
movl %eax, %edx
subl %ecx, %edx
shrl %edx
addl %ecx, %edx
shrl $30, %edx
movl %edx, %ecx
shll $31, %ecx
orl %edx, %ecx
leal (%rax,%rcx), %edx
cmpl $1, %edx
adcl %eax, %ecx
movq %rcx, %fs:_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@TPOFF
.Ltmp9:
leaq 48(%rsp), %rdi
callq _ZNSt13random_device7_M_finiEv
.Ltmp10:
# %bb.8: # %_ZNSt13random_deviceD2Ev.exit.i.i
# in Loop: Header=BB0_2 Depth=1
movb $1, %fs:_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@TPOFF
jmp .LBB0_9
.LBB0_10: # %._crit_edge
addq $5048, %rsp # imm = 0x13B8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_14:
.cfi_def_cfa_offset 5104
.Ltmp11:
movq %rax, %rdi
callq __clang_call_terminate
.LBB0_16:
.Ltmp5:
movq %rax, %rbx
.Ltmp6:
leaq 48(%rsp), %rdi
callq _ZNSt13random_device7_M_finiEv
.Ltmp7:
jmp .LBB0_13
.LBB0_15:
.Ltmp8:
movq %rax, %rdi
callq __clang_call_terminate
.LBB0_11:
.Ltmp2:
movq %rax, %rbx
movq 8(%rsp), %rdi
leaq 24(%rsp), %rax
cmpq %rax, %rdi
je .LBB0_13
# %bb.12: # %.critedge.i.i6.i
callq _ZdlPv
.LBB0_13: # %common.resume
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end0:
.size _Z8init_vecPii, .Lfunc_end0-_Z8init_vecPii
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table0:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 3 # @TType Encoding = udata4
.uleb128 .Lttbase0-.Lttbaseref0
.Lttbaseref0:
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1
.uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2
.byte 0 # On action: cleanup
.uleb128 .Ltmp3-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp4-.Ltmp3 # Call between .Ltmp3 and .Ltmp4
.uleb128 .Ltmp5-.Lfunc_begin0 # jumps to .Ltmp5
.byte 0 # On action: cleanup
.uleb128 .Ltmp9-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp10-.Ltmp9 # Call between .Ltmp9 and .Ltmp10
.uleb128 .Ltmp11-.Lfunc_begin0 # jumps to .Ltmp11
.byte 1 # On action: 1
.uleb128 .Ltmp6-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp7-.Ltmp6 # Call between .Ltmp6 and .Ltmp7
.uleb128 .Ltmp8-.Lfunc_begin0 # jumps to .Ltmp8
.byte 1 # On action: 1
.uleb128 .Ltmp7-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Lfunc_end0-.Ltmp7 # Call between .Ltmp7 and .Lfunc_end0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.byte 1 # >> Action Record 1 <<
# Catch TypeInfo 1
.byte 0 # No further actions
.p2align 2, 0x0
# >> Catch TypeInfos <<
.long 0 # TypeInfo 1
.Lttbase0:
.p2align 2, 0x0
# -- End function
.text
.globl _Z8show_vecPii # -- Begin function _Z8show_vecPii
.p2align 4, 0x90
.type _Z8show_vecPii,@function
_Z8show_vecPii: # @_Z8show_vecPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movq %rdi, %rbx
movl $1, %r14d
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%rbx), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
cmpl $2, %ebp
jl .LBB1_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $2, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%rbx,%r14,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
incq %r14
cmpq %r14, %r15
jne .LBB1_2
.LBB1_3: # %._crit_edge
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB1_8
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_6
# %bb.5:
movzbl 67(%rbx), %eax
jmp .LBB1_7
.LBB1_6:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB1_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp _ZNSo5flushEv # TAILCALL
.LBB1_8:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size _Z8show_vecPii, .Lfunc_end1-_Z8show_vecPii
.cfi_endproc
# -- End function
.globl _Z7cpu_sumPii # -- Begin function _Z7cpu_sumPii
.p2align 4, 0x90
.type _Z7cpu_sumPii,@function
_Z7cpu_sumPii: # @_Z7cpu_sumPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB2_1
# %bb.3: # %.lr.ph.preheader
movl %esi, %ecx
xorl %edx, %edx
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addl (%rdi,%rdx,4), %eax
incq %rdx
cmpq %rdx, %rcx
jne .LBB2_4
# %bb.2: # %._crit_edge
retq
.LBB2_1:
xorl %eax, %eax
retq
.Lfunc_end2:
.size _Z7cpu_sumPii, .Lfunc_end2-_Z7cpu_sumPii
.cfi_endproc
# -- End function
.globl _Z31__device_stub__sum_kernel_blockPiS_i # -- Begin function _Z31__device_stub__sum_kernel_blockPiS_i
.p2align 4, 0x90
.type _Z31__device_stub__sum_kernel_blockPiS_i,@function
_Z31__device_stub__sum_kernel_blockPiS_i: # @_Z31__device_stub__sum_kernel_blockPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16sum_kernel_blockPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z31__device_stub__sum_kernel_blockPiS_i, .Lfunc_end3-_Z31__device_stub__sum_kernel_blockPiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin1:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception1
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $5096, %rsp # imm = 0x13E8
.cfi_def_cfa_offset 5136
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
cmpb $0, %fs:_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@TPOFF
je .LBB4_47
.LBB4_1: # %_ZNSt12experimental15fundamentals_v26reseedEm.exit
movq $1619508961, %fs:_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@TPOFF # imm = 0x6087BEE1
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $1619508961, %esi # imm = 0x6087BEE1
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB4_50
# %bb.2: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB4_4
# %bb.3:
movzbl 67(%rbx), %ecx
jmp .LBB4_5
.LBB4_4:
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB4_5: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
.cfi_escape 0x2e, 0x00
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.4, %esi
movl $15, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl n(%rip), %esi
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB4_51
# %bb.6: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i16
cmpb $0, 56(%rbx)
je .LBB4_8
# %bb.7:
movzbl 67(%rbx), %ecx
jmp .LBB4_9
.LBB4_8:
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB4_9: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit19
.cfi_escape 0x2e, 0x00
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl blockSize(%rip), %esi
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB4_52
# %bb.10: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i21
cmpb $0, 56(%rbx)
je .LBB4_12
# %bb.11:
movzbl 67(%rbx), %ecx
jmp .LBB4_13
.LBB4_12:
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB4_13: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit24
.cfi_escape 0x2e, 0x00
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl nBlocks(%rip), %esi
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB4_53
# %bb.14: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i26
cmpb $0, 56(%rbx)
je .LBB4_16
# %bb.15:
movzbl 67(%rbx), %ecx
jmp .LBB4_17
.LBB4_16:
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB4_17: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit29
.cfi_escape 0x2e, 0x00
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl sharedBytes(%rip), %esi
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r14
testq %r14, %r14
je .LBB4_54
# %bb.18: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i31
cmpb $0, 56(%r14)
je .LBB4_20
# %bb.19:
movzbl 67(%r14), %eax
jmp .LBB4_21
.LBB4_20:
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB4_21: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit34
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movq %rbx, %rdi
callq _ZNSo3putEc
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
movslq n(%rip), %r14
shlq $2, %r14
.cfi_escape 0x2e, 0x00
movl $4, %edi
callq malloc
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
callq malloc
movq %rax, %r15
.cfi_escape 0x2e, 0x00
leaq 16(%rsp), %rdi
movl $4, %esi
callq hipMalloc
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl $120, %esi
callq _ZL11HandleError10hipError_tPKci
.cfi_escape 0x2e, 0x00
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl $121, %esi
callq _ZL11HandleError10hipError_tPKci
movl n(%rip), %esi
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _Z8init_vecPii
movl n(%rip), %esi
cmpl $31, %esi
jg .LBB4_23
# %bb.22:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _Z8show_vecPii
.LBB4_23:
movq 8(%rsp), %rdi
.cfi_escape 0x2e, 0x00
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl $127, %esi
callq _ZL11HandleError10hipError_tPKci
movl nBlocks(%rip), %edi
movl blockSize(%rip), %edx
movslq sharedBytes(%rip), %r8
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
orq %rax, %rdx
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_25
# %bb.24:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movl n(%rip), %edx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl %edx, 28(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
.cfi_escape 0x2e, 0x00
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
.cfi_escape 0x2e, 0x10
leaq 96(%rsp), %r9
movl $_Z16sum_kernel_blockPiS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_25:
.cfi_escape 0x2e, 0x00
callq hipDeviceSynchronize
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl $131, %esi
callq _ZL11HandleError10hipError_tPKci
movq 16(%rsp), %rsi
.cfi_escape 0x2e, 0x00
movl $4, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
.cfi_escape 0x2e, 0x00
movl %eax, %edi
movl $132, %esi
callq _ZL11HandleError10hipError_tPKci
movl n(%rip), %eax
testl %eax, %eax
jle .LBB4_28
# %bb.26: # %.lr.ph.preheader.i
xorl %ecx, %ecx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_27: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addl (%r15,%rcx,4), %r14d
incq %rcx
cmpq %rcx, %rax
jne .LBB4_27
.LBB4_28: # %_Z7cpu_sumPii.exit
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.9, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl %r14d, %esi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r15
testq %r15, %r15
je .LBB4_55
# %bb.29: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i36
cmpb $0, 56(%r15)
je .LBB4_31
# %bb.30:
movzbl 67(%r15), %ecx
jmp .LBB4_32
.LBB4_31:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB4_32: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit39
.cfi_escape 0x2e, 0x00
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.10, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%rbx), %esi
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r15
testq %r15, %r15
je .LBB4_56
# %bb.33: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i41
cmpb $0, 56(%r15)
je .LBB4_35
# %bb.34:
movzbl 67(%r15), %ecx
jmp .LBB4_36
.LBB4_35:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB4_36: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit44
.cfi_escape 0x2e, 0x00
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
cmpl (%rbx), %r14d
jne .LBB4_40
# %bb.37:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.11, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB4_57
# %bb.38: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i46
cmpb $0, 56(%rbx)
jne .LBB4_39
# %bb.43:
.cfi_escape 0x2e, 0x00
jmp .LBB4_45
.LBB4_40:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.12, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB4_58
# %bb.41: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i51
cmpb $0, 56(%rbx)
je .LBB4_44
.LBB4_39:
movzbl 67(%rbx), %eax
jmp .LBB4_46
.LBB4_44:
.cfi_escape 0x2e, 0x00
.LBB4_45: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit49
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB4_46: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit49
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
movq 8(%rsp), %rdi
.cfi_escape 0x2e, 0x00
callq hipFree
movq 16(%rsp), %rdi
.cfi_escape 0x2e, 0x00
callq hipFree
xorl %eax, %eax
addq $5096, %rsp # imm = 0x13E8
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB4_47:
.cfi_def_cfa_offset 5136
.cfi_escape 0x2e, 0x00
leaq 96(%rsp), %rbx
movq %rbx, %rdi
callq _ZNSt13random_deviceC2Ev
.Ltmp12:
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZNSt13random_device9_M_getvalEv
# kill: def $eax killed $eax def $rax
.Ltmp13:
# %bb.48: # %_ZNSt13random_deviceclEv.exit.i.i
movl %eax, %ecx
leaq (%rcx,%rcx,2), %rcx
shrq $32, %rcx
movl %eax, %edx
subl %ecx, %edx
shrl %edx
addl %ecx, %edx
shrl $30, %edx
movl %edx, %ecx
shll $31, %ecx
orl %edx, %ecx
leal (%rax,%rcx), %edx
cmpl $1, %edx
adcl %eax, %ecx
movq %rcx, %fs:_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@TPOFF
.Ltmp18:
.cfi_escape 0x2e, 0x00
leaq 96(%rsp), %rdi
callq _ZNSt13random_device7_M_finiEv
.Ltmp19:
# %bb.49: # %_ZNSt13random_deviceD2Ev.exit.i.i
movb $1, %fs:_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng@TPOFF
jmp .LBB4_1
.LBB4_50:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_51:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_52:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_53:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_54:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_55:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_56:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_57:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_58:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.LBB4_59:
.Ltmp20:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq __clang_call_terminate
.LBB4_60:
.Ltmp14:
movq %rax, %rbx
.Ltmp15:
.cfi_escape 0x2e, 0x00
leaq 96(%rsp), %rdi
callq _ZNSt13random_device7_M_finiEv
.Ltmp16:
# %bb.61: # %_ZNSt13random_deviceD2Ev.exit1.i.i
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.LBB4_62:
.Ltmp17:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq __clang_call_terminate
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table4:
.Lexception1:
.byte 255 # @LPStart Encoding = omit
.byte 3 # @TType Encoding = udata4
.uleb128 .Lttbase1-.Lttbaseref1
.Lttbaseref1:
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end1-.Lcst_begin1
.Lcst_begin1:
.uleb128 .Lfunc_begin1-.Lfunc_begin1 # >> Call Site 1 <<
.uleb128 .Ltmp12-.Lfunc_begin1 # Call between .Lfunc_begin1 and .Ltmp12
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp12-.Lfunc_begin1 # >> Call Site 2 <<
.uleb128 .Ltmp13-.Ltmp12 # Call between .Ltmp12 and .Ltmp13
.uleb128 .Ltmp14-.Lfunc_begin1 # jumps to .Ltmp14
.byte 0 # On action: cleanup
.uleb128 .Ltmp18-.Lfunc_begin1 # >> Call Site 3 <<
.uleb128 .Ltmp19-.Ltmp18 # Call between .Ltmp18 and .Ltmp19
.uleb128 .Ltmp20-.Lfunc_begin1 # jumps to .Ltmp20
.byte 1 # On action: 1
.uleb128 .Ltmp19-.Lfunc_begin1 # >> Call Site 4 <<
.uleb128 .Ltmp15-.Ltmp19 # Call between .Ltmp19 and .Ltmp15
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp15-.Lfunc_begin1 # >> Call Site 5 <<
.uleb128 .Ltmp16-.Ltmp15 # Call between .Ltmp15 and .Ltmp16
.uleb128 .Ltmp17-.Lfunc_begin1 # jumps to .Ltmp17
.byte 1 # On action: 1
.uleb128 .Ltmp16-.Lfunc_begin1 # >> Call Site 6 <<
.uleb128 .Lfunc_end4-.Ltmp16 # Call between .Ltmp16 and .Lfunc_end4
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end1:
.byte 1 # >> Action Record 1 <<
# Catch TypeInfo 1
.byte 0 # No further actions
.p2align 2, 0x0
# >> Catch TypeInfos <<
.long 0 # TypeInfo 1
.Lttbase1:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function _ZL11HandleError10hipError_tPKci
.type _ZL11HandleError10hipError_tPKci,@function
_ZL11HandleError10hipError_tPKci: # @_ZL11HandleError10hipError_tPKci
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB5_2
# %bb.1:
retq
.LBB5_2:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movl %esi, %ebx
callq hipGetErrorString
movl $_ZSt4cout, %edi
movq %rax, %rsi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl $.L.str.16, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl $.L.str.8, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movl $.L.str.17, %esi
movq %rax, %rdi
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq %rax, %rdi
movl %ebx, %esi
callq _ZNSolsEi
movq %rax, %rdi
callq _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
movl $1, %edi
callq exit
.Lfunc_end5:
.size _ZL11HandleError10hipError_tPKci, .Lfunc_end5-_ZL11HandleError10hipError_tPKci
.cfi_endproc
# -- End function
.section .text._ZNSt13random_deviceC2Ev,"axG",@progbits,_ZNSt13random_deviceC2Ev,comdat
.weak _ZNSt13random_deviceC2Ev # -- Begin function _ZNSt13random_deviceC2Ev
.p2align 4, 0x90
.type _ZNSt13random_deviceC2Ev,@function
_ZNSt13random_deviceC2Ev: # @_ZNSt13random_deviceC2Ev
.Lfunc_begin2:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception2
# %bb.0: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IS3_EEPKcRKS3_.exit
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $40, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 24(%rsp), %r14
movq %r14, 8(%rsp)
movl $1634100580, 24(%rsp) # imm = 0x61666564
movl $1953264993, 27(%rsp) # imm = 0x746C7561
movq $7, 16(%rsp)
movb $0, 31(%rsp)
.Ltmp21:
leaq 8(%rsp), %rsi
callq _ZNSt13random_device7_M_initERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
.Ltmp22:
# %bb.1:
movq 8(%rsp), %rdi
cmpq %r14, %rdi
je .LBB6_3
# %bb.2: # %.critedge.i.i
callq _ZdlPv
.LBB6_3: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit
addq $40, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB6_4:
.cfi_def_cfa_offset 64
.Ltmp23:
movq %rax, %rbx
movq 8(%rsp), %rdi
cmpq %r14, %rdi
je .LBB6_6
# %bb.5: # %.critedge.i.i6
callq _ZdlPv
.LBB6_6: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit8
movq %rbx, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end6:
.size _ZNSt13random_deviceC2Ev, .Lfunc_end6-_ZNSt13random_deviceC2Ev
.cfi_endproc
.section .gcc_except_table._ZNSt13random_deviceC2Ev,"aG",@progbits,_ZNSt13random_deviceC2Ev,comdat
.p2align 2, 0x0
GCC_except_table6:
.Lexception2:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end2-.Lcst_begin2
.Lcst_begin2:
.uleb128 .Ltmp21-.Lfunc_begin2 # >> Call Site 1 <<
.uleb128 .Ltmp22-.Ltmp21 # Call between .Ltmp21 and .Ltmp22
.uleb128 .Ltmp23-.Lfunc_begin2 # jumps to .Ltmp23
.byte 0 # On action: cleanup
.uleb128 .Ltmp22-.Lfunc_begin2 # >> Call Site 2 <<
.uleb128 .Lfunc_end6-.Ltmp22 # Call between .Ltmp22 and .Lfunc_end6
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end2:
.p2align 2, 0x0
# -- End function
.section .text.__clang_call_terminate,"axG",@progbits,__clang_call_terminate,comdat
.hidden __clang_call_terminate # -- Begin function __clang_call_terminate
.weak __clang_call_terminate
.p2align 4, 0x90
.type __clang_call_terminate,@function
__clang_call_terminate: # @__clang_call_terminate
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq __cxa_begin_catch
callq _ZSt9terminatev
.Lfunc_end7:
.size __clang_call_terminate, .Lfunc_end7-__clang_call_terminate
.cfi_endproc
# -- End function
.section .text._ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE,"axG",@progbits,_ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE,comdat
.weak _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE # -- Begin function _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
.p2align 4, 0x90
.type _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE,@function
_ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE: # @_ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r14
movslq 4(%rdx), %rbp
movq %rdx, 16(%rsp) # 8-byte Spill
movslq (%rdx), %rax
subq %rax, %rbp
cmpq $2147483644, %rbp # imm = 0x7FFFFFFC
ja .LBB8_4
# %bb.1:
incq %rbp
movl $2147483645, %eax # imm = 0x7FFFFFFD
xorl %edx, %edx
divq %rbp
movq %rax, %rcx
imulq %rax, %rbp
movq (%r14), %rdx
movabsq $8589934597, %rdi # imm = 0x200000005
.p2align 4, 0x90
.LBB8_2: # =>This Inner Loop Header: Depth=1
imulq $16807, %rdx, %rsi # imm = 0x41A7
movq %rsi, %rax
mulq %rdi
movq %rsi, %rax
subq %rdx, %rax
shrq %rax
addq %rdx, %rax
shrq $30, %rax
movq %rax, %rdx
shlq $31, %rdx
subq %rdx, %rax
leaq (%rsi,%rax), %rdx
addq %rsi, %rax
decq %rax
cmpq %rbp, %rax
jae .LBB8_2
# %bb.3:
movq %rdx, (%r14)
xorl %edx, %edx
divq %rcx
jmp .LBB8_9
.LBB8_4:
cmpq $2147483645, %rbp # imm = 0x7FFFFFFD
jne .LBB8_5
# %bb.8:
imulq $16807, (%r14), %rcx # imm = 0x41A7
movabsq $8589934597, %rdx # imm = 0x200000005
movq %rcx, %rax
mulq %rdx
movq %rcx, %rax
subq %rdx, %rax
shrq %rax
addq %rdx, %rax
shrq $30, %rax
movq %rax, %rdx
shlq $31, %rdx
subq %rdx, %rax
leaq (%rcx,%rax), %rdx
movq %rdx, (%r14)
addq %rcx, %rax
decq %rax
jmp .LBB8_9
.LBB8_5: # %.preheader
movq %rdi, %r15
movq %rbp, %rax
shrq %rax
movabsq $-9223372028264841207, %rcx # imm = 0x8000000200000009
mulq %rcx
movq %rdx, %r12
shrq $29, %r12
leaq 8(%rsp), %r13
movabsq $8589934597, %rbx # imm = 0x200000005
.p2align 4, 0x90
.LBB8_6: # =>This Inner Loop Header: Depth=1
movl $0, 8(%rsp)
movl %r12d, 12(%rsp)
movq %r15, %rdi
movq %r14, %rsi
movq %r13, %rdx
callq _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
movslq %eax, %rsi
leaq (%rsi,%rsi), %rax
shlq $31, %rsi
subq %rax, %rsi
imulq $16807, (%r14), %rcx # imm = 0x41A7
movq %rcx, %rax
mulq %rbx
movq %rcx, %rax
subq %rdx, %rax
shrq %rax
addq %rdx, %rax
shrq $30, %rax
movq %rax, %rdx
shlq $31, %rdx
subq %rdx, %rax
addq %rcx, %rax
movq %rax, (%r14)
addq %rsi, %rax
decq %rax
cmpq %rbp, %rax
ja .LBB8_6
# %bb.7: # in Loop: Header=BB8_6 Depth=1
cmpq %rsi, %rax
jb .LBB8_6
.LBB8_9: # %.loopexit
movq 16(%rsp), %rcx # 8-byte Reload
addl (%rcx), %eax
# kill: def $eax killed $eax killed $rax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end8:
.size _ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE, .Lfunc_end8-_ZNSt24uniform_int_distributionIiEclISt26linear_congruential_engineImLm16807ELm0ELm2147483647EEEEiRT_RKNS0_10param_typeE
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB9_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB9_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16sum_kernel_blockPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end9:
.size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB10_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB10_2:
retq
.Lfunc_end10:
.size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n"
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ", "
.size .L.str.1, 3
.type _Z16sum_kernel_blockPiS_i,@object # @_Z16sum_kernel_blockPiS_i
.section .rodata,"a",@progbits
.globl _Z16sum_kernel_blockPiS_i
.p2align 3, 0x0
_Z16sum_kernel_blockPiS_i:
.quad _Z31__device_stub__sum_kernel_blockPiS_i
.size _Z16sum_kernel_blockPiS_i, 8
.type n,@object # @n
.data
.globl n
.p2align 2, 0x0
n:
.long 16 # 0x10
.size n, 4
.type blockSize,@object # @blockSize
.globl blockSize
.p2align 2, 0x0
blockSize:
.long 2 # 0x2
.size blockSize, 4
.type nBlocks,@object # @nBlocks
.globl nBlocks
.p2align 2, 0x0
nBlocks:
.long 2 # 0x2
.size nBlocks, 4
.type sharedBytes,@object # @sharedBytes
.globl sharedBytes
.p2align 2, 0x0
sharedBytes:
.long 8 # 0x8
.size sharedBytes, 4
.type .L.str.3,@object # @.str.3
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.3:
.asciz "seed = "
.size .L.str.3, 8
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nn = "
.size .L.str.4, 16
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "blockSize = "
.size .L.str.5, 15
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "nBlocks = "
.size .L.str.6, 15
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "sharedBytes = "
.size .L.str.7, 15
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/Nyriu/Esercizi_Esempi_CUDA/main/l08_s12_reducesum_my/main.hip"
.size .L.str.8, 119
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "c_sum = "
.size .L.str.9, 9
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "g_sum = "
.size .L.str.10, 9
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "\nCorrect"
.size .L.str.11, 9
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "\nWRONG!"
.size .L.str.12, 8
.type _ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,@object # @_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng
.section .tbss._ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,"awTG",@nobits,_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,comdat
.weak _ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng
.p2align 3, 0x0
_ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng:
.zero 8
.size _ZZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng, 8
.type _ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,@object # @_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng
.section .tbss._ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,"awTG",@nobits,_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng,comdat
.weak _ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng
.p2align 3, 0x0
_ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng:
.quad 0 # 0x0
.size _ZGVZNSt12experimental15fundamentals_v217_S_randint_engineEvE5__eng, 8
.type .L.str.13,@object # @.str.13
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.13:
.asciz "default"
.size .L.str.13, 8
.type .L.str.16,@object # @.str.16
.L.str.16:
.asciz " in "
.size .L.str.16, 5
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz " line "
.size .L.str.17, 7
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16sum_kernel_blockPiS_i"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__sum_kernel_blockPiS_i
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _ZSt4cout
.addrsig_sym _Z16sum_kernel_blockPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16sum_kernel_blockPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR5, c[0x0][0x170] ; /* 0x00005c0000057ab9 */
/* 0x000fe20000000800 */
/*0030*/ BSSY B0, 0x1a0 ; /* 0x0000016000007945 */
/* 0x000fe20003800000 */
/*0040*/ USHF.R.S32.HI UR4, URZ, 0x1f, UR5 ; /* 0x0000001f3f047899 */
/* 0x000fe20008011405 */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0070*/ IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff087224 */
/* 0x000fe200078e00ff */
/*0080*/ ULEA.HI UR4, UR4, UR5, URZ, 0x2 ; /* 0x0000000504047291 */
/* 0x000fe2000f8f103f */
/*0090*/ S2R R2, SR_TID.Z ; /* 0x0000000000027919 */
/* 0x000e660000002300 */
/*00a0*/ USHF.R.S32.HI UR4, URZ, 0x2, UR4 ; /* 0x000000023f047899 */
/* 0x000fe20008011404 */
/*00b0*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */
/* 0x000ea20000002200 */
/*00c0*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*00d0*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06270 */
/*00e0*/ @P0 BRA 0x190 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*00f0*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */
/* 0x006fe400000001ff */
/*0100*/ IMAD.MOV.U32 R5, RZ, RZ, 0x10 ; /* 0x00000010ff057424 */
/* 0x000fc800078e00ff */
/*0110*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fcc00078e0205 */
/*0120*/ LDG.E.128 R4, [R4.64] ; /* 0x0000000604047981 */
/* 0x000ea2000c1e1d00 */
/*0130*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0b7624 */
/* 0x000fc800078e00ff */
/*0140*/ IMAD R0, R11, c[0x0][0xc], R0 ; /* 0x000003000b007a24 */
/* 0x000fca00078e0200 */
/*0150*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe4000bf06270 */
/*0160*/ IADD3 R8, R4, R5, R8 ; /* 0x0000000504087210 */
/* 0x004fc80007ffe008 */
/*0170*/ IADD3 R8, R7, R8, R6 ; /* 0x0000000807087210 */
/* 0x000fce0007ffe006 */
/*0180*/ @!P0 BRA 0x100 ; /* 0xffffff7000008947 */
/* 0x000fea000383ffff */
/*0190*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x006fea0003800000 */
/*01a0*/ ULDC.64 UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe20000000a00 */
/*01b0*/ IMAD R0, R2, c[0x0][0x4], R9 ; /* 0x0000010002007a24 */
/* 0x000fe200078e0209 */
/*01c0*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fe2000f8e023f */
/*01d0*/ IADD3 R5, -R3, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x000fe40007ffe1ff */
/*01e0*/ ULDC UR5, c[0x0][0x8] ; /* 0x0000020000057ab9 */
/* 0x000fe20000000800 */
/*01f0*/ IMAD R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a24 */
/* 0x000fe200078e02ff */
/*0200*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc8000f8e023f */
/*0210*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fe20008011604 */
/*0220*/ ISETP.NE.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x000fca0003f05270 */
/*0230*/ ISETP.NE.AND P1, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fda000bf25270 */
/*0240*/ @!P1 BRA 0x330 ; /* 0x000000e000009947 */
/* 0x000fea0003800000 */
/*0250*/ IMAD.IADD R3, R0, 0x1, R3 ; /* 0x0000000100037824 */
/* 0x000fe400078e0203 */
/*0260*/ IMAD.U32 R0, RZ, RZ, UR4 ; /* 0x00000004ff007e24 */
/* 0x000fc6000f8e00ff */
/*0270*/ SHF.L.U32 R5, R3, 0x2, RZ ; /* 0x0000000203057819 */
/* 0x000fe400000006ff */
/*0280*/ ISETP.GT.AND P1, PT, R0.reuse, R3, PT ; /* 0x000000030000720c */
/* 0x040fe20003f24270 */
/*0290*/ STS [R3.X4], R8 ; /* 0x0000000803007388 */
/* 0x000fe80000004800 */
/*02a0*/ BAR.SYNC 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000000000 */
/*02b0*/ ISETP.GT.AND P2, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fc40003f44270 */
/*02c0*/ @P1 IMAD R2, R0.reuse, 0x4, R5 ; /* 0x0000000400021824 */
/* 0x040fe200078e0205 */
/*02d0*/ LEA.HI R4, R0, R0, RZ, 0x1 ; /* 0x0000000000047211 */
/* 0x000fc800078f08ff */
/*02e0*/ @P1 LDS R7, [R2] ; /* 0x0000000002071984 */
/* 0x000e220000000800 */
/*02f0*/ SHF.R.S32.HI R0, RZ, 0x1, R4 ; /* 0x00000001ff007819 */
/* 0x000fe20000011404 */
/*0300*/ @P1 IMAD.IADD R8, R8, 0x1, R7 ; /* 0x0000000108081824 */
/* 0x001fe400078e0207 */
/*0310*/ BAR.SYNC 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000000000 */
/*0320*/ @P2 BRA 0x280 ; /* 0xffffff5000002947 */
/* 0x000fea000383ffff */
/*0330*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0340*/ S2R R0, SR_LANEID ; /* 0x0000000000007919 */
/* 0x000e220000000000 */
/*0350*/ REDUX.SUM UR5, R8 ; /* 0x00000000080573c4 */
/* 0x000e62000000c000 */
/*0360*/ VOTEU.ANY UR4, UPT, PT ; /* 0x0000000000047886 */
/* 0x000fe200038e0100 */
/*0370*/ MOV R2, c[0x0][0x160] ; /* 0x0000580000027a02 */
/* 0x000fe20000000f00 */
/*0380*/ UFLO.U32 UR4, UR4 ; /* 0x00000004000472bd */
/* 0x000fe200080e0000 */
/*0390*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fca00078e00ff */
/*03a0*/ ISETP.EQ.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x001fe4000bf02070 */
/*03b0*/ MOV R5, UR5 ; /* 0x0000000500057c02 */
/* 0x002fd60008000f00 */
/*03c0*/ @P0 RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200098e */
/* 0x000fe2000c10e186 */
/*03d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03e0*/ BRA 0x3e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16sum_kernel_blockPiS_i
.globl _Z16sum_kernel_blockPiS_i
.p2align 8
.type _Z16sum_kernel_blockPiS_i,@function
_Z16sum_kernel_blockPiS_i:
s_load_b32 s8, s[0:1], 0x18
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_mov_b32 s6, exec_lo
v_mov_b32_e32 v4, 0
s_waitcnt lgkmcnt(0)
s_cmp_lt_u32 s15, s8
s_cselect_b32 s4, 12, 18
s_delay_alu instid0(SALU_CYCLE_1)
v_mov_b32_e32 v1, s4
s_load_b32 s4, s[0:1], 0x10
global_load_u16 v5, v1, s[2:3]
v_and_b32_e32 v1, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_ashr_i32 s5, s4, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshr_b32 s5, s5, 30
s_add_i32 s4, s4, s5
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_ashr_i32 s7, s4, 2
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[2:3], null, s15, v5, v[1:2]
v_cmpx_gt_i32_e64 s7, v2
s_cbranch_execz .LBB0_4
s_load_b64 s[4:5], s[0:1], 0x8
v_mul_lo_u32 v5, s8, v5
v_mov_b32_e32 v4, 0
s_mov_b32 s8, 0
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[6:7], 4, v[2:3]
v_add_nc_u32_e32 v2, v2, v5
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, s4, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_cmp_le_i32_e32 vcc_lo, s7, v2
global_load_b128 v[6:9], v[6:7], off
s_or_b32 s8, vcc_lo, s8
s_waitcnt vmcnt(0)
v_add3_u32 v3, v6, v4, v7
v_add3_u32 v4, v3, v8, v9
s_and_not1_b32 exec_lo, exec_lo, s8
s_cbranch_execnz .LBB0_2
s_or_b32 exec_lo, exec_lo, s8
.LBB0_4:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s6
s_load_b64 s[4:5], s[2:3], 0xc
v_bfe_u32 v5, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s4, 0xffff
s_lshr_b32 s3, s4, 16
s_and_b32 s4, s5, 0xffff
s_mul_i32 s5, s3, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s4, s5, s4
s_cmp_lt_u32 s4, 2
s_cbranch_scc1 .LBB0_10
v_mad_u32_u24 v6, v0, s3, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v6, s2, v[1:2]
v_lshl_add_u32 v3, v2, 2, 0
.p2align 6
.LBB0_6:
s_lshr_b32 s5, s4, 1
s_mov_b32 s6, exec_lo
ds_store_b32 v3, v4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_i32_e64 s5, v2
s_cbranch_execz .LBB0_8
v_lshl_add_u32 v6, s5, 2, v3
ds_load_b32 v6, v6
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v4, v6, v4
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s6
s_cmp_gt_u32 s4, 3
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_10
s_mov_b32 s4, s5
s_branch .LBB0_6
.LBB0_10:
v_mad_u32_u24 v0, v0, s3, v5
v_sub_nc_u32_e32 v1, 0, v1
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v0, s2
s_mov_b32 s2, 0
v_cmpx_eq_u32_e64 v0, v1
s_cbranch_execz .LBB0_15
s_mov_b32 s3, exec_lo
.LBB0_12:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ctz_i32_b32 s4, s3
v_readlane_b32 s5, v4, s4
s_lshl_b32 s4, 1, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_and_not1_b32 s3, s3, s4
s_add_i32 s2, s2, s5
s_cmp_lg_u32 s3, 0
s_cbranch_scc1 .LBB0_12
v_mbcnt_lo_u32_b32 v0, exec_lo, 0
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v0
s_xor_b32 s3, exec_lo, s3
s_cbranch_execz .LBB0_15
s_load_b64 s[0:1], s[0:1], 0x0
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
s_waitcnt lgkmcnt(0)
global_atomic_add_u32 v0, v1, s[0:1]
.LBB0_15:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16sum_kernel_blockPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16sum_kernel_blockPiS_i, .Lfunc_end0-_Z16sum_kernel_blockPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16sum_kernel_blockPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16sum_kernel_blockPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_DIM 2 // submatrix (thread-block tile) size
int M, K;
using namespace std;
// Element-wise matrix addition C = A + B for an M x K int matrix.
// Expects a 2D launch where the x dimension covers [0, M) and the y
// dimension covers [0, K); A, B, C are device buffers of M*K ints.
__global__ void matrixAdd (int *A, int *B, int *C, int M, int K) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Linearize as col * K + row: with col in [0, M) and row in [0, K)
    // this maps bijectively onto [0, M*K). The previous col * M + row
    // indexing over-ran the buffer whenever M > K (max index
    // (M-1)*M + (K-1) >= M*K).
    int index = col * K + row;
    // Guard the grid tail: the rounded-up grid rarely divides M, K evenly.
    if (col < M && row < K) {
        C[index] = A[index] + B[index];
    }
}
// Entry point: reads M and K from stdin, builds M*K int matrices A (all 2s)
// and B (all 1s), adds them on the GPU with matrixAdd, and prints the result.
int main() {
cout << "M: ";
cin >> M;
cout << "K: ";
cin >> K;
int *A = new int [M*K];
int *B = new int [M*K];
int *C = new int [M*K];
// Fill the host matrices.
// NOTE(review): for an M x K matrix the linear index i*M+j is suspicious --
// it writes past the M*K allocation whenever K > M; i*K+j looks intended.
for(int i=0; i<M; i++)
for (int j=0; j<K; j++){
A[i*M+j] = 2;
B[i*M+j] = 1;
C[i*M+j] = 0;
}
int *dev_a, *dev_b, *dev_c; // pointers to the device allocations
int size = M * K * sizeof(int); // allocation size in bytes
// NOTE(review): none of the CUDA calls below have their return codes checked.
cudaMalloc((void**)&dev_a, size); // allocate device memory
cudaMalloc((void**)&dev_b, size);
cudaMalloc((void**)&dev_c, size);
cudaMemcpy(dev_a, A, size, cudaMemcpyHostToDevice); // copy inputs to the GPU
cudaMemcpy(dev_b, B, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); // threads per block (2x2)
dim3 dimGrid((M+dimBlock.x-1)/dimBlock.x, (K+dimBlock.y-1)/dimBlock.y); // grid size and shape, rounded up
printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y); // report the grid size
matrixAdd<<<dimGrid,dimBlock>>>(dev_a, dev_b, dev_c, M, K); // kernel launch
cudaDeviceSynchronize();
cudaMemcpy(C, dev_c, size, cudaMemcpyDeviceToHost);
// Print the result.
// NOTE(review): the inner loop prints C[i] K times instead of the (i, j)
// element; C[i*K+j] (matching the fill indexing) looks intended.
printf("Result Matrix C:\n");
for(int i=0; i<M; i++){
for (int j=0; j<K; j++){
printf("%d\t", C[i] );
}
printf("\n");
}
// NOTE(review): A, B and C are never delete[]d (leaked until process exit).
cudaFree(dev_a); // free device memory
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | code for sm_80
Function : _Z9matrixAddPiS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R0, c[0x0][0x178], R3 ; /* 0x00005e0000007a24 */
/* 0x000fe200078e0203 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*0120*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_DIM 2 // submatrix (thread-block tile) size
int M, K;
using namespace std;
// Element-wise matrix addition C = A + B for an M x K int matrix.
// Expects a 2D launch where the x dimension covers [0, M) and the y
// dimension covers [0, K); A, B, C are device buffers of M*K ints.
__global__ void matrixAdd (int *A, int *B, int *C, int M, int K) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Linearize as col * K + row: with col in [0, M) and row in [0, K)
    // this maps bijectively onto [0, M*K). The previous col * M + row
    // indexing over-ran the buffer whenever M > K (max index
    // (M-1)*M + (K-1) >= M*K).
    int index = col * K + row;
    // Guard the grid tail: the rounded-up grid rarely divides M, K evenly.
    if (col < M && row < K) {
        C[index] = A[index] + B[index];
    }
}
// Entry point: reads M and K from stdin, builds M*K int matrices A (all 2s)
// and B (all 1s), adds them on the GPU with matrixAdd, and prints the result.
int main() {
cout << "M: ";
cin >> M;
cout << "K: ";
cin >> K;
int *A = new int [M*K];
int *B = new int [M*K];
int *C = new int [M*K];
// Fill the host matrices.
// NOTE(review): for an M x K matrix the linear index i*M+j is suspicious --
// it writes past the M*K allocation whenever K > M; i*K+j looks intended.
for(int i=0; i<M; i++)
for (int j=0; j<K; j++){
A[i*M+j] = 2;
B[i*M+j] = 1;
C[i*M+j] = 0;
}
int *dev_a, *dev_b, *dev_c; // pointers to the device allocations
int size = M * K * sizeof(int); // allocation size in bytes
// NOTE(review): none of the CUDA calls below have their return codes checked.
cudaMalloc((void**)&dev_a, size); // allocate device memory
cudaMalloc((void**)&dev_b, size);
cudaMalloc((void**)&dev_c, size);
cudaMemcpy(dev_a, A, size, cudaMemcpyHostToDevice); // copy inputs to the GPU
cudaMemcpy(dev_b, B, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); // threads per block (2x2)
dim3 dimGrid((M+dimBlock.x-1)/dimBlock.x, (K+dimBlock.y-1)/dimBlock.y); // grid size and shape, rounded up
printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y); // report the grid size
matrixAdd<<<dimGrid,dimBlock>>>(dev_a, dev_b, dev_c, M, K); // kernel launch
cudaDeviceSynchronize();
cudaMemcpy(C, dev_c, size, cudaMemcpyDeviceToHost);
// Print the result.
// NOTE(review): the inner loop prints C[i] K times instead of the (i, j)
// element; C[i*K+j] (matching the fill indexing) looks intended.
printf("Result Matrix C:\n");
for(int i=0; i<M; i++){
for (int j=0; j<K; j++){
printf("%d\t", C[i] );
}
printf("\n");
}
// NOTE(review): A, B and C are never delete[]d (leaked until process exit).
cudaFree(dev_a); // free device memory
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | .file "tmpxft_0007c340_00000000-6_sum_two-dimensional_arrays.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii
.type _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii, @function
_Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9matrixAddPiS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii, .-_Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii
.globl _Z9matrixAddPiS_S_ii
.type _Z9matrixAddPiS_S_ii, @function
_Z9matrixAddPiS_S_ii:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z9matrixAddPiS_S_ii, .-_Z9matrixAddPiS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "M: "
.LC1:
.string "K: "
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "dimGrid.x = %d, dimGrid.y = %d\n"
.section .rodata.str1.1
.LC3:
.string "Result Matrix C:\n"
.LC4:
.string "%d\t"
.LC5:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq M(%rip), %rsi
leaq _ZSt3cin(%rip), %rbx
movq %rbx, %rdi
call _ZNSirsERi@PLT
leaq .LC1(%rip), %rsi
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq K(%rip), %rsi
movq %rbx, %rdi
call _ZNSirsERi@PLT
movl M(%rip), %eax
imull K(%rip), %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L12
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %r12
movl M(%rip), %eax
imull K(%rip), %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L38
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %rbx
movl M(%rip), %eax
imull K(%rip), %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L39
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %rbp
movl M(%rip), %edi
testl %edi, %edi
jle .L19
movl K(%rip), %r8d
movl $0, %esi
movl $0, %ecx
movslq %r8d, %r9
jmp .L20
.L12:
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L15
call __stack_chk_fail@PLT
.L15:
call __cxa_throw_bad_array_new_length@PLT
.L38:
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L18
call __stack_chk_fail@PLT
.L18:
call __cxa_throw_bad_array_new_length@PLT
.L39:
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L21
call __stack_chk_fail@PLT
.L21:
call __cxa_throw_bad_array_new_length@PLT
.L23:
movslq %esi, %rdx
leaq 0(,%rdx,4), %rax
addq %r9, %rdx
salq $2, %rdx
.L22:
movl $2, (%r12,%rax)
movl $1, (%rbx,%rax)
movl $0, 0(%rbp,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L22
.L24:
addl $1, %ecx
addl %edi, %esi
cmpl %edi, %ecx
je .L19
.L20:
testl %r8d, %r8d
jg .L23
jmp .L24
.L19:
imull K(%rip), %edi
leal 0(,%rdi,4), %r13d
movslq %r13d, %r13
leaq 8(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r13, %rdx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 40(%rsp)
movl K(%rip), %eax
leal 1(%rax), %ebx
shrl %ebx
movl M(%rip), %eax
leal 1(%rax), %r12d
shrl %r12d
movl $1, 52(%rsp)
movl %ebx, %ecx
movl %r12d, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r12d, 44(%rsp)
movl %ebx, 48(%rsp)
movl $2, 32(%rsp)
movl $2, 36(%rsp)
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl 52(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L25:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %r13, %rdx
movq 24(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, M(%rip)
jle .L26
movl $0, %r13d
leaq .LC4(%rip), %r12
leaq .LC5(%rip), %r14
jmp .L27
.L40:
movl K(%rip), %r8d
movl M(%rip), %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii
jmp .L25
.L28:
movl 0(%rbp), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, K(%rip)
jg .L28
.L29:
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq $4, %rbp
cmpl %r13d, M(%rip)
jle .L26
.L27:
movl $0, %ebx
cmpl $0, K(%rip)
jg .L28
jmp .L29
.L26:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L41
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z9matrixAddPiS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z9matrixAddPiS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl K
.bss
.align 4
.type K, @object
.size K, 4
K:
.zero 4
.globl M
.align 4
.type M, @object
.size M, 4
M:
.zero 4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_DIM 2 // submatrix (thread-block tile) size
int M, K;
using namespace std;
// Element-wise matrix addition C = A + B for an M x K int matrix.
// Expects a 2D launch where the x dimension covers [0, M) and the y
// dimension covers [0, K); A, B, C are device buffers of M*K ints.
__global__ void matrixAdd (int *A, int *B, int *C, int M, int K) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Linearize as col * K + row: with col in [0, M) and row in [0, K)
    // this maps bijectively onto [0, M*K). The previous col * M + row
    // indexing over-ran the buffer whenever M > K (max index
    // (M-1)*M + (K-1) >= M*K).
    int index = col * K + row;
    // Guard the grid tail: the rounded-up grid rarely divides M, K evenly.
    if (col < M && row < K) {
        C[index] = A[index] + B[index];
    }
}
// Entry point: reads M and K from stdin, builds M*K int matrices A (all 2s)
// and B (all 1s), adds them on the GPU with matrixAdd, and prints the result.
int main() {
cout << "M: ";
cin >> M;
cout << "K: ";
cin >> K;
int *A = new int [M*K];
int *B = new int [M*K];
int *C = new int [M*K];
// Fill the host matrices.
// NOTE(review): for an M x K matrix the linear index i*M+j is suspicious --
// it writes past the M*K allocation whenever K > M; i*K+j looks intended.
for(int i=0; i<M; i++)
for (int j=0; j<K; j++){
A[i*M+j] = 2;
B[i*M+j] = 1;
C[i*M+j] = 0;
}
int *dev_a, *dev_b, *dev_c; // pointers to the device allocations
int size = M * K * sizeof(int); // allocation size in bytes
// NOTE(review): none of the CUDA calls below have their return codes checked.
cudaMalloc((void**)&dev_a, size); // allocate device memory
cudaMalloc((void**)&dev_b, size);
cudaMalloc((void**)&dev_c, size);
cudaMemcpy(dev_a, A, size, cudaMemcpyHostToDevice); // copy inputs to the GPU
cudaMemcpy(dev_b, B, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); // threads per block (2x2)
dim3 dimGrid((M+dimBlock.x-1)/dimBlock.x, (K+dimBlock.y-1)/dimBlock.y); // grid size and shape, rounded up
printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y); // report the grid size
matrixAdd<<<dimGrid,dimBlock>>>(dev_a, dev_b, dev_c, M, K); // kernel launch
cudaDeviceSynchronize();
cudaMemcpy(C, dev_c, size, cudaMemcpyDeviceToHost);
// Print the result.
// NOTE(review): the inner loop prints C[i] K times instead of the (i, j)
// element; C[i*K+j] (matching the fill indexing) looks intended.
printf("Result Matrix C:\n");
for(int i=0; i<M; i++){
for (int j=0; j<K; j++){
printf("%d\t", C[i] );
}
printf("\n");
}
// NOTE(review): A, B and C are never delete[]d (leaked until process exit).
cudaFree(dev_a); // free device memory
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} | #include <iostream>
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_DIM 2 // submatrix (thread-block tile) size
int M, K;
using namespace std;
// Element-wise matrix addition C = A + B for an M x K int matrix.
// Expects a 2D launch where the x dimension covers [0, M) and the y
// dimension covers [0, K); A, B, C are device buffers of M*K ints.
__global__ void matrixAdd (int *A, int *B, int *C, int M, int K) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Linearize as col * K + row: with col in [0, M) and row in [0, K)
    // this maps bijectively onto [0, M*K). The previous col * M + row
    // indexing over-ran the buffer whenever M > K (max index
    // (M-1)*M + (K-1) >= M*K).
    int index = col * K + row;
    // Guard the grid tail: the rounded-up grid rarely divides M, K evenly.
    if (col < M && row < K) {
        C[index] = A[index] + B[index];
    }
}
// Entry point: reads M and K from stdin, builds M*K int matrices A (all 2s)
// and B (all 1s), adds them on the GPU with matrixAdd, prints the result,
// and releases all host and device memory.
// NOTE(review): the HIP API return codes are still unchecked, matching the
// rest of this file's style.
int main() {
    cout << "M: ";
    cin >> M;
    cout << "K: ";
    cin >> K;
    int *A = new int [M*K];
    int *B = new int [M*K];
    int *C = new int [M*K];
    // Row-major fill: element (i, j) of an M x K matrix lives at i*K + j.
    // (The previous i*M + j indexing wrote past the allocation when K > M.)
    for(int i=0; i<M; i++)
        for (int j=0; j<K; j++){
            A[i*K+j] = 2;
            B[i*K+j] = 1;
            C[i*K+j] = 0;
        }
    int *dev_a, *dev_b, *dev_c;     // device buffers
    int size = M * K * sizeof(int); // buffer size in bytes
    hipMalloc((void**)&dev_a, size); // allocate device memory
    hipMalloc((void**)&dev_b, size);
    hipMalloc((void**)&dev_c, size);
    hipMemcpy(dev_a, A, size, hipMemcpyHostToDevice); // copy inputs to the GPU
    hipMemcpy(dev_b, B, size, hipMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); // threads per block (2x2)
    dim3 dimGrid((M+dimBlock.x-1)/dimBlock.x, (K+dimBlock.y-1)/dimBlock.y); // grid size, rounded up
    printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y);
    matrixAdd<<<dimGrid,dimBlock>>>(dev_a, dev_b, dev_c, M, K); // kernel launch
    hipDeviceSynchronize();
    hipMemcpy(C, dev_c, size, hipMemcpyDeviceToHost);
    // Print every element of the result.
    // (Previously the inner loop printed C[i] K times instead of C[i*K+j].)
    printf("Result Matrix C:\n");
    for(int i=0; i<M; i++){
        for (int j=0; j<K; j++){
            printf("%d\t", C[i*K+j]);
        }
        printf("\n");
    }
    hipFree(dev_a); // release device memory
    hipFree(dev_b);
    hipFree(dev_c);
    delete[] A;     // release host memory (previously leaked)
    delete[] B;
    delete[] C;
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_DIM 2 // submatrix (thread-block tile) size
int M, K;
using namespace std;
// Element-wise matrix addition C = A + B for an M x K int matrix.
// Expects a 2D launch where the x dimension covers [0, M) and the y
// dimension covers [0, K); A, B, C are device buffers of M*K ints.
__global__ void matrixAdd (int *A, int *B, int *C, int M, int K) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Linearize as col * K + row: with col in [0, M) and row in [0, K)
    // this maps bijectively onto [0, M*K). The previous col * M + row
    // indexing over-ran the buffer whenever M > K (max index
    // (M-1)*M + (K-1) >= M*K).
    int index = col * K + row;
    // Guard the grid tail: the rounded-up grid rarely divides M, K evenly.
    if (col < M && row < K) {
        C[index] = A[index] + B[index];
    }
}
// Entry point: reads M and K from stdin, builds M*K int matrices A (all 2s)
// and B (all 1s), adds them on the GPU with matrixAdd, and prints the result.
int main() {
cout << "M: ";
cin >> M;
cout << "K: ";
cin >> K;
int *A = new int [M*K];
int *B = new int [M*K];
int *C = new int [M*K];
// Fill the host matrices.
// NOTE(review): for an M x K matrix the linear index i*M+j is suspicious --
// it writes past the M*K allocation whenever K > M; i*K+j looks intended.
for(int i=0; i<M; i++)
for (int j=0; j<K; j++){
A[i*M+j] = 2;
B[i*M+j] = 1;
C[i*M+j] = 0;
}
int *dev_a, *dev_b, *dev_c; // pointers to the device allocations
int size = M * K * sizeof(int); // allocation size in bytes
// NOTE(review): none of the HIP calls below have their return codes checked.
hipMalloc((void**)&dev_a, size); // allocate device memory
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_c, size);
hipMemcpy(dev_a, A, size, hipMemcpyHostToDevice); // copy inputs to the GPU
hipMemcpy(dev_b, B, size, hipMemcpyHostToDevice);
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); // threads per block (2x2)
dim3 dimGrid((M+dimBlock.x-1)/dimBlock.x, (K+dimBlock.y-1)/dimBlock.y); // grid size and shape, rounded up
printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y); // report the grid size
matrixAdd<<<dimGrid,dimBlock>>>(dev_a, dev_b, dev_c, M, K); // kernel launch
hipDeviceSynchronize();
hipMemcpy(C, dev_c, size, hipMemcpyDeviceToHost);
// Print the result.
// NOTE(review): the inner loop prints C[i] K times instead of the (i, j)
// element; C[i*K+j] (matching the fill indexing) looks intended.
printf("Result Matrix C:\n");
for(int i=0; i<M; i++){
for (int j=0; j<K; j++){
printf("%d\t", C[i] );
}
printf("\n");
}
// NOTE(review): A, B and C are never delete[]d (leaked until process exit).
hipFree(dev_a); // free device memory
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9matrixAddPiS_S_ii
.globl _Z9matrixAddPiS_S_ii
.p2align 8
.type _Z9matrixAddPiS_S_ii,@function
_Z9matrixAddPiS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[8:11], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s4, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s8, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9matrixAddPiS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9matrixAddPiS_S_ii, .Lfunc_end0-_Z9matrixAddPiS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9matrixAddPiS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9matrixAddPiS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_DIM 2 //размер субматрицы
int M, K;
using namespace std;
// GPU kernel: element-wise addition C = A + B over an M x K integer matrix.
// Launched with a 2D grid where x covers [0, M) and y covers [0, K).
__global__ void matrixAdd (int *A, int *B, int *C, int M, int K) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// NOTE(review): index = col * M + row can exceed M*K - 1 when M > K
// (e.g. M=4, K=2 yields max index 13 for an 8-element buffer). A row-major
// M x K layout would use col * K + row -- verify the intended layout.
int index = col * M + row;
// addition on the GPU
if (col < M && row < K) {
C[index] = A[index] + B[index];
}
}
// Host driver: reads dimensions M and K, fills two M x K integer matrices,
// adds them on the GPU via matrixAdd, and prints the result matrix.
int main() {
cout << "M: ";
cin >> M;
cout << "K: ";
cin >> K;
int *A = new int [M*K];
int *B = new int [M*K];
int *C = new int [M*K];
// fill the matrices
// NOTE(review): the row stride here is M, but the buffers hold M*K ints;
// for M > K the index i*M+j runs past the end (max (M-1)*M + K-1). The
// row-major stride for an M x K matrix would be K (i*K+j) -- verify intent.
for(int i=0; i<M; i++)
for (int j=0; j<K; j++){
A[i*M+j] = 2;
B[i*M+j] = 1;
C[i*M+j] = 0;
}
int *dev_a, *dev_b, *dev_c; // pointers to device memory
int size = M * K * sizeof(int); // buffer size in bytes
hipMalloc((void**)&dev_a, size); // device allocations (return codes unchecked)
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_c, size);
hipMemcpy(dev_a, A, size, hipMemcpyHostToDevice); // copy inputs to the GPU
hipMemcpy(dev_b, B, size, hipMemcpyHostToDevice);
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); // threads per block (2 x 2)
dim3 dimGrid((M+dimBlock.x-1)/dimBlock.x, (K+dimBlock.y-1)/dimBlock.y); // ceil-div grid covering M x K
printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y); // report the grid size
matrixAdd<<<dimGrid,dimBlock>>>(dev_a, dev_b, dev_c, M, K); // kernel launch
hipDeviceSynchronize();
hipMemcpy(C, dev_c, size, hipMemcpyDeviceToHost);
// print the result
printf("Result Matrix C:\n");
for(int i=0; i<M; i++){
for (int j=0; j<K; j++){
// NOTE(review): C[i] ignores j, so each row prints one element K times;
// presumably C[i*K+j] (matching the layout chosen above) was intended.
printf("%d\t", C[i] );
}
printf("\n");
}
hipFree(dev_a); // release device memory
hipFree(dev_b);
hipFree(dev_c);
// NOTE(review): A, B and C from new[] are never delete[]d (leaked at exit).
return 0;
} | .text
.file "sum_two-dimensional_arrays.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z24__device_stub__matrixAddPiS_S_ii # -- Begin function _Z24__device_stub__matrixAddPiS_S_ii
.p2align 4, 0x90
.type _Z24__device_stub__matrixAddPiS_S_ii,@function
_Z24__device_stub__matrixAddPiS_S_ii: # @_Z24__device_stub__matrixAddPiS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9matrixAddPiS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__matrixAddPiS_S_ii, .Lfunc_end0-_Z24__device_stub__matrixAddPiS_S_ii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $3, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt3cin, %edi
movl $M, %esi
callq _ZNSirsERi
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $3, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt3cin, %edi
movl $K, %esi
callq _ZNSirsERi
movl M(%rip), %r14d
movl K(%rip), %ebp
movl %ebp, %eax
imull %r14d, %eax
movl %eax, 28(%rsp) # 4-byte Spill
cltq
leaq (,%rax,4), %rcx
testl %eax, %eax
movq $-1, %rbx
cmovnsq %rcx, %rbx
movq %rbx, %rdi
callq _Znam
movq %rax, 40(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq _Znam
movq %rax, 48(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq _Znam
movq %r14, %rdx
movq %rax, %rbx
testl %edx, %edx
jle .LBB1_6
# %bb.1: # %.preheader41.lr.ph
leaq (,%rbp,4), %rax
movq %rax, 56(%rsp) # 8-byte Spill
xorl %r15d, %r15d
xorl %r12d, %r12d
movq %rdx, 64(%rsp) # 8-byte Spill
jmp .LBB1_2
.p2align 4, 0x90
.LBB1_5: # %._crit_edge
# in Loop: Header=BB1_2 Depth=1
incq %r12
addl %edx, %r15d
cmpq %rdx, %r12
je .LBB1_6
.LBB1_2: # %.preheader41
# =>This Loop Header: Depth=1
# Child Loop BB1_4 Depth 2
testl %ebp, %ebp
jle .LBB1_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB1_2 Depth=1
movl %r15d, %eax
movq 48(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
movq 40(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
movl %r12d, %eax
imull %edx, %eax
leaq (%rbx,%rax,4), %rdi
xorl %esi, %esi
movq 56(%rsp), %rdx # 8-byte Reload
callq memset@PLT
movq 64(%rsp), %rdx # 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_4: # Parent Loop BB1_2 Depth=1
# => This Inner Loop Header: Depth=2
movl $2, (%r13,%rax,4)
movl $1, (%r14,%rax,4)
incq %rax
cmpq %rax, %rbp
jne .LBB1_4
jmp .LBB1_5
.LBB1_6: # %._crit_edge44
movl 28(%rsp), %eax # 4-byte Reload
shll $2, %eax
movslq %eax, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq 40(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq 48(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl M(%rip), %esi
incl %esi
shrl %esi
movl K(%rip), %edx
incl %edx
shrl %edx
movq %rdx, %r15
shlq $32, %r15
orq %rsi, %r15
movl $.L.str.2, %edi
# kill: def $esi killed $esi killed $rsi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
movabsq $8589934594, %rdx # imm = 0x200000002
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_8
# %bb.7:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movl M(%rip), %esi
movl K(%rip), %edi
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %esi, 36(%rsp)
movl %edi, 32(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 36(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rax
movq %rax, 176(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z9matrixAddPiS_S_ii, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_8:
callq hipDeviceSynchronize
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
cmpl $0, M(%rip)
jle .LBB1_14
# %bb.9: # %.preheader.preheader
xorl %r14d, %r14d
jmp .LBB1_10
.p2align 4, 0x90
.LBB1_13: # %._crit_edge47
# in Loop: Header=BB1_10 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
movslq M(%rip), %rax
cmpq %rax, %r14
jge .LBB1_14
.LBB1_10: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_12 Depth 2
cmpl $0, K(%rip)
jle .LBB1_13
# %bb.11: # %.lr.ph46
# in Loop: Header=BB1_10 Depth=1
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_12: # Parent Loop BB1_10 Depth=1
# => This Inner Loop Header: Depth=2
movl (%rbx,%r14,4), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl K(%rip), %ebp
jl .LBB1_12
jmp .LBB1_13
.LBB1_14: # %._crit_edge49
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9matrixAddPiS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type M,@object # @M
.bss
.globl M
.p2align 2, 0x0
M:
.long 0 # 0x0
.size M, 4
.type K,@object # @K
.globl K
.p2align 2, 0x0
K:
.long 0 # 0x0
.size K, 4
.type _Z9matrixAddPiS_S_ii,@object # @_Z9matrixAddPiS_S_ii
.section .rodata,"a",@progbits
.globl _Z9matrixAddPiS_S_ii
.p2align 3, 0x0
_Z9matrixAddPiS_S_ii:
.quad _Z24__device_stub__matrixAddPiS_S_ii
.size _Z9matrixAddPiS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "M: "
.size .L.str, 4
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "K: "
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "dimGrid.x = %d, dimGrid.y = %d\n"
.size .L.str.2, 32
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%d\t"
.size .L.str.4, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9matrixAddPiS_S_ii"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Result Matrix C:"
.size .Lstr, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__matrixAddPiS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym M
.addrsig_sym K
.addrsig_sym _Z9matrixAddPiS_S_ii
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9matrixAddPiS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R0, c[0x0][0x178], R3 ; /* 0x00005e0000007a24 */
/* 0x000fe200078e0203 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe200078e0207 */
/*0120*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9matrixAddPiS_S_ii
.globl _Z9matrixAddPiS_S_ii
.p2align 8
.type _Z9matrixAddPiS_S_ii,@function
_Z9matrixAddPiS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[8:11], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s4, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s8, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s11, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9matrixAddPiS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9matrixAddPiS_S_ii, .Lfunc_end0-_Z9matrixAddPiS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9matrixAddPiS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9matrixAddPiS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0007c340_00000000-6_sum_two-dimensional_arrays.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii
.type _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii, @function
_Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9matrixAddPiS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii, .-_Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii
.globl _Z9matrixAddPiS_S_ii
.type _Z9matrixAddPiS_S_ii, @function
_Z9matrixAddPiS_S_ii:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z9matrixAddPiS_S_ii, .-_Z9matrixAddPiS_S_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "M: "
.LC1:
.string "K: "
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "dimGrid.x = %d, dimGrid.y = %d\n"
.section .rodata.str1.1
.LC3:
.string "Result Matrix C:\n"
.LC4:
.string "%d\t"
.LC5:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq M(%rip), %rsi
leaq _ZSt3cin(%rip), %rbx
movq %rbx, %rdi
call _ZNSirsERi@PLT
leaq .LC1(%rip), %rsi
movq %rbp, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq K(%rip), %rsi
movq %rbx, %rdi
call _ZNSirsERi@PLT
movl M(%rip), %eax
imull K(%rip), %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L12
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %r12
movl M(%rip), %eax
imull K(%rip), %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L38
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %rbx
movl M(%rip), %eax
imull K(%rip), %eax
cltq
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L39
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %rbp
movl M(%rip), %edi
testl %edi, %edi
jle .L19
movl K(%rip), %r8d
movl $0, %esi
movl $0, %ecx
movslq %r8d, %r9
jmp .L20
.L12:
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L15
call __stack_chk_fail@PLT
.L15:
call __cxa_throw_bad_array_new_length@PLT
.L38:
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L18
call __stack_chk_fail@PLT
.L18:
call __cxa_throw_bad_array_new_length@PLT
.L39:
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L21
call __stack_chk_fail@PLT
.L21:
call __cxa_throw_bad_array_new_length@PLT
.L23:
movslq %esi, %rdx
leaq 0(,%rdx,4), %rax
addq %r9, %rdx
salq $2, %rdx
.L22:
movl $2, (%r12,%rax)
movl $1, (%rbx,%rax)
movl $0, 0(%rbp,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L22
.L24:
addl $1, %ecx
addl %edi, %esi
cmpl %edi, %ecx
je .L19
.L20:
testl %r8d, %r8d
jg .L23
jmp .L24
.L19:
imull K(%rip), %edi
leal 0(,%rdi,4), %r13d
movslq %r13d, %r13
leaq 8(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r13, %rdx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 40(%rsp)
movl K(%rip), %eax
leal 1(%rax), %ebx
shrl %ebx
movl M(%rip), %eax
leal 1(%rax), %r12d
shrl %r12d
movl $1, 52(%rsp)
movl %ebx, %ecx
movl %r12d, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r12d, 44(%rsp)
movl %ebx, 48(%rsp)
movl $2, 32(%rsp)
movl $2, 36(%rsp)
movl 40(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movq 44(%rsp), %rdi
movl 52(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L25:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %r13, %rdx
movq 24(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, M(%rip)
jle .L26
movl $0, %r13d
leaq .LC4(%rip), %r12
leaq .LC5(%rip), %r14
jmp .L27
.L40:
movl K(%rip), %r8d
movl M(%rip), %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z34__device_stub__Z9matrixAddPiS_S_iiPiS_S_ii
jmp .L25
.L28:
movl 0(%rbp), %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, K(%rip)
jg .L28
.L29:
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq $4, %rbp
cmpl %r13d, M(%rip)
jle .L26
.L27:
movl $0, %ebx
cmpl $0, K(%rip)
jg .L28
jmp .L29
.L26:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L41
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z9matrixAddPiS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z9matrixAddPiS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl K
.bss
.align 4
.type K, @object
.size K, 4
K:
.zero 4
.globl M
.align 4
.type M, @object
.size M, 4
M:
.zero 4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sum_two-dimensional_arrays.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z24__device_stub__matrixAddPiS_S_ii # -- Begin function _Z24__device_stub__matrixAddPiS_S_ii
.p2align 4, 0x90
.type _Z24__device_stub__matrixAddPiS_S_ii,@function
_Z24__device_stub__matrixAddPiS_S_ii: # @_Z24__device_stub__matrixAddPiS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9matrixAddPiS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__matrixAddPiS_S_ii, .Lfunc_end0-_Z24__device_stub__matrixAddPiS_S_ii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $3, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt3cin, %edi
movl $M, %esi
callq _ZNSirsERi
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $3, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt3cin, %edi
movl $K, %esi
callq _ZNSirsERi
movl M(%rip), %r14d
movl K(%rip), %ebp
movl %ebp, %eax
imull %r14d, %eax
movl %eax, 28(%rsp) # 4-byte Spill
cltq
leaq (,%rax,4), %rcx
testl %eax, %eax
movq $-1, %rbx
cmovnsq %rcx, %rbx
movq %rbx, %rdi
callq _Znam
movq %rax, 40(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq _Znam
movq %rax, 48(%rsp) # 8-byte Spill
movq %rbx, %rdi
callq _Znam
movq %r14, %rdx
movq %rax, %rbx
testl %edx, %edx
jle .LBB1_6
# %bb.1: # %.preheader41.lr.ph
leaq (,%rbp,4), %rax
movq %rax, 56(%rsp) # 8-byte Spill
xorl %r15d, %r15d
xorl %r12d, %r12d
movq %rdx, 64(%rsp) # 8-byte Spill
jmp .LBB1_2
.p2align 4, 0x90
.LBB1_5: # %._crit_edge
# in Loop: Header=BB1_2 Depth=1
incq %r12
addl %edx, %r15d
cmpq %rdx, %r12
je .LBB1_6
.LBB1_2: # %.preheader41
# =>This Loop Header: Depth=1
# Child Loop BB1_4 Depth 2
testl %ebp, %ebp
jle .LBB1_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB1_2 Depth=1
movl %r15d, %eax
movq 48(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
movq 40(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
movl %r12d, %eax
imull %edx, %eax
leaq (%rbx,%rax,4), %rdi
xorl %esi, %esi
movq 56(%rsp), %rdx # 8-byte Reload
callq memset@PLT
movq 64(%rsp), %rdx # 8-byte Reload
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_4: # Parent Loop BB1_2 Depth=1
# => This Inner Loop Header: Depth=2
movl $2, (%r13,%rax,4)
movl $1, (%r14,%rax,4)
incq %rax
cmpq %rax, %rbp
jne .LBB1_4
jmp .LBB1_5
.LBB1_6: # %._crit_edge44
movl 28(%rsp), %eax # 4-byte Reload
shll $2, %eax
movslq %eax, %r14
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq 40(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq 48(%rsp), %rsi # 8-byte Reload
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl M(%rip), %esi
incl %esi
shrl %esi
movl K(%rip), %edx
incl %edx
shrl %edx
movq %rdx, %r15
shlq $32, %r15
orq %rsi, %r15
movl $.L.str.2, %edi
# kill: def $esi killed $esi killed $rsi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
movabsq $8589934594, %rdx # imm = 0x200000002
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_8
# %bb.7:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movl M(%rip), %esi
movl K(%rip), %edi
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %esi, 36(%rsp)
movl %edi, 32(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 36(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rax
movq %rax, 176(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z9matrixAddPiS_S_ii, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_8:
callq hipDeviceSynchronize
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
cmpl $0, M(%rip)
jle .LBB1_14
# %bb.9: # %.preheader.preheader
xorl %r14d, %r14d
jmp .LBB1_10
.p2align 4, 0x90
.LBB1_13: # %._crit_edge47
# in Loop: Header=BB1_10 Depth=1
movl $10, %edi
callq putchar@PLT
incq %r14
movslq M(%rip), %rax
cmpq %rax, %r14
jge .LBB1_14
.LBB1_10: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_12 Depth 2
cmpl $0, K(%rip)
jle .LBB1_13
# %bb.11: # %.lr.ph46
# in Loop: Header=BB1_10 Depth=1
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB1_12: # Parent Loop BB1_10 Depth=1
# => This Inner Loop Header: Depth=2
movl (%rbx,%r14,4), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl K(%rip), %ebp
jl .LBB1_12
jmp .LBB1_13
.LBB1_14: # %._crit_edge49
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9matrixAddPiS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type M,@object # @M
.bss
.globl M
.p2align 2, 0x0
M:
.long 0 # 0x0
.size M, 4
.type K,@object # @K
.globl K
.p2align 2, 0x0
K:
.long 0 # 0x0
.size K, 4
.type _Z9matrixAddPiS_S_ii,@object # @_Z9matrixAddPiS_S_ii
.section .rodata,"a",@progbits
.globl _Z9matrixAddPiS_S_ii
.p2align 3, 0x0
_Z9matrixAddPiS_S_ii:
.quad _Z24__device_stub__matrixAddPiS_S_ii
.size _Z9matrixAddPiS_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "M: "
.size .L.str, 4
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "K: "
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "dimGrid.x = %d, dimGrid.y = %d\n"
.size .L.str.2, 32
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%d\t"
.size .L.str.4, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9matrixAddPiS_S_ii"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Result Matrix C:"
.size .Lstr, 17
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__matrixAddPiS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym M
.addrsig_sym K
.addrsig_sym _Z9matrixAddPiS_S_ii
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* cudaComputer.cu
*
* Created on: 06.12.2011
* Author: id23cat
*/
#include "cudaComputer.cuh"
cudaComputer::cudaComputer() {
// TODO Auto-generated constructor stub
}
cudaComputer::~cudaComputer() {
// TODO Auto-generated destructor stub
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* cudaComputer.cu
*
* Created on: 06.12.2011
* Author: id23cat
*/
#include "cudaComputer.cuh"
cudaComputer::cudaComputer() {
// TODO Auto-generated constructor stub
}
cudaComputer::~cudaComputer() {
// TODO Auto-generated destructor stub
} | .file "tmpxft_00048b54_00000000-6_cudaComputer.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2036:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2036:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.align 2
.globl _ZN12cudaComputerC2Ev
.type _ZN12cudaComputerC2Ev, @function
_ZN12cudaComputerC2Ev:
.LFB2028:
.cfi_startproc
endbr64
leaq 16+_ZTV12cudaComputer(%rip), %rax
movq %rax, (%rdi)
ret
.cfi_endproc
.LFE2028:
.size _ZN12cudaComputerC2Ev, .-_ZN12cudaComputerC2Ev
.globl _ZN12cudaComputerC1Ev
.set _ZN12cudaComputerC1Ev,_ZN12cudaComputerC2Ev
.align 2
.globl _ZN12cudaComputerD2Ev
.type _ZN12cudaComputerD2Ev, @function
_ZN12cudaComputerD2Ev:
.LFB2031:
.cfi_startproc
endbr64
ret
.cfi_endproc
.LFE2031:
.size _ZN12cudaComputerD2Ev, .-_ZN12cudaComputerD2Ev
.globl _ZN12cudaComputerD1Ev
.set _ZN12cudaComputerD1Ev,_ZN12cudaComputerD2Ev
.align 2
.globl _ZN12cudaComputerD0Ev
.type _ZN12cudaComputerD0Ev, @function
_ZN12cudaComputerD0Ev:
.LFB2033:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl $24, %esi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2033:
.size _ZN12cudaComputerD0Ev, .-_ZN12cudaComputerD0Ev
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.weak _ZTS12cudaComputer
.section .rodata._ZTS12cudaComputer,"aG",@progbits,_ZTS12cudaComputer,comdat
.align 8
.type _ZTS12cudaComputer, @object
.size _ZTS12cudaComputer, 15
_ZTS12cudaComputer:
.string "12cudaComputer"
.weak _ZTI12cudaComputer
.section .data.rel.ro._ZTI12cudaComputer,"awG",@progbits,_ZTI12cudaComputer,comdat
.align 8
.type _ZTI12cudaComputer, @object
.size _ZTI12cudaComputer, 16
_ZTI12cudaComputer:
.quad _ZTVN10__cxxabiv117__class_type_infoE+16
.quad _ZTS12cudaComputer
.weak _ZTV12cudaComputer
.section .data.rel.ro._ZTV12cudaComputer,"awG",@progbits,_ZTV12cudaComputer,comdat
.align 8
.type _ZTV12cudaComputer, @object
.size _ZTV12cudaComputer, 40
_ZTV12cudaComputer:
.quad 0
.quad _ZTI12cudaComputer
.quad __cxa_pure_virtual
.quad 0
.quad 0
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.weak __cxa_pure_virtual
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* cudaComputer.cu
*
* Created on: 06.12.2011
* Author: id23cat
*/
#include "cudaComputer.cuh"
cudaComputer::cudaComputer() {
// TODO Auto-generated constructor stub
}
cudaComputer::~cudaComputer() {
// TODO Auto-generated destructor stub
} | /*
* cudaComputer.cuh
*
* Created on: 06.12.2011
* Author: id23cat
*/
#ifndef CUDACOMPUTER_CUH_
#define CUDACOMPUTER_CUH_
/*
*
*/
class cudaComputer {
float *devPtr;
float *hostPtr;
protected:
virtual void memCopyHtoD()=0;
public:
cudaComputer();
virtual ~cudaComputer();
};
#endif /* CUDACOMPUTER_CUH_ */ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* cudaComputer.cuh
*
* Created on: 06.12.2011
* Author: id23cat
*/
#ifndef CUDACOMPUTER_CUH_
#define CUDACOMPUTER_CUH_
/*
*
*/
class cudaComputer {
float *devPtr;
float *hostPtr;
protected:
virtual void memCopyHtoD()=0;
public:
cudaComputer();
virtual ~cudaComputer();
};
#endif /* CUDACOMPUTER_CUH_ */ | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
* cudaComputer.cuh
*
* Created on: 06.12.2011
* Author: id23cat
*/
#ifndef CUDACOMPUTER_CUH_
#define CUDACOMPUTER_CUH_
/*
*
*/
class cudaComputer {
float *devPtr;
float *hostPtr;
protected:
virtual void memCopyHtoD()=0;
public:
cudaComputer();
virtual ~cudaComputer();
};
#endif /* CUDACOMPUTER_CUH_ */ | .text
.file "cudaComputer.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00048b54_00000000-6_cudaComputer.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2036:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2036:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.align 2
.globl _ZN12cudaComputerC2Ev
.type _ZN12cudaComputerC2Ev, @function
_ZN12cudaComputerC2Ev:
.LFB2028:
.cfi_startproc
endbr64
leaq 16+_ZTV12cudaComputer(%rip), %rax
movq %rax, (%rdi)
ret
.cfi_endproc
.LFE2028:
.size _ZN12cudaComputerC2Ev, .-_ZN12cudaComputerC2Ev
.globl _ZN12cudaComputerC1Ev
.set _ZN12cudaComputerC1Ev,_ZN12cudaComputerC2Ev
.align 2
.globl _ZN12cudaComputerD2Ev
.type _ZN12cudaComputerD2Ev, @function
_ZN12cudaComputerD2Ev:
.LFB2031:
.cfi_startproc
endbr64
ret
.cfi_endproc
.LFE2031:
.size _ZN12cudaComputerD2Ev, .-_ZN12cudaComputerD2Ev
.globl _ZN12cudaComputerD1Ev
.set _ZN12cudaComputerD1Ev,_ZN12cudaComputerD2Ev
.align 2
.globl _ZN12cudaComputerD0Ev
.type _ZN12cudaComputerD0Ev, @function
_ZN12cudaComputerD0Ev:
.LFB2033:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl $24, %esi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2033:
.size _ZN12cudaComputerD0Ev, .-_ZN12cudaComputerD0Ev
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.weak _ZTS12cudaComputer
.section .rodata._ZTS12cudaComputer,"aG",@progbits,_ZTS12cudaComputer,comdat
.align 8
.type _ZTS12cudaComputer, @object
.size _ZTS12cudaComputer, 15
_ZTS12cudaComputer:
.string "12cudaComputer"
.weak _ZTI12cudaComputer
.section .data.rel.ro._ZTI12cudaComputer,"awG",@progbits,_ZTI12cudaComputer,comdat
.align 8
.type _ZTI12cudaComputer, @object
.size _ZTI12cudaComputer, 16
_ZTI12cudaComputer:
.quad _ZTVN10__cxxabiv117__class_type_infoE+16
.quad _ZTS12cudaComputer
.weak _ZTV12cudaComputer
.section .data.rel.ro._ZTV12cudaComputer,"awG",@progbits,_ZTV12cudaComputer,comdat
.align 8
.type _ZTV12cudaComputer, @object
.size _ZTV12cudaComputer, 40
_ZTV12cudaComputer:
.quad 0
.quad _ZTI12cudaComputer
.quad __cxa_pure_virtual
.quad 0
.quad 0
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.weak __cxa_pure_virtual
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudaComputer.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
/**
 * Applies the 3x3 Sobel operator to a grayscale image.
 *
 * Expects a 2D launch where (blockIdx.x*blockDim.x+threadIdx.x,
 * blockIdx.y*blockDim.y+threadIdx.y) covers every pixel of an iw x ih image.
 *
 * in        - device pointer, row-major grayscale input (iw * ih bytes)
 * intensity - device pointer, gradient magnitude per pixel (iw * ih bytes)
 * direction - device pointer, gradient angle per pixel in radians (iw * ih floats)
 * ih, iw    - image height and width in pixels
 *
 * The one-pixel border is left untouched: the 3x3 window must fit inside
 * the image, so border pixels of intensity/direction are not written.
 */
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
// Load the 3x3 neighborhood once instead of re-reading the same
// global-memory addresses for both convolutions.
int tl = in[(y - 1) * iw + (x - 1)];
int tc = in[(y - 1) * iw + x];
int tr = in[(y - 1) * iw + (x + 1)];
int ml = in[y * iw + (x - 1)];
int mr = in[y * iw + (x + 1)];
int bl = in[(y + 1) * iw + (x - 1)];
int bc = in[(y + 1) * iw + x];
int br = in[(y + 1) * iw + (x + 1)];
// Sobel kernels: gx = [1 0 -1; 2 0 -2; 1 0 -1], gy = [1 2 1; 0 0 0; -1 -2 -1]
gx = tl - tr + 2 * ml - 2 * mr + bl - br;
gy = tl + 2 * tc + tr - bl - 2 * bc - br;
// Use the explicit single-precision intrinsics (sqrtf/atan2f) rather
// than relying on C++ overload resolution of sqrt/atan2.
// NOTE(review): the magnitude can exceed 255 (up to ~1442 for 8-bit
// input); the unsigned char conversion below matches the original
// behavior — confirm whether clamping was intended.
intensity[y * iw + x] = (unsigned char)sqrtf((float)gx * (float)gx + (float)gy * (float)gy);
direction[y * iw + x] = atan2f((float)gy, (float)gx);
}
}
Function : _Z16applySobelFilterPhS_Pfii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e280000002100 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0060*/ IADD3 R3, R2, 0x1, RZ ; /* 0x0000000102037810 */
/* 0x000fc80007ffe0ff */
/*0070*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fe20003f06270 */
/*0080*/ IMAD R3, R0, c[0x0][0x4], R5 ; /* 0x0000010000037a24 */
/* 0x002fc600078e0205 */
/*0090*/ ISETP.LT.OR P0, PT, R2, 0x1, P0 ; /* 0x000000010200780c */
/* 0x000fe40000701670 */
/*00a0*/ IADD3 R0, R3.reuse, 0x1, RZ ; /* 0x0000000103007810 */
/* 0x040fe40007ffe0ff */
/*00b0*/ ISETP.LT.OR P0, PT, R3, 0x1, P0 ; /* 0x000000010300780c */
/* 0x000fc80000701670 */
/*00c0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*00d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff007624 */
/* 0x000fe200078e00ff */
/*00f0*/ IADD3 R5, R3.reuse, -0x1, RZ ; /* 0xffffffff03057810 */
/* 0x040fe20007ffe0ff */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0110*/ IMAD R7, R3, R0, c[0x0][0x17c] ; /* 0x00005f0003077624 */
/* 0x000fe400078e0200 */
/*0120*/ IMAD R5, R5, c[0x0][0x17c], R2.reuse ; /* 0x00005f0005057a24 */
/* 0x100fe400078e0202 */
/*0130*/ IMAD R0, R3, c[0x0][0x17c], R2 ; /* 0x00005f0003007a24 */
/* 0x000fe400078e0202 */
/*0140*/ IMAD.IADD R7, R2, 0x1, R7 ; /* 0x0000000102077824 */
/* 0x000fe200078e0207 */
/*0150*/ IADD3 R2, P0, R5, c[0x0][0x160], RZ ; /* 0x0000580005027a10 */
/* 0x000fc40007f1e0ff */
/*0160*/ IADD3 R8, R0, -0x1, RZ ; /* 0xffffffff00087810 */
/* 0x000fe40007ffe0ff */
/*0170*/ IADD3 R4, P2, R7, c[0x0][0x160], RZ ; /* 0x0000580007047a10 */
/* 0x000fe40007f5e0ff */
/*0180*/ LEA.HI.X.SX32 R3, R5, c[0x0][0x164], 0x1, P0 ; /* 0x0000590005037a11 */
/* 0x000fe400000f0eff */
/*0190*/ IADD3 R6, P1, R8.reuse, c[0x0][0x160], RZ ; /* 0x0000580008067a10 */
/* 0x040fe40007f3e0ff */
/*01a0*/ LEA.HI.X.SX32 R5, R7, c[0x0][0x164], 0x1, P2 ; /* 0x0000590007057a11 */
/* 0x000fe200010f0eff */
/*01b0*/ LDG.E.U8 R9, [R2.64+0x1] ; /* 0x0000010402097981 */
/* 0x000ea2000c1e1100 */
/*01c0*/ LEA.HI.X.SX32 R7, R8, c[0x0][0x164], 0x1, P1 ; /* 0x0000590008077a11 */
/* 0x000fc600008f0eff */
/*01d0*/ LDG.E.U8 R8, [R2.64+-0x1] ; /* 0xffffff0402087981 */
/* 0x000ea8000c1e1100 */
/*01e0*/ LDG.E.U8 R12, [R4.64+-0x1] ; /* 0xffffff04040c7981 */
/* 0x000ea8000c1e1100 */
/*01f0*/ LDG.E.U8 R14, [R2.64] ; /* 0x00000004020e7981 */
/* 0x000ee8000c1e1100 */
/*0200*/ LDG.E.U8 R15, [R4.64] ; /* 0x00000004040f7981 */
/* 0x000ee8000c1e1100 */
/*0210*/ LDG.E.U8 R13, [R4.64+0x1] ; /* 0x00000104040d7981 */
/* 0x000f28000c1e1100 */
/*0220*/ LDG.E.U8 R10, [R6.64] ; /* 0x00000004060a7981 */
/* 0x000f68000c1e1100 */
/*0230*/ LDG.E.U8 R11, [R6.64+0x2] ; /* 0x00000204060b7981 */
/* 0x000f62000c1e1100 */
/*0240*/ BSSY B0, 0x3d0 ; /* 0x0000018000007945 */
/* 0x000fe20003800000 */
/*0250*/ IADD3 R16, -R12, R9, R8 ; /* 0x000000090c107210 */
/* 0x004fc40007ffe108 */
/*0260*/ IADD3 R8, R12, R8, -R9 ; /* 0x000000080c087210 */
/* 0x000fe20007ffe809 */
/*0270*/ IMAD.IADD R14, R14, 0x1, -R15 ; /* 0x000000010e0e7824 */
/* 0x008fe400078e0a0f */
/*0280*/ IMAD.IADD R15, R16, 0x1, -R13.reuse ; /* 0x00000001100f7824 */
/* 0x110fe400078e0a0d */
/*0290*/ IMAD.IADD R3, R8, 0x1, -R13 ; /* 0x0000000108037824 */
/* 0x000fe400078e0a0d */
/*02a0*/ IMAD R14, R14, 0x2, R15 ; /* 0x000000020e0e7824 */
/* 0x000fe400078e020f */
/*02b0*/ IMAD.IADD R10, R10, 0x1, -R11 ; /* 0x000000010a0a7824 */
/* 0x020fe400078e0a0b */
/*02c0*/ I2F R2, R14 ; /* 0x0000000e00027306 */
/* 0x000e240000201400 */
/*02d0*/ IMAD R3, R10, 0x2, R3 ; /* 0x000000020a037824 */
/* 0x000fcc00078e0203 */
/*02e0*/ I2F R5, R3 ; /* 0x0000000300057306 */
/* 0x000e620000201400 */
/*02f0*/ FMUL R4, R2, R2 ; /* 0x0000000202047220 */
/* 0x001fc80000400000 */
/*0300*/ FFMA R4, R5, R5, R4 ; /* 0x0000000505047223 */
/* 0x002fca0000000004 */
/*0310*/ IADD3 R6, R4, -0xd000000, RZ ; /* 0xf300000004067810 */
/* 0x000fe20007ffe0ff */
/*0320*/ MUFU.RSQ R7, R4 ; /* 0x0000000400077308 */
/* 0x0000660000001400 */
/*0330*/ ISETP.GT.U32.AND P0, PT, R6, 0x727fffff, PT ; /* 0x727fffff0600780c */
/* 0x000fda0003f04070 */
/*0340*/ @!P0 BRA 0x380 ; /* 0x0000003000008947 */
/* 0x000fea0003800000 */
/*0350*/ MOV R11, 0x370 ; /* 0x00000370000b7802 */
/* 0x003fe40000000f00 */
/*0360*/ CALL.REL.NOINC 0xbd0 ; /* 0x0000086000007944 */
/* 0x000fea0003c00000 */
/*0370*/ BRA 0x3c0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0380*/ FMUL.FTZ R9, R4, R7 ; /* 0x0000000704097220 */
/* 0x003fe40000410000 */
/*0390*/ FMUL.FTZ R7, R7, 0.5 ; /* 0x3f00000007077820 */
/* 0x000fe40000410000 */
/*03a0*/ FFMA R4, -R9, R9, R4 ; /* 0x0000000909047223 */
/* 0x000fc80000000104 */
/*03b0*/ FFMA R4, R4, R7, R9 ; /* 0x0000000704047223 */
/* 0x000fe40000000009 */
/*03c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*03d0*/ F2I.U32.TRUNC.NTZ R11, R4 ; /* 0x00000004000b7305 */
/* 0x000e22000020f000 */
/*03e0*/ SHF.R.S32.HI R7, RZ, 0x1f, R0 ; /* 0x0000001fff077819 */
/* 0x000fe20000011400 */
/*03f0*/ BSSY B0, 0x820 ; /* 0x0000042000007945 */
/* 0x000fe20003800000 */
/*0400*/ IADD3 R8, P0, R0, c[0x0][0x168], RZ ; /* 0x00005a0000087a10 */
/* 0x000fe40007f1e0ff */
/*0410*/ FSETP.EQ.AND P1, PT, |R5|, RZ, PT ; /* 0x000000ff0500720b */
/* 0x000fe40003f22200 */
/*0420*/ IADD3.X R9, R7, c[0x0][0x16c], RZ, P0, !PT ; /* 0x00005b0007097a10 */
/* 0x000fe400007fe4ff */
/*0430*/ FSETP.NEU.AND P0, PT, |R2|, RZ, PT ; /* 0x000000ff0200720b */
/* 0x000fc60003f0d200 */
/*0440*/ STG.E.U8 [R8.64], R11 ; /* 0x0000000b08007986 */
/* 0x0011f4000c101104 */
/*0450*/ @!P0 BRA P1, 0x7e0 ; /* 0x0000038000008947 */
/* 0x000fea0000800000 */
/*0460*/ FSETP.NEU.AND P0, PT, |R2|, +INF , PT ; /* 0x7f8000000200780b */
/* 0x000fe40003f0d200 */
/*0470*/ FSETP.EQ.AND P1, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fda0003f22200 */
/*0480*/ @!P0 BRA P1, 0x790 ; /* 0x0000030000008947 */
/* 0x000fea0000800000 */
/*0490*/ FMNMX R6, |R5|.reuse, |R2|.reuse, !PT ; /* 0x4000000205067209 */
/* 0x0c0fe20007800200 */
/*04a0*/ BSSY B1, 0x580 ; /* 0x000000d000017945 */
/* 0x000fe20003800000 */
/*04b0*/ FMNMX R11, |R5|, |R2|, PT ; /* 0x40000002050b7209 */
/* 0x001fe40003800200 */
/*04c0*/ MUFU.RCP R9, R6 ; /* 0x0000000600097308 */
/* 0x000e300000001000 */
/*04d0*/ FCHK P0, R11, R6 ; /* 0x000000060b007302 */
/* 0x000e620000000000 */
/*04e0*/ FFMA R4, -R6, R9, 1 ; /* 0x3f80000006047423 */
/* 0x001fc80000000109 */
/*04f0*/ FFMA R4, R9, R4, R9 ; /* 0x0000000409047223 */
/* 0x000fc80000000009 */
/*0500*/ FFMA R9, R11, R4, RZ ; /* 0x000000040b097223 */
/* 0x000fc800000000ff */
/*0510*/ FFMA R8, -R6, R9, R11 ; /* 0x0000000906087223 */
/* 0x000fc8000000010b */
/*0520*/ FFMA R4, R4, R8, R9 ; /* 0x0000000804047223 */
/* 0x000fe20000000009 */
/*0530*/ @!P0 BRA 0x570 ; /* 0x0000003000008947 */
/* 0x002fea0003800000 */
/*0540*/ MOV R4, 0x560 ; /* 0x0000056000047802 */
/* 0x000fe40000000f00 */
/*0550*/ CALL.REL.NOINC 0xd40 ; /* 0x000007e000007944 */
/* 0x000fea0003c00000 */
/*0560*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0006 */
/*0570*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0580*/ FMUL R6, R4, R4 ; /* 0x0000000404067220 */
/* 0x000fe20000400000 */
/*0590*/ BSSY B1, 0x6f0 ; /* 0x0000015000017945 */
/* 0x000fe60003800000 */
/*05a0*/ FADD R9, R6, 11.33538818359375 ; /* 0x41355dc006097421 */
/* 0x000fc80000000000 */
/*05b0*/ FFMA R9, R6, R9, 28.84246826171875 ; /* 0x41e6bd6006097423 */
/* 0x000fc80000000009 */
/*05c0*/ FFMA R13, R6, R9, 19.6966705322265625 ; /* 0x419d92c8060d7423 */
/* 0x000fe40000000009 */
/*05d0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x3f52c7ea ; /* 0x3f52c7eaff097424 */
/* 0x000fc600078e00ff */
/*05e0*/ IADD3 R8, R13, 0x1800000, RZ ; /* 0x018000000d087810 */
/* 0x000fe20007ffe0ff */
/*05f0*/ FFMA R9, R6, -R9, -5.6748671531677246094 ; /* 0xc0b5988306097423 */
/* 0x000fc60000000809 */
/*0600*/ LOP3.LUT R8, R8, 0x7f800000, RZ, 0xc0, !PT ; /* 0x7f80000008087812 */
/* 0x000fe200078ec0ff */
/*0610*/ FFMA R9, R6, R9, -6.5655550956726074219 ; /* 0xc0d2190706097423 */
/* 0x000fc60000000009 */
/*0620*/ ISETP.GT.U32.AND P0, PT, R8, 0x1ffffff, PT ; /* 0x01ffffff0800780c */
/* 0x000fe20003f04070 */
/*0630*/ FMUL R9, R6, R9 ; /* 0x0000000906097220 */
/* 0x000fc80000400000 */
/*0640*/ FMUL R9, R9, R4 ; /* 0x0000000409097220 */
/* 0x000fd00000400000 */
/*0650*/ @P0 BRA 0x6a0 ; /* 0x0000004000000947 */
/* 0x000fea0003800000 */
/*0660*/ MOV R6, 0x680 ; /* 0x0000068000067802 */
/* 0x000fe40000000f00 */
/*0670*/ CALL.REL.NOINC 0x860 ; /* 0x000001e000007944 */
/* 0x000fea0003c00000 */
/*0680*/ IMAD.MOV.U32 R6, RZ, RZ, R8 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0008 */
/*0690*/ BRA 0x6e0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*06a0*/ MUFU.RCP R6, R13 ; /* 0x0000000d00067308 */
/* 0x000e240000001000 */
/*06b0*/ FFMA R8, R13, R6, -1 ; /* 0xbf8000000d087423 */
/* 0x001fc80000000006 */
/*06c0*/ FADD.FTZ R11, -R8, -RZ ; /* 0x800000ff080b7221 */
/* 0x000fc80000010100 */
/*06d0*/ FFMA R6, R6, R11, R6 ; /* 0x0000000b06067223 */
/* 0x000fe40000000006 */
/*06e0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*06f0*/ FSETP.GT.AND P0, PT, |R2|.reuse, |R5|, PT ; /* 0x400000050200720b */
/* 0x040fe20003f04200 */
/*0700*/ FADD R5, |R5|, |R2| ; /* 0x4000000205057221 */
/* 0x000fe20000000200 */
/*0710*/ ISETP.GE.AND P1, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fe20003f26270 */
/*0720*/ FFMA R4, R9, R6, R4 ; /* 0x0000000609047223 */
/* 0x000fe20000000004 */
/*0730*/ LOP3.LUT R3, R2, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000002037812 */
/* 0x000fe400078ec0ff */
/*0740*/ FSETP.GTU.AND P2, PT, R5, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fce0003f4c000 */
/*0750*/ @P0 FADD R4, -R4, 1.5707963705062866211 ; /* 0x3fc90fdb04040421 */
/* 0x000fc80000000100 */
/*0760*/ @!P1 FADD R4, -R4, 3.1415927410125732422 ; /* 0x40490fdb04049421 */
/* 0x000fca0000000100 */
/*0770*/ @!P2 LOP3.LUT R5, R3, R4, RZ, 0xfc, !PT ; /* 0x000000040305a212 */
/* 0x000fe200078efcff */
/*0780*/ BRA 0x810 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*0790*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4016cbe4 ; /* 0x4016cbe4ff057424 */
/* 0x000fe200078e00ff */
/*07a0*/ ISETP.GE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fc80003f06270 */
/*07b0*/ SEL R5, R5, 0x3f490fdb, !P0 ; /* 0x3f490fdb05057807 */
/* 0x000fc80004000000 */
/*07c0*/ LOP3.LUT R5, R5, 0x80000000, R2, 0xf8, !PT ; /* 0x8000000005057812 */
/* 0x000fe200078ef802 */
/*07d0*/ BRA 0x810 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*07e0*/ SHF.R.S32.HI R3, RZ, 0x1f, R3 ; /* 0x0000001fff037819 */
/* 0x000fe40000011403 */
/*07f0*/ LOP3.LUT R2, R2, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000002027812 */
/* 0x000fc800078ec0ff */
/*0800*/ LOP3.LUT R5, R2, 0x40490fdb, R3, 0xf8, !PT ; /* 0x40490fdb02057812 */
/* 0x000fe400078ef803 */
/*0810*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0820*/ LEA R2, P0, R0, c[0x0][0x170], 0x2 ; /* 0x00005c0000027a11 */
/* 0x000fc800078010ff */
/*0830*/ LEA.HI.X R3, R0, c[0x0][0x174], R7, 0x2, P0 ; /* 0x00005d0000037a11 */
/* 0x000fca00000f1407 */
/*0840*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0850*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0860*/ IMAD.SHL.U32 R8, R13, 0x2, RZ ; /* 0x000000020d087824 */
/* 0x000fe200078e00ff */
/*0870*/ BSSY B2, 0xb90 ; /* 0x0000031000027945 */
/* 0x000fe80003800000 */
/*0880*/ SHF.R.U32.HI R15, RZ, 0x18, R8 ; /* 0x00000018ff0f7819 */
/* 0x000fe20000011608 */
/*0890*/ IMAD.MOV.U32 R8, RZ, RZ, R13 ; /* 0x000000ffff087224 */
/* 0x000fc600078e000d */
/*08a0*/ ISETP.NE.U32.AND P0, PT, R15, RZ, PT ; /* 0x000000ff0f00720c */
/* 0x000fda0003f05070 */
/*08b0*/ @P0 BRA 0x960 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*08c0*/ IMAD.SHL.U32 R10, R8, 0x2, RZ ; /* 0x00000002080a7824 */
/* 0x000fca00078e00ff */
/*08d0*/ ISETP.NE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fda0003f05270 */
/*08e0*/ @P0 FFMA R11, R8, 1.84467440737095516160e+19, RZ ; /* 0x5f800000080b0823 */
/* 0x000fe200000000ff */
/*08f0*/ @!P0 MUFU.RCP R10, R8 ; /* 0x00000008000a8308 */
/* 0x000ff00000001000 */
/*0900*/ @P0 MUFU.RCP R12, R11 ; /* 0x0000000b000c0308 */
/* 0x000e240000001000 */
/*0910*/ @P0 FFMA R13, R11, R12, -1 ; /* 0xbf8000000b0d0423 */
/* 0x001fc8000000000c */
/*0920*/ @P0 FADD.FTZ R13, -R13, -RZ ; /* 0x800000ff0d0d0221 */
/* 0x000fc80000010100 */
/*0930*/ @P0 FFMA R13, R12, R13, R12 ; /* 0x0000000d0c0d0223 */
/* 0x000fc8000000000c */
/*0940*/ @P0 FFMA R10, R13, 1.84467440737095516160e+19, RZ ; /* 0x5f8000000d0a0823 */
/* 0x000fe200000000ff */
/*0950*/ BRA 0xb80 ; /* 0x0000022000007947 */
/* 0x000fea0003800000 */
/*0960*/ IADD3 R17, R15, -0xfd, RZ ; /* 0xffffff030f117810 */
/* 0x000fc80007ffe0ff */
/*0970*/ ISETP.GT.U32.AND P0, PT, R17, 0x1, PT ; /* 0x000000011100780c */
/* 0x000fda0003f04070 */
/*0980*/ @P0 BRA 0xb70 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*0990*/ LOP3.LUT R10, R8, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff080a7812 */
/* 0x000fe200078ec0ff */
/*09a0*/ IMAD.MOV.U32 R14, RZ, RZ, 0x3 ; /* 0x00000003ff0e7424 */
/* 0x000fc600078e00ff */
/*09b0*/ LOP3.LUT R10, R10, 0x3f800000, RZ, 0xfc, !PT ; /* 0x3f8000000a0a7812 */
/* 0x000fe400078efcff */
/*09c0*/ SHF.L.U32 R14, R14, R17, RZ ; /* 0x000000110e0e7219 */
/* 0x000fe400000006ff */
/*09d0*/ MUFU.RCP R11, R10 ; /* 0x0000000a000b7308 */
/* 0x000e240000001000 */
/*09e0*/ FFMA R12, R10, R11, -1 ; /* 0xbf8000000a0c7423 */
/* 0x001fc8000000000b */
/*09f0*/ FADD.FTZ R12, -R12, -RZ ; /* 0x800000ff0c0c7221 */
/* 0x000fc80000010100 */
/*0a00*/ FFMA.RM R13, R11.reuse, R12.reuse, R11.reuse ; /* 0x0000000c0b0d7223 */
/* 0x1c0fe4000000400b */
/*0a10*/ FFMA.RP R12, R11, R12, R11 ; /* 0x0000000c0b0c7223 */
/* 0x000fc6000000800b */
/*0a20*/ LOP3.LUT R11, R13.reuse, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff0d0b7812 */
/* 0x040fe400078ec0ff */
/*0a30*/ FSETP.NEU.FTZ.AND P0, PT, R13, R12, PT ; /* 0x0000000c0d00720b */
/* 0x000fe40003f1d000 */
/*0a40*/ LOP3.LUT R11, R11, 0x800000, RZ, 0xfc, !PT ; /* 0x008000000b0b7812 */
/* 0x000fe400078efcff */
/*0a50*/ SEL R12, RZ, 0xffffffff, !P0 ; /* 0xffffffffff0c7807 */
/* 0x000fe40004000000 */
/*0a60*/ LOP3.LUT R14, R14, R11, RZ, 0xc0, !PT ; /* 0x0000000b0e0e7212 */
/* 0x000fc600078ec0ff */
/*0a70*/ IMAD.MOV R12, RZ, RZ, -R12 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e0a0c */
/*0a80*/ SHF.R.U32.HI R14, RZ, R17, R14 ; /* 0x00000011ff0e7219 */
/* 0x000fc8000001160e */
/*0a90*/ LOP3.LUT P1, RZ, R12, R17, R11, 0xf8, !PT ; /* 0x000000110cff7212 */
/* 0x000fe4000782f80b */
/*0aa0*/ LOP3.LUT P0, RZ, R14.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x000000010eff7812 */
/* 0x040fe4000780c0ff */
/*0ab0*/ LOP3.LUT P2, RZ, R14, 0x2, RZ, 0xc0, !PT ; /* 0x000000020eff7812 */
/* 0x000fc8000784c0ff */
/*0ac0*/ PLOP3.LUT P0, PT, P0, P1, P2, 0xe0, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703c20 */
/*0ad0*/ LOP3.LUT P1, RZ, R8, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff08ff7812 */
/* 0x000fe4000782c0ff */
/*0ae0*/ SEL R10, RZ, 0x1, !P0 ; /* 0x00000001ff0a7807 */
/* 0x000fca0004000000 */
/*0af0*/ IMAD.MOV R10, RZ, RZ, -R10 ; /* 0x000000ffff0a7224 */
/* 0x000fca00078e0a0a */
/*0b00*/ ISETP.GE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f06270 */
/*0b10*/ IADD3 R10, R15, -0xfc, RZ ; /* 0xffffff040f0a7810 */
/* 0x000fc80007ffe0ff */
/*0b20*/ SHF.R.U32.HI R11, RZ, R10, R11 ; /* 0x0000000aff0b7219 */
/* 0x000fce000001160b */
/*0b30*/ @!P0 IADD3 R11, R11, 0x1, RZ ; /* 0x000000010b0b8810 */
/* 0x000fca0007ffe0ff */
/*0b40*/ @!P1 IMAD.SHL.U32 R11, R11, 0x2, RZ ; /* 0x000000020b0b9824 */
/* 0x000fca00078e00ff */
/*0b50*/ LOP3.LUT R10, R11, 0x80000000, R8, 0xf8, !PT ; /* 0x800000000b0a7812 */
/* 0x000fe200078ef808 */
/*0b60*/ BRA 0xb80 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0b70*/ MUFU.RCP R10, R8 ; /* 0x00000008000a7308 */
/* 0x0000640000001000 */
/*0b80*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*0b90*/ IMAD.MOV.U32 R8, RZ, RZ, R10 ; /* 0x000000ffff087224 */
/* 0x003fe400078e000a */
/*0ba0*/ IMAD.MOV.U32 R10, RZ, RZ, R6 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e0006 */
/*0bb0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x0 ; /* 0x00000000ff0b7424 */
/* 0x000fc800078e00ff */
/*0bc0*/ RET.REL.NODEC R10 0x0 ; /* 0xfffff4300a007950 */
/* 0x000fea0003c3ffff */
/*0bd0*/ LOP3.LUT P0, RZ, R4, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff04ff7812 */
/* 0x000fda000780c0ff */
/*0be0*/ @!P0 IMAD.MOV.U32 R6, RZ, RZ, R4 ; /* 0x000000ffff068224 */
/* 0x000fe200078e0004 */
/*0bf0*/ @!P0 BRA 0xd00 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0c00*/ FSETP.GEU.FTZ.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720b */
/* 0x000fda0003f1e000 */
/*0c10*/ @!P0 IMAD.MOV.U32 R6, RZ, RZ, 0x7fffffff ; /* 0x7fffffffff068424 */
/* 0x000fe200078e00ff */
/*0c20*/ @!P0 BRA 0xd00 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0c30*/ FSETP.GTU.FTZ.AND P0, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fda0003f1c200 */
/*0c40*/ @P0 FADD.FTZ R6, R4, 1 ; /* 0x3f80000004060421 */
/* 0x000fe20000010000 */
/*0c50*/ @P0 BRA 0xd00 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*0c60*/ FSETP.NEU.FTZ.AND P0, PT, |R4|, +INF , PT ; /* 0x7f8000000400780b */
/* 0x000fda0003f1d200 */
/*0c70*/ @P0 FFMA R7, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004070823 */
/* 0x000fc800000000ff */
/*0c80*/ @P0 MUFU.RSQ R6, R7 ; /* 0x0000000700060308 */
/* 0x000e240000001400 */
/*0c90*/ @P0 FMUL.FTZ R8, R7, R6 ; /* 0x0000000607080220 */
/* 0x001fe40000410000 */
/*0ca0*/ @P0 FMUL.FTZ R10, R6, 0.5 ; /* 0x3f000000060a0820 */
/* 0x000fe40000410000 */
/*0cb0*/ @P0 FADD.FTZ R9, -R8.reuse, -RZ ; /* 0x800000ff08090221 */
/* 0x040fe40000010100 */
/*0cc0*/ @!P0 IMAD.MOV.U32 R6, RZ, RZ, R4 ; /* 0x000000ffff068224 */
/* 0x000fe400078e0004 */
/*0cd0*/ @P0 FFMA R9, R8, R9, R7 ; /* 0x0000000908090223 */
/* 0x000fc80000000007 */
/*0ce0*/ @P0 FFMA R9, R9, R10, R8 ; /* 0x0000000a09090223 */
/* 0x000fc80000000008 */
/*0cf0*/ @P0 FMUL.FTZ R6, R9, 2.3283064365386962891e-10 ; /* 0x2f80000009060820 */
/* 0x000fc80000410000 */
/*0d00*/ IMAD.MOV.U32 R4, RZ, RZ, R6 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0006 */
/*0d10*/ IMAD.MOV.U32 R6, RZ, RZ, R11 ; /* 0x000000ffff067224 */
/* 0x000fe400078e000b */
/*0d20*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fc800078e00ff */
/*0d30*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff2c006007950 */
/* 0x000fea0003c3ffff */
/*0d40*/ SHF.R.U32.HI R9, RZ, 0x17, R6.reuse ; /* 0x00000017ff097819 */
/* 0x100fe20000011606 */
/*0d50*/ BSSY B2, 0x13a0 ; /* 0x0000064000027945 */
/* 0x000fe20003800000 */
/*0d60*/ SHF.R.U32.HI R8, RZ, 0x17, R11.reuse ; /* 0x00000017ff087819 */
/* 0x100fe2000001160b */
/*0d70*/ IMAD.MOV.U32 R12, RZ, RZ, R11 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e000b */
/*0d80*/ LOP3.LUT R9, R9, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff09097812 */
/* 0x000fe200078ec0ff */
/*0d90*/ IMAD.MOV.U32 R13, RZ, RZ, R6 ; /* 0x000000ffff0d7224 */
/* 0x000fe200078e0006 */
/*0da0*/ LOP3.LUT R10, R8, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff080a7812 */
/* 0x000fe400078ec0ff */
/*0db0*/ IADD3 R15, R9, -0x1, RZ ; /* 0xffffffff090f7810 */
/* 0x000fe40007ffe0ff */
/*0dc0*/ IADD3 R14, R10, -0x1, RZ ; /* 0xffffffff0a0e7810 */
/* 0x000fc40007ffe0ff */
/*0dd0*/ ISETP.GT.U32.AND P0, PT, R15, 0xfd, PT ; /* 0x000000fd0f00780c */
/* 0x000fc80003f04070 */
/*0de0*/ ISETP.GT.U32.OR P0, PT, R14, 0xfd, P0 ; /* 0x000000fd0e00780c */
/* 0x000fda0000704470 */
/*0df0*/ @!P0 IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff088224 */
/* 0x000fe200078e00ff */
/*0e00*/ @!P0 BRA 0xf80 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0e10*/ FSETP.GTU.FTZ.AND P0, PT, |R11|, +INF , PT ; /* 0x7f8000000b00780b */
/* 0x000fe40003f1c200 */
/*0e20*/ FSETP.GTU.FTZ.AND P1, PT, |R6|, +INF , PT ; /* 0x7f8000000600780b */
/* 0x000fc80003f3c200 */
/*0e30*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*0e40*/ @P0 BRA 0x1380 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0e50*/ LOP3.LUT P0, RZ, R13, 0x7fffffff, R12, 0xc8, !PT ; /* 0x7fffffff0dff7812 */
/* 0x000fda000780c80c */
/*0e60*/ @!P0 BRA 0x1360 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0e70*/ FSETP.NEU.FTZ.AND P2, PT, |R11|.reuse, +INF , PT ; /* 0x7f8000000b00780b */
/* 0x040fe40003f5d200 */
/*0e80*/ FSETP.NEU.FTZ.AND P1, PT, |R6|, +INF , PT ; /* 0x7f8000000600780b */
/* 0x000fe40003f3d200 */
/*0e90*/ FSETP.NEU.FTZ.AND P0, PT, |R11|, +INF , PT ; /* 0x7f8000000b00780b */
/* 0x000fd60003f1d200 */
/*0ea0*/ @!P1 BRA !P2, 0x1360 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*0eb0*/ LOP3.LUT P2, RZ, R12, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff0cff7812 */
/* 0x000fc8000784c0ff */
/*0ec0*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*0ed0*/ @P1 BRA 0x1340 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*0ee0*/ LOP3.LUT P1, RZ, R13, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff0dff7812 */
/* 0x000fc8000782c0ff */
/*0ef0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*0f00*/ @P0 BRA 0x1310 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*0f10*/ ISETP.GE.AND P0, PT, R14, RZ, PT ; /* 0x000000ff0e00720c */
/* 0x000fe40003f06270 */
/*0f20*/ ISETP.GE.AND P1, PT, R15, RZ, PT ; /* 0x000000ff0f00720c */
/* 0x000fd60003f26270 */
/*0f30*/ @P0 IMAD.MOV.U32 R8, RZ, RZ, RZ ; /* 0x000000ffff080224 */
/* 0x000fe400078e00ff */
/*0f40*/ @!P0 IMAD.MOV.U32 R8, RZ, RZ, -0x40 ; /* 0xffffffc0ff088424 */
/* 0x000fe400078e00ff */
/*0f50*/ @!P0 FFMA R12, R11, 1.84467440737095516160e+19, RZ ; /* 0x5f8000000b0c8823 */
/* 0x000fe400000000ff */
/*0f60*/ @!P1 FFMA R13, R6, 1.84467440737095516160e+19, RZ ; /* 0x5f800000060d9823 */
/* 0x000fe200000000ff */
/*0f70*/ @!P1 IADD3 R8, R8, 0x40, RZ ; /* 0x0000004008089810 */
/* 0x000fe40007ffe0ff */
/*0f80*/ LEA R6, R9, 0xc0800000, 0x17 ; /* 0xc080000009067811 */
/* 0x000fe200078eb8ff */
/*0f90*/ BSSY B3, 0x1300 ; /* 0x0000036000037945 */
/* 0x000fe20003800000 */
/*0fa0*/ IADD3 R10, R10, -0x7f, RZ ; /* 0xffffff810a0a7810 */
/* 0x000fc60007ffe0ff */
/*0fb0*/ IMAD.IADD R13, R13, 0x1, -R6 ; /* 0x000000010d0d7824 */
/* 0x000fe400078e0a06 */
/*0fc0*/ IMAD R12, R10, -0x800000, R12 ; /* 0xff8000000a0c7824 */
/* 0x000fe400078e020c */
/*0fd0*/ MUFU.RCP R6, R13 ; /* 0x0000000d00067308 */
/* 0x000e220000001000 */
/*0fe0*/ FADD.FTZ R11, -R13, -RZ ; /* 0x800000ff0d0b7221 */
/* 0x000fc80000010100 */
/*0ff0*/ FFMA R15, R6, R11, 1 ; /* 0x3f800000060f7423 */
/* 0x001fc8000000000b */
/*1000*/ FFMA R17, R6, R15, R6 ; /* 0x0000000f06117223 */
/* 0x000fc80000000006 */
/*1010*/ FFMA R6, R12, R17, RZ ; /* 0x000000110c067223 */
/* 0x000fc800000000ff */
/*1020*/ FFMA R15, R11, R6, R12 ; /* 0x000000060b0f7223 */
/* 0x000fc8000000000c */
/*1030*/ FFMA R14, R17, R15, R6 ; /* 0x0000000f110e7223 */
/* 0x000fc80000000006 */
/*1040*/ FFMA R12, R11, R14, R12 ; /* 0x0000000e0b0c7223 */
/* 0x000fe2000000000c */
/*1050*/ IADD3 R11, R10, 0x7f, -R9 ; /* 0x0000007f0a0b7810 */
/* 0x000fc60007ffe809 */
/*1060*/ FFMA R6, R17, R12, R14 ; /* 0x0000000c11067223 */
/* 0x000fe4000000000e */
/*1070*/ IMAD.IADD R11, R11, 0x1, R8 ; /* 0x000000010b0b7824 */
/* 0x000fc600078e0208 */
/*1080*/ SHF.R.U32.HI R9, RZ, 0x17, R6 ; /* 0x00000017ff097819 */
/* 0x000fc80000011606 */
/*1090*/ LOP3.LUT R9, R9, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff09097812 */
/* 0x000fca00078ec0ff */
/*10a0*/ IMAD.IADD R13, R9, 0x1, R11 ; /* 0x00000001090d7824 */
/* 0x000fca00078e020b */
/*10b0*/ IADD3 R8, R13, -0x1, RZ ; /* 0xffffffff0d087810 */
/* 0x000fc80007ffe0ff */
/*10c0*/ ISETP.GE.U32.AND P0, PT, R8, 0xfe, PT ; /* 0x000000fe0800780c */
/* 0x000fda0003f06070 */
/*10d0*/ @!P0 BRA 0x12e0 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*10e0*/ ISETP.GT.AND P0, PT, R13, 0xfe, PT ; /* 0x000000fe0d00780c */
/* 0x000fda0003f04270 */
/*10f0*/ @P0 BRA 0x12b0 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*1100*/ ISETP.GE.AND P0, PT, R13, 0x1, PT ; /* 0x000000010d00780c */
/* 0x000fda0003f06270 */
/*1110*/ @P0 BRA 0x12f0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*1120*/ ISETP.GE.AND P0, PT, R13, -0x18, PT ; /* 0xffffffe80d00780c */
/* 0x000fe40003f06270 */
/*1130*/ LOP3.LUT R6, R6, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000006067812 */
/* 0x000fd600078ec0ff */
/*1140*/ @!P0 BRA 0x12f0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*1150*/ FFMA.RZ R8, R17, R12.reuse, R14.reuse ; /* 0x0000000c11087223 */
/* 0x180fe2000000c00e */
/*1160*/ IADD3 R11, R13, 0x20, RZ ; /* 0x000000200d0b7810 */
/* 0x000fe20007ffe0ff */
/*1170*/ FFMA.RM R9, R17, R12.reuse, R14.reuse ; /* 0x0000000c11097223 */
/* 0x180fe2000000400e */
/*1180*/ ISETP.NE.AND P2, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fe40003f45270 */
/*1190*/ LOP3.LUT R10, R8, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff080a7812 */
/* 0x000fe200078ec0ff */
/*11a0*/ FFMA.RP R8, R17, R12, R14 ; /* 0x0000000c11087223 */
/* 0x000fe2000000800e */
/*11b0*/ ISETP.NE.AND P1, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fe20003f25270 */
/*11c0*/ IMAD.MOV R12, RZ, RZ, -R13 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e0a0d */
/*11d0*/ LOP3.LUT R10, R10, 0x800000, RZ, 0xfc, !PT ; /* 0x008000000a0a7812 */
/* 0x000fe400078efcff */
/*11e0*/ FSETP.NEU.FTZ.AND P0, PT, R8, R9, PT ; /* 0x000000090800720b */
/* 0x000fc40003f1d000 */
/*11f0*/ SHF.L.U32 R11, R10, R11, RZ ; /* 0x0000000b0a0b7219 */
/* 0x000fe400000006ff */
/*1200*/ SEL R9, R12, RZ, P2 ; /* 0x000000ff0c097207 */
/* 0x000fe40001000000 */
/*1210*/ ISETP.NE.AND P1, PT, R11, RZ, P1 ; /* 0x000000ff0b00720c */
/* 0x000fe40000f25270 */
/*1220*/ SHF.R.U32.HI R9, RZ, R9, R10 ; /* 0x00000009ff097219 */
/* 0x000fe4000001160a */
/*1230*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703570 */
/*1240*/ SHF.R.U32.HI R11, RZ, 0x1, R9 ; /* 0x00000001ff0b7819 */
/* 0x000fc40000011609 */
/*1250*/ SEL R8, RZ, 0x1, !P0 ; /* 0x00000001ff087807 */
/* 0x000fc80004000000 */
/*1260*/ LOP3.LUT R8, R8, 0x1, R11, 0xf8, !PT ; /* 0x0000000108087812 */
/* 0x000fc800078ef80b */
/*1270*/ LOP3.LUT R8, R8, R9, RZ, 0xc0, !PT ; /* 0x0000000908087212 */
/* 0x000fca00078ec0ff */
/*1280*/ IMAD.IADD R11, R11, 0x1, R8 ; /* 0x000000010b0b7824 */
/* 0x000fca00078e0208 */
/*1290*/ LOP3.LUT R6, R11, R6, RZ, 0xfc, !PT ; /* 0x000000060b067212 */
/* 0x000fe200078efcff */
/*12a0*/ BRA 0x12f0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*12b0*/ LOP3.LUT R6, R6, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000006067812 */
/* 0x000fc800078ec0ff */
/*12c0*/ LOP3.LUT R6, R6, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000006067812 */
/* 0x000fe200078efcff */
/*12d0*/ BRA 0x12f0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*12e0*/ IMAD R6, R11, 0x800000, R6 ; /* 0x008000000b067824 */
/* 0x000fe400078e0206 */
/*12f0*/ BSYNC B3 ; /* 0x0000000000037941 */
/* 0x000fea0003800000 */
/*1300*/ BRA 0x1390 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*1310*/ LOP3.LUT R6, R13, 0x80000000, R12, 0x48, !PT ; /* 0x800000000d067812 */
/* 0x000fc800078e480c */
/*1320*/ LOP3.LUT R6, R6, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000006067812 */
/* 0x000fe200078efcff */
/*1330*/ BRA 0x1390 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*1340*/ LOP3.LUT R6, R13, 0x80000000, R12, 0x48, !PT ; /* 0x800000000d067812 */
/* 0x000fe200078e480c */
/*1350*/ BRA 0x1390 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*1360*/ MUFU.RSQ R6, -QNAN ; /* 0xffc0000000067908 */
/* 0x000e220000001400 */
/*1370*/ BRA 0x1390 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*1380*/ FADD.FTZ R6, R11, R6 ; /* 0x000000060b067221 */
/* 0x000fe40000010000 */
/*1390*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*13a0*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */
/* 0x000fe400078e0004 */
/*13b0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x0 ; /* 0x00000000ff097424 */
/* 0x000fc800078e00ff */
/*13c0*/ RET.REL.NODEC R8 0x0 ; /* 0xffffec3008007950 */
/* 0x000fea0003c3ffff */
/*13d0*/ BRA 0x13d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*13e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*13f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
/* In-memory representation of a PNM (P5 grayscale) image. */
typedef struct images {
char *pType; /* PNM magic/type string (presumably "P5" — see header comment below; confirm against the reader) */
int width; /* image width in pixels */
int height; /* image height in pixels */
int maxValColor; /* maximum sample value from the PNM header (e.g. 255) */
unsigned char *data; /* raw pixel bytes, row-major, width*height entries for P5 */
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
/**
 * 3x3 Sobel edge filter over a row-major grayscale image.
 *
 * One thread handles one pixel of an iw x ih image (2D launch assumed).
 * Interior pixels get their gradient magnitude written to `intensity`
 * (truncated to unsigned char, as in the original) and their gradient
 * angle in radians written to `direction`; the one-pixel border is skipped
 * because the 3x3 window would fall outside the image there.
 */
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// Guard: bail out on border and out-of-range threads.
if (col <= 0 || col + 1 >= iw || row <= 0 || row + 1 >= ih)
return;
const int above = (row - 1) * iw;
const int here = row * iw;
const int below = (row + 1) * iw;
// Horizontal gradient (same coefficient pattern as the original sums).
int gx =
1 * in[above + (col - 1)] + (-1) * in[above + (col + 1)] +
2 * in[here + (col - 1)] + (-2) * in[here + (col + 1)] +
1 * in[below + (col - 1)] + (-1) * in[below + (col + 1)];
// Vertical gradient.
int gy =
1 * in[above + (col - 1)] + 2 * in[above + col] + 1 * in[above + (col + 1)] +
(-1) * in[below + (col - 1)] + (-2) * in[below + col] + (-1) * in[below + (col + 1)];
float fx = (float)gx;
float fy = (float)gy;
intensity[here + col] = (unsigned char)sqrt(fx * fx + fy * fy);
direction[here + col] = atan2(fy, fx);
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii
.type _Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii, @function
_Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16applySobelFilterPhS_Pfii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii, .-_Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii
.globl _Z16applySobelFilterPhS_Pfii
.type _Z16applySobelFilterPhS_Pfii, @function
_Z16applySobelFilterPhS_Pfii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16applySobelFilterPhS_Pfii, .-_Z16applySobelFilterPhS_Pfii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16applySobelFilterPhS_Pfii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16applySobelFilterPhS_Pfii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
gx =
1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)] +
2 * in[y * iw + (x - 1)] + (-2) * in[y * iw + (x + 1)] +
1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];
gy =
1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)] +
(-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];
intensity[y * iw + x] = (unsigned char)sqrt((float)(gx) * (float)(gx) + (float)(gy) * (float)(gy));
direction[y * iw + x] = atan2((float)gy, (float)gx);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
gx =
1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)] +
2 * in[y * iw + (x - 1)] + (-2) * in[y * iw + (x + 1)] +
1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];
gy =
1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)] +
(-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];
intensity[y * iw + x] = (unsigned char)sqrt((float)(gx) * (float)(gx) + (float)(gy) * (float)(gy));
direction[y * iw + x] = atan2((float)gy, (float)gx);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
gx =
1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)] +
2 * in[y * iw + (x - 1)] + (-2) * in[y * iw + (x + 1)] +
1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];
gy =
1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)] +
(-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];
intensity[y * iw + x] = (unsigned char)sqrt((float)(gx) * (float)(gx) + (float)(gy) * (float)(gy));
direction[y * iw + x] = atan2((float)gy, (float)gx);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16applySobelFilterPhS_Pfii
.globl _Z16applySobelFilterPhS_Pfii
.p2align 8
.type _Z16applySobelFilterPhS_Pfii,@function
_Z16applySobelFilterPhS_Pfii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[8:9], s[0:1], 0x18
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_add_nc_u32_e32 v2, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v3, 1, v1
v_min_i32_e32 v4, v1, v0
v_cmp_gt_i32_e32 vcc_lo, s9, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_gt_i32_e64 s2, s8, v3
v_cmp_lt_i32_e64 s3, 0, v4
s_delay_alu instid0(VALU_DEP_2)
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v4, -1, v1
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
v_mul_lo_u32 v13, v1, s9
v_add_nc_u32_e32 v1, -1, v0
v_mul_lo_u32 v9, v4, s9
v_mul_lo_u32 v10, v3, s9
s_mov_b32 s0, 0x3b2d2a58
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_nc_u32_e32 v7, v13, v1
v_add_nc_u32_e32 v11, v13, v2
v_add_nc_u32_e32 v3, v9, v1
v_add_nc_u32_e32 v5, v9, v2
v_add_nc_u32_e32 v1, v10, v1
v_ashrrev_i32_e32 v8, 31, v7
v_ashrrev_i32_e32 v12, 31, v11
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v6, 31, v5
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
v_add_co_u32 v5, vcc_lo, s4, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v7, vcc_lo, s4, v7
s_clause 0x1
global_load_u8 v14, v[3:4], off
global_load_u8 v15, v[5:6], off
v_add_nc_u32_e32 v4, v10, v2
v_ashrrev_i32_e32 v3, 31, v1
v_add_nc_u32_e32 v6, v9, v0
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v8, vcc_lo
v_add_co_u32 v1, vcc_lo, s4, v1
v_ashrrev_i32_e32 v5, 31, v4
v_add_nc_u32_e32 v9, v10, v0
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v3, vcc_lo, s4, v4
v_ashrrev_i32_e32 v10, 31, v6
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v5, vcc_lo
v_ashrrev_i32_e32 v16, 31, v9
v_add_co_u32 v5, vcc_lo, s4, v6
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v10, vcc_lo
v_add_co_u32 v9, vcc_lo, s4, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v16, vcc_lo
v_add_co_u32 v11, vcc_lo, s4, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s5, v12, vcc_lo
s_clause 0x5
global_load_u8 v1, v[1:2], off
global_load_u8 v2, v[3:4], off
global_load_u8 v3, v[5:6], off
global_load_u8 v4, v[9:10], off
global_load_u8 v5, v[7:8], off
global_load_u8 v6, v[11:12], off
s_waitcnt vmcnt(5)
v_add_nc_u32_e32 v7, v14, v1
s_waitcnt vmcnt(4)
v_add_nc_u32_e32 v8, v15, v2
s_waitcnt vmcnt(3)
v_lshlrev_b32_e32 v3, 1, v3
s_waitcnt vmcnt(2)
v_lshlrev_b32_e32 v4, 1, v4
s_waitcnt vmcnt(0)
v_sub_nc_u32_e32 v5, v5, v6
v_sub_nc_u32_e32 v6, v7, v8
v_add3_u32 v7, v15, v14, v3
v_add3_u32 v4, v2, v1, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v1, v5, 1, v6
v_sub_nc_u32_e32 v2, v7, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f32_i32_e32 v3, v1
v_cvt_f32_i32_e32 v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_max_f32_e64 v2, |v3|, |v5|
v_min_f32_e64 v8, |v3|, |v5|
v_frexp_mant_f32_e32 v6, v2
v_frexp_exp_i32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_frexp_exp_i32_f32_e32 v9, v8
v_frexp_mant_f32_e32 v8, v8
v_rcp_f32_e32 v6, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v2, v9, v2
v_mul_f32_e32 v9, v5, v5
s_waitcnt_depctr 0xfff
v_dual_fmac_f32 v9, v3, v3 :: v_dual_mul_f32 v6, v8, v6
v_mul_f32_e32 v10, 0x4f800000, v9
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v9
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ldexp_f32 v2, v6, v2
v_cndmask_b32_e32 v9, v9, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_f32_e32 v6, v2, v2
v_sqrt_f32_e32 v10, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_fmaak_f32 v8, s0, v6, 0xbc7a590c
s_waitcnt_depctr 0xfff
v_dual_fmaak_f32 v8, v6, v8, 0x3d29fb3f :: v_dual_add_nc_u32 v11, -1, v10
v_fmaak_f32 v8, v6, v8, 0xbd97d4d7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v12, -v11, v10, v9
v_fmaak_f32 v8, v6, v8, 0x3dd931b2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ge_f32_e64 s0, 0, v12
v_fmaak_f32 v8, v6, v8, 0xbe1160e6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmaak_f32 v8, v6, v8, 0x3e4cb8bf
v_fmaak_f32 v8, v6, v8, 0xbeaaaa62
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_f32_e32 v6, v6, v8
v_add_nc_u32_e32 v8, 1, v10
v_fmac_f32_e32 v2, v2, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_fma_f32 v6, -v8, v10, v9
v_cndmask_b32_e64 v10, v10, v11, s0
v_sub_f32_e32 v11, 0x3fc90fdb, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_f32_e64 s0, 0, v6
v_cndmask_b32_e64 v6, v10, v8, s0
v_cmp_gt_f32_e64 s0, |v5|, |v3|
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_mul_f32 v3, 0x37800000, v6 :: v_dual_add_nc_u32 v0, v13, v0
v_cndmask_b32_e64 v2, v2, v11, s0
v_cmp_gt_i32_e64 s0, 0, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v1, 31, v0
v_cndmask_b32_e32 v6, v6, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_sub_f32_e32 v8, 0x40490fdb, v2
v_cmp_class_f32_e64 vcc_lo, v9, 0x260
v_cndmask_b32_e64 v10, 0, 0x40490fdb, s0
v_cndmask_b32_e64 v8, v2, v8, s0
v_cndmask_b32_e32 v6, v6, v9, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, v7, v4
v_lshlrev_b64 v[2:3], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
v_cvt_i32_f32_e32 v6, v6
v_cndmask_b32_e32 v4, v8, v10, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v2, vcc_lo, s2, v2
v_bfi_b32 v4, 0x7fffffff, v4, v5
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
global_store_b8 v[0:1], v6, off
global_store_b32 v[2:3], v4, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16applySobelFilterPhS_Pfii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 17
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16applySobelFilterPhS_Pfii, .Lfunc_end0-_Z16applySobelFilterPhS_Pfii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16applySobelFilterPhS_Pfii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16applySobelFilterPhS_Pfii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 17
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3
typedef struct images {
char *pType;
int width;
int height;
int maxValColor;
unsigned char *data;
} image;
/**
Reads the input file formatted as pnm. The actual implementation
supports only P5 type pnm images (grayscale).
*/
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int gx, gy;
if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
gx =
1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)] +
2 * in[y * iw + (x - 1)] + (-2) * in[y * iw + (x + 1)] +
1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];
gy =
1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)] +
(-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];
intensity[y * iw + x] = (unsigned char)sqrt((float)(gx) * (float)(gx) + (float)(gy) * (float)(gy));
direction[y * iw + x] = atan2((float)gy, (float)gx);
}
} | .text
.file "applySobelFilter.hip"
.globl _Z31__device_stub__applySobelFilterPhS_Pfii # -- Begin function _Z31__device_stub__applySobelFilterPhS_Pfii
.p2align 4, 0x90
.type _Z31__device_stub__applySobelFilterPhS_Pfii,@function
_Z31__device_stub__applySobelFilterPhS_Pfii: # @_Z31__device_stub__applySobelFilterPhS_Pfii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16applySobelFilterPhS_Pfii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__applySobelFilterPhS_Pfii, .Lfunc_end0-_Z31__device_stub__applySobelFilterPhS_Pfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16applySobelFilterPhS_Pfii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16applySobelFilterPhS_Pfii,@object # @_Z16applySobelFilterPhS_Pfii
.section .rodata,"a",@progbits
.globl _Z16applySobelFilterPhS_Pfii
.p2align 3, 0x0
_Z16applySobelFilterPhS_Pfii:
.quad _Z31__device_stub__applySobelFilterPhS_Pfii
.size _Z16applySobelFilterPhS_Pfii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16applySobelFilterPhS_Pfii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__applySobelFilterPhS_Pfii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16applySobelFilterPhS_Pfii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00053d1f_00000000-6_applySobelFilter.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii
.type _Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii, @function
_Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16applySobelFilterPhS_Pfii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii, .-_Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii
.globl _Z16applySobelFilterPhS_Pfii
.type _Z16applySobelFilterPhS_Pfii, @function
_Z16applySobelFilterPhS_Pfii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16applySobelFilterPhS_PfiiPhS_Pfii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16applySobelFilterPhS_Pfii, .-_Z16applySobelFilterPhS_Pfii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16applySobelFilterPhS_Pfii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16applySobelFilterPhS_Pfii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "applySobelFilter.hip"
.globl _Z31__device_stub__applySobelFilterPhS_Pfii # -- Begin function _Z31__device_stub__applySobelFilterPhS_Pfii
.p2align 4, 0x90
.type _Z31__device_stub__applySobelFilterPhS_Pfii,@function
_Z31__device_stub__applySobelFilterPhS_Pfii: # @_Z31__device_stub__applySobelFilterPhS_Pfii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16applySobelFilterPhS_Pfii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__applySobelFilterPhS_Pfii, .Lfunc_end0-_Z31__device_stub__applySobelFilterPhS_Pfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16applySobelFilterPhS_Pfii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16applySobelFilterPhS_Pfii,@object # @_Z16applySobelFilterPhS_Pfii
.section .rodata,"a",@progbits
.globl _Z16applySobelFilterPhS_Pfii
.p2align 3, 0x0
_Z16applySobelFilterPhS_Pfii:
.quad _Z31__device_stub__applySobelFilterPhS_Pfii
.size _Z16applySobelFilterPhS_Pfii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16applySobelFilterPhS_Pfii"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__applySobelFilterPhS_Pfii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16applySobelFilterPhS_Pfii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include <math.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
return status != CUBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
printf("%s\n", cudaGetErrorString( err));
return cudaSuccess != err;
}
extern const char* get_last_cuda_error() {
cudaError_t err = cudaGetLastError();
return cudaGetErrorString( err);
}
extern int cublas_init() {
cublasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int cublas_shutdown() {
cublasShutdown();
cudaThreadExit();
return 0;
}
extern int cuda_set_device(int deviceId) {
cudaSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
// Loads NUM_RND_STREAMS random-stream multipliers from the text file at
// cudamatpath, uploads them to the device, and seeds the RNG streams.
// Returns 0 on success, ERROR_GENERIC if the seed file cannot be opened
// or is truncated/corrupt, or CUDA_ERROR if a CUDA call failed.
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
    unsigned int * host_mults;
    host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
    if (host_mults == NULL)
        return ERROR_GENERIC;
    FILE * pFile;
    pFile = fopen (cudamatpath,"r");
    if (pFile == NULL) {
        // fopen was previously unchecked: a bad path crashed inside fscanf.
        free(host_mults);
        return ERROR_GENERIC;
    }
    for (int i = 0; i < NUM_RND_STREAMS; i++) {
        if (fscanf (pFile, "%u", &host_mults[i]) != 1) {
            // Truncated or malformed seed file: fail instead of seeding with garbage.
            fclose (pFile);
            free(host_mults);
            return ERROR_GENERIC;
        }
    }
    fclose (pFile);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
    cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
    // The host copy is no longer needed once uploaded (it was leaked before).
    free(host_mults);
    cudaThreadSynchronize();
    kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
// Returns the stored (leading) dimension of the matrix: rows for a
// non-transposed matrix, columns for a transposed one.
extern int get_leading_dimension(cudamat* mat) {
    if (mat->is_trans)
        return mat->size[1];
    return mat->size[0];
}
// Returns the non-leading dimension of the matrix (the counterpart of
// get_leading_dimension).
extern int get_nonleading_dimension(cudamat* mat) {
    if (mat->is_trans)
        return mat->size[0];
    return mat->size[1];
}
// Marks the matrix as (non-)transposed. Only the flag changes; no data moves.
extern void set_transpose(cudamat* mat, int is_trans) {
    mat->is_trans = is_trans;
}
// Returns the BLAS transpose flag character for this matrix:
// 't' if transposed, 'n' otherwise.
inline char get_transpose_char(cudamat* mat) {
    if (mat->is_trans)
        return 't';
    return 'n';
}
// Blocks the host until all previously issued device work has completed.
extern void cuda_sync_threads() {
    cudaThreadSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
// Allocates device storage for all size[0]*size[1] elements of mat and
// marks it as resident on the device.
// Returns 0 on success, CUBLAS_ERROR on allocation failure.
extern int allocate_device_memory(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    cublasStatus stat;
    stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        // Drain any pending CUDA error so later calls start clean.
        checkCUDAError();
        return CUBLAS_ERROR;
    }
    mat->on_device = 1;
    return 0;
}
// Allocates device storage for a CSR sparse matrix: nnz values, nnz column
// indices, and rows+1 row pointers. On partial failure, any buffers that
// were already allocated are freed (they were previously leaked).
// Returns 0 on success, CUBLAS_ERROR otherwise.
extern int allocate_device_memory_sparse(cudamat_sparse* mat) {
    int nnz = mat->nnz, rows = mat->size[0];
    cublasStatus stat;
    stat = cublasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        return CUBLAS_ERROR;
    }
    stat = cublasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        cublasFree(mat->data_device.data);  // don't leak the values buffer
        return CUBLAS_ERROR;
    }
    stat = cublasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        cublasFree(mat->data_device.data);     // don't leak the values buffer
        cublasFree(mat->data_device.indices);  // or the indices buffer
        return CUBLAS_ERROR;
    }
    mat->on_device = 1;
    return 0;
}
// Downloads the matrix contents from device memory into mat->data_host.
// Returns 0 on success, ERROR_NOT_ON_DEVICE if no device copy exists,
// or CUBLAS_ERROR on a failed transfer.
extern int copy_to_host(cudamat* mat) {
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    int num_elems = mat->size[0] * mat->size[1];
    cublasGetVector(num_elems, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
// Uploads mat->data_host to the device, allocating device storage first
// if the matrix is not yet resident there.
// Returns 0 on success or a CUBLAS_ERROR / allocation error code.
extern int copy_to_device(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    int err_code = 0;
    //if (!mat->owns_data)
    // return VIEW_ERROR;
    if (!mat->on_device) {
        err_code = allocate_device_memory(mat);
        if (err_code)
            return err_code;
    }
    cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
// Uploads a host CSR sparse matrix (values, indices, row pointers) to the
// device, allocating device buffers first if needed.
// Returns 0 on success or a CUBLAS_ERROR / allocation error code.
extern int copy_sparse_to_device(cudamat_sparse* mat) {
    int len = mat->nnz, rows = mat->size[0];
    int err_code = 0;
    //if (!mat->owns_data)
    // return VIEW_ERROR;
    if (!mat->on_device) {
        err_code = allocate_device_memory_sparse(mat);
        if (err_code)
            return err_code;
    }
    cublasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    cublasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    cublasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
// Device-to-device copy of mat1 into mat2 (both must already be on the
// device and have identical shapes).
// Returns 0 on success, ERROR_INCOMPATIBLE_DIMENSIONS or CUBLAS_ERROR.
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int num_elems = mat1->size[0] * mat1->size[1];
    cublasScopy(num_elems, mat1->data_device, 1, mat2->data_device, 1);
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
// Copies rows [start, end) of source into target (target must be
// (end-start) x width). Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
// NOTE(review): `end > height` compares unsigned against int — assumes
// sizes are non-negative; confirm callers never pass negative sizes.
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    int height = source->size[0];
    int width = source->size[1];
    if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // One 32-thread block per 32x32 tile of the slice.
    dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 kernelBlockDim(32, 1, 1);
    kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes source into rows [start, end) of target (source must be
// (end-start) x width). Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    int height = target->size[0];
    int width = target->size[1];
    if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // One 32-thread block per 32x32 tile of the slice.
    dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 kernelBlockDim(32, 1, 1);
    kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes the transpose of source into target (target must be width x height).
// Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
// NOTE(review): unlike the other wrappers here, there is no
// cudaThreadSynchronize() before the error check, so only launch-time
// errors are caught immediately.
extern int copy_transpose(cudamat* source, cudamat* target) {
    unsigned int height = source->size[0];
    unsigned int width = source->size[1];
    if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // setup execution parameters: ceil-divide each dimension by the tile size
    unsigned int grid_x = height / COPY_BLOCK_SIZE;
    if (height % COPY_BLOCK_SIZE)
        grid_x++;
    unsigned int grid_y = width / COPY_BLOCK_SIZE;
    if (width % COPY_BLOCK_SIZE)
        grid_y++;
    dim3 grid(grid_x, grid_y, 1);
    dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
    kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Frees the device buffer of a matrix that owns its data and is resident
// on the device; a view or host-only matrix is a no-op.
// Returns 0 on success, CUBLAS_ERROR on a failed free.
extern int free_device_memory(cudamat* mat) {
    if (!mat->owns_data || !mat->on_device)
        return 0;
    cublasStatus stat = cublasFree(mat->data_device);
    mat->on_device = 0;
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
// Overwrites the logical shape of the matrix without any size validation
// (compare with reshape, which checks element count). Always returns 0.
extern int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    return 0;
}
// Changes the logical shape to m x n, provided the element count is
// preserved. Returns 0 on success, ERROR_INCOMPATIBLE_DIMENSIONS otherwise.
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
    if (m * n != mat->size[0] * mat->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    mat->size[0] = m;
    mat->size[1] = n;
    return 0;
}
// Makes target a non-owning view of columns [first_col, last_col) of
// source (no data is copied; column-major layout makes the slice contiguous).
// Returns 0, ERROR_TRANSPOSED, ERROR_NOT_ON_DEVICE, or
// ERROR_INCOMPATIBLE_DIMENSIONS.
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (last_col > source->size[1] || (first_col >= last_col))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int num_rows = source->size[0];
    target->data_host = 0;
    target->data_device = source->data_device + first_col * num_rows;
    target->on_device = 1;
    target->on_host = 0;
    target->size[0] = source->size[0];
    target->size[1] = last_col - first_col;
    target->is_trans = 0;
    // owns_data = 0 so free_device_memory on the view will not free the
    // underlying buffer.
    target->owns_data = 0;
    return 0;
}
// Makes target a non-owning view of elements [first_ind, last_ind) of a
// vector (source must be 1 x n or m x 1). The view keeps the orientation
// of the source vector.
// Returns 0, ERROR_GENERIC (not a vector), ERROR_TRANSPOSED,
// ERROR_NOT_ON_DEVICE, or ERROR_INCOMPATIBLE_DIMENSIONS.
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
    // source must be a vector.
    if (source->size[0] > 1 && source->size[1] > 1)
        return ERROR_GENERIC;
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (first_ind >= last_ind)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int num_rows = source->size[0];
    target->data_host = 0;
    // NOTE(review): offset scales by num_rows; for a column vector
    // num_rows > 1 this assumes first_ind indexes whole rows — confirm
    // against callers.
    target->data_device = source->data_device + first_ind * num_rows;
    target->on_device = 1;
    target->on_host = 0;
    target->is_trans = 0;
    target->owns_data = 0;
    if (source->size[0] > 1) {
        // Column vector: slice along rows.
        if (last_ind > source->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        target->size[0] = last_ind - first_ind;
        target->size[1] = 1;
    } else {
        // Row vector: slice along columns.
        if (last_ind > source->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        target->size[0] = 1;
        target->size[1] = last_ind - first_ind;
    }
    return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
// Wraps an existing host buffer (m x n, column-major as used throughout
// this file) in a cudamat descriptor. No device allocation happens here.
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    mat->data_host = data;
    mat->on_host = 1;
    mat->on_device = 0;
    mat->is_trans = 0;
    mat->owns_data = 1;
}
// Wraps existing host CSR arrays (values, column indices, row pointers)
// in a cudamat_sparse descriptor. No device allocation happens here.
extern void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
    mat->size[0] = m;
    mat->size[1] = n;
    mat->nnz = nnz;
    mat->data_host.data = data;
    mat->data_host.indices = indices;
    mat->data_host.indptr = indptr;
    mat->on_host = 1;
    mat->on_device = 0;
    mat->is_trans = 0;
    mat->owns_data = 1;
}
// Marks the matrix as resident on the device without allocating or
// copying anything — caller is responsible for data_device being valid.
extern void set_on_device(cudamat* mat) {
    mat->on_device = 1;
}
// Initializes an m x n matrix descriptor with no host buffer and
// allocates (uninitialized) device storage for it.
// Returns allocate_device_memory's result: 0 or CUBLAS_ERROR.
extern int init_empty(cudamat* mat, int m, int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_host = 0;
    mat->on_device = 0;       // allocate_device_memory flips this on success
    mat->is_trans = 0;
    mat->owns_data = 1;
    return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
// Fills mat with uniform random samples using the persistent RNG streams
// in rnd_state. Returns 0, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Fills mat with Gaussian random samples using the persistent RNG streams
// in rnd_state. Returns 0, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes Bernoulli samples into target, treating each entry of mat as a
// success probability. Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS,
// ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleBernoulli<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Bernoulli sampling variant for tanh-valued inputs (see kSampleBernoulliTanh).
// Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleBernoulliTanh<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes Poisson samples into target with rates taken from mat.
// Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSamplePoisson<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes Gaussian samples into target, scaled by mult, with means taken
// from mat (see kSampleGaussian for exact semantics).
// Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Applies the kPerturbEnergy kernel to mat, writing into target.
// Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kPerturbEnergy<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Applies the kPerturbProb kernel to mat, writing into target.
// Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kPerturbProb<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// In-place random dropout: entries of mat are replaced by val with
// probability dropprob (see kRandomDropout).
// Returns 0, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomDropout<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
// target = mat + vec, with the h x 1 column vector vec broadcast across
// all w columns. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError()) {
        return CUDA_ERROR;
    }
    return 0;
}
// target = mat + mult * vec, with the column vector vec broadcast across
// all columns. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddColMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Multiplies the diagonal of mat by scalar val, writing into target
// (mat is assumed square with w = size[1]; see kMultDiagonalScalar).
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Adds scalar val to the diagonal of mat, writing into target
// (mat is assumed square with w = size[1]; see kAddDiagonalScalar).
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Multiplies the diagonal of mat elementwise by vector vec, writing into
// target. vec may be a row or column vector with size[0]*size[1] == size[0]
// of mat. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[1] * vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Adds vector vec elementwise to the diagonal of mat, writing into
// target. vec may be a row or column vector with size[0]*size[1] == size[0]
// of mat. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[1] * vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat + mult * vec, with the 1 x w row vector vec broadcast
// across all rows. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddRowMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat + vec, with the 1 x w row vector vec broadcast across all
// rows. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat * vec elementwise, with the h x 1 column vector vec
// broadcast across all columns. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat * vec elementwise, with the 1 x w row vector vec broadcast
// across all rows. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat / vec elementwise, with the h x 1 column vector vec
// broadcast across all columns. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat / vec elementwise, with the 1 x w row vector vec broadcast
// across all rows. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise target = (mat1 <= mat2). Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    // Also require target on device, matching greater_than_eq; previously
    // a host-only target reached the kernel launch with a bad pointer.
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLessThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise target = (mat1 < mat2). Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    // Also require target on device, matching greater_than; previously
    // a host-only target reached the kernel launch with a bad pointer.
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise target = (mat <= val). Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLessThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise target = (mat < val). Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise target = (mat1 >= mat2). Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGreaterThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise target = (mat1 > mat2). Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise clamp from above: target = min(mat1, mat2) per kUpperBound.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kUpperBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise clamp from below: target = max(mat1, mat2) per kLowerBound.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLowerBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise target = (mat >= val). Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGreaterThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise target = (mat > val). Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise clamp from above by scalar val (see kUpperBoundScalar).
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kUpperBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise clamp from below by scalar val (see kLowerBoundScalar).
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLowerBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Columnwise maximum: target (1 x w) receives the max of each column of
// mat. Only axis == 0 is implemented.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_UNSUPPORTED, or CUDA_ERROR.
// NOTE(review): w1 = floor(sqrt(w)) is 0 when w == 0, so w / w1 would
// divide by zero — assumes callers never pass empty matrices.
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        int shared_mem_size = 32 * sizeof(float) ;
        // Near-square 2D grid covering w columns: w1 * w2 >= w.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// For each column of mat, accumulates into acc at the position of the
// column maximum (see kChooseMaxAndAccumulate). acc must match mat's shape.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !acc->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // Near-square 2D grid covering w columns: w1 * w2 >= w.
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kChooseMaxAndAccumulate<<<gridDim,32>>>(mat->data_device, acc->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Columnwise one-hot of the maximum (see kChooseMaxColumnwise); target
// must match mat's shape. Only axis == 0 is implemented.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_UNSUPPORTED, or CUDA_ERROR.
extern int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        int shared_mem_size = 32 * sizeof(float) ;
        // Near-square 2D grid covering w columns: w1 * w2 >= w.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kChooseMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Columnwise argmax: target (1 x w) receives the row index of each
// column's maximum. Only axis == 0 is implemented.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_UNSUPPORTED, or CUDA_ERROR.
extern int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        // Near-square 2D grid covering w columns: w1 * w2 >= w.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kArgMaxColumnwise<<<gridDim,32>>>(mat->data_device, target->data_device, w, h);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Columnwise power sum scaled by mult (see kSqSumColumnwise for the exact
// use of p). target must be 1 x w. Only axis == 0 is implemented.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_UNSUPPORTED, or CUDA_ERROR.
extern int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        int shared_mem_size = 32 * sizeof(float) ;
        // Near-square 2D grid covering w columns: w1 * w2 >= w.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kSqSumColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Rescales each column of mat so its norm does not exceed `norm` (see
// kNormLimitColumnwise), writing into target (same shape as mat).
// Only axis == 0 is implemented.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_UNSUPPORTED, or CUDA_ERROR.
extern int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
                             float norm) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        int shared_mem_size = 32 * sizeof(float) ;
        // Near-square 2D grid covering w columns: w1 * w2 >= w.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kNormLimitColumnwise<<<gridDim,32, shared_mem_size>>>(mat->data_device, target->data_device, norm, w, h);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Elementwise sign of mat written into target (see kSign).
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int sign(cudamat* mat, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* ---- Element-wise unary operations ----------------------------------------
 * Each wrapper below applies one element-wise kernel to `mat`, writing into
 * `target`. All follow the same contract: both matrices on the device, same
 * shape, launch on the fixed 1D vector-op grid, synchronize, then return 0 or
 * a cudamat error code. Note these (unlike sign()) do not check
 * transposedness -- harmless for element-wise ops on equal shapes. */

/* target[i] = cos(mat[i]) */
extern int apply_cos(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplyCos<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = sin(mat[i]) */
extern int apply_sin(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplySin<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = sigmoid(mat[i]) */
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = tanh(mat[i]) */
extern int apply_tanh(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = |mat[i]| */
extern int apply_abs(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = log(1 + exp(mat[i])) (softplus) */
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Logarithm; `tiny` is forwarded to kLog (presumably an epsilon to avoid
 * log(0) -- confirm against the kernel). */
extern int apply_log(cudamat* mat, cudamat* target, float tiny) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len, tiny);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = exp(mat[i]) */
extern int apply_exp(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = ceil(mat[i]) */
extern int apply_ceil(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kCeil<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = floor(mat[i]) */
extern int apply_floor(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kFloor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = sqrt(mat[i]) */
extern int apply_sqrt(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Raises every element of `mat` to the scalar power `pow`, writing into
 * `target` (same contract as the unary wrappers above). */
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kPow<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* ---- Element-wise binary operations with a second matrix operand ----------
 * These take `mat` and a second matrix (parameter name `pow` throughout,
 * even where it is really labels/targets) that must both match `target` in
 * shape. Same launch/sync/error pattern as the unary wrappers.
 * NOTE(review): `pow->on_device` is never checked in this family -- only
 * `mat` and `target` are; confirm callers guarantee it. */

/* target[i] = mat[i] ** pow[i] (element-wise exponent matrix) */
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Element-wise cross entropy of `mat` against `pow`; `tiny` is forwarded to
 * the kernel (presumably a log-argument epsilon -- confirm). */
extern int compute_cross_entropy(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Bernoulli variant of the element-wise cross entropy. */
extern int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kCrossEntropyBernoulli<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Marks predictions in `mat` that agree with `pow` within `cutoff`
 * (semantics defined by kCorrectPreds). */
extern int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kCorrectPreds<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, cutoff);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target[i] = 1 / mat[i] (kReciprocal). Same contract as the other
 * element-wise unary wrappers. */
extern int reciprocal(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kReciprocal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Matrix product: target = beta * target + alpha * mat1 * mat2, via
 * cublasSgemm. Transposed operands are handled by passing 't'/'n' flags
 * rather than moving data; cuBLAS is column-major, and size[0] is used as
 * the leading dimension of each (untransposed) storage.
 * Returns 0 on success, or a cudamat/cuBLAS error code. */
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
        get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
        get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }
    int m = get_leading_dimension(mat1),
        k = get_leading_dimension(mat2),
        n = get_nonleading_dimension(mat2);
    cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
                m, n, k,
                alpha, mat1->data_device, mat1->size[0],
                mat2->data_device, mat2->size[0],
                beta, target->data_device, target->size[0]);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    cudaThreadSynchronize();
    return 0;
}
/* Sparse-dense matrix product: target = beta * target + alpha * mat1 * mat2,
 * where mat1 is a CSR-style sparse matrix (data/indptr/indices) and mat2 is
 * dense. Launches kSparseDot on a 2D grid of COPY_BLOCK_SIZE^2 blocks.
 * Returns 0 on success or a cudamat error code. */
extern int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    int m = mat1->size[0],
        k = mat1->size[1],
        k2 = mat2->size[0],
        n = mat2->size[1];
    if (k != k2) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }
    // Ceil-divide the m x n output over the block grid.
    unsigned int grid_x = m / COPY_BLOCK_SIZE;
    if (m % COPY_BLOCK_SIZE)
        grid_x++;
    unsigned int grid_y = n / COPY_BLOCK_SIZE;
    if (n % COPY_BLOCK_SIZE)
        grid_y++;
    dim3 grid(grid_x, grid_y, 1);
    dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
    kSparseDot<<<grid, threads>>>(m, n, k, mat1->data_device.data,
                                  mat1->data_device.indptr,
                                  mat1->data_device.indices,
                                  mat2->data_device, target->data_device, beta, alpha);
    // Bug fix: this function launches a raw CUDA kernel and makes no cuBLAS
    // call, so checking cublasGetError() was meaningless. Synchronize and
    // check the CUDA error state like every other kernel wrapper here.
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Dot product of two vectors (matrices flattened in storage order) via
 * cublasSdot. On success *err_code is 0 and the dot product is returned;
 * on failure *err_code is set and the return value is not meaningful. */
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
    int len = mat1->size[0]*mat1->size[1];
    float res;
    if (!mat1->on_device || !mat2->on_device) {
        // Bug fix: this path previously returned without setting *err_code,
        // leaving the caller's error flag uninitialized.
        *err_code = ERROR_NOT_ON_DEVICE;
        return 0;
    }
    if (mat1->is_trans != mat2->is_trans) {
        *err_code = ERROR_TRANSPOSEDNESS;
        return 0;
    }
    if (mat2->size[0] * mat2->size[1] != len) {
        *err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
        return 0;
    }
    res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    } else {
        *err_code = 0;
        return res;
    }
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
/* mat1 += alpha * mat2, in place, via cublasSaxpy. Matrices must share
 * shape and transposedness. Returns 0 on success or an error code.
 * NOTE(review): unlike the kernel wrappers, no explicit synchronize here
 * before returning -- presumably cuBLAS calls are serialized for callers. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    cublasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
/* mat1 += mult * sign(mat2), in place (kAddMultSign kernel). Matrices must
 * share shape and transposedness. Returns 0 on success or an error code. */
extern int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddMultSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, len, mult);
    // Bug fix: this launches a raw CUDA kernel and makes no cuBLAS call, so
    // the old cublasGetError() check could never report the launch failing.
    // Synchronize and check the CUDA error state like the sibling wrappers.
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* ---- Element-wise binary arithmetic ---------------------------------------
 * target = mat1 OP mat2 for OP in {+, -, /, *}. Operands and target must all
 * be on the device and share the same shape; mat1/mat2 must also share
 * transposedness. Same launch/sync/error pattern as the unary wrappers. */

/* target = mat1 + mat2 */
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat1 - mat2 */
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSubtract<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat1 / mat2 (element-wise) */
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Elementwise multiplication of 2 matrices */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* ---- Element-wise activation derivatives ----------------------------------
 * Each wrapper combines an incoming gradient (mat1) with an activation value
 * (mat2) via the corresponding kernel, writing into target. All three
 * matrices must be on the device and share the same shape; mat1/mat2 must
 * share transposedness. Exact formula is defined by each kernel. */

/* sin derivative (kSinDeriv) */
extern int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSinDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* cos derivative (kCosDeriv) */
extern int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kCosDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* logistic (sigmoid) derivative (kLogisticDeriv) */
extern int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLogisticDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* tanh derivative (kTanhDeriv) */
extern int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kTanhDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* ReLU derivative (kRectifiedLinearDeriv) */
extern int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kRectifiedLinearDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* smooth-ReLU (softplus) derivative (kRectifiedLinearSmoothDeriv) */
extern int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kRectifiedLinearSmoothDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Fills every element of `mat` with the scalar `alpha` (kAssignScalar). */
extern int assign_scalar(cudamat* mat, float alpha) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kAssignScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* ---- Scalar arithmetic: target = mat OP alpha for OP in {*, /, +} --------- */

/* target = mat * alpha */
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat / alpha */
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivideScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat + alpha */
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Euclidean (L2) norm of `mat`'s elements via cublasSnrm2.
 * On success *err_code is 0 and the norm is returned; on failure *err_code
 * is set and -1 is returned. */
extern float euclid_norm(cudamat* mat, int* err_code) {
    int len = mat->size[0]*mat->size[1];
    // Bug fix: validate on_device *before* handing mat->data_device to
    // cuBLAS -- the original called cublasSnrm2 first, and its error path
    // also left *err_code unset.
    if (!mat->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return -1.;
    }
    float res = cublasSnrm2(len, mat->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    } else {
        *err_code = 0;
        return res;
    }
}
/* Gathers the rows of `source` named by `indices` (a 1 x nRetRows index
 * matrix) into `target`, one warp-sized block per 32 rows.
 * NOTE(review): unlike most wrappers, no on_device/shape validation here --
 * callers are trusted; confirm that is intentional. */
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
    const int nRetRows = indices->size[1];
    if (nRetRows==0) return 0;
    dim3 gridDim((nRetRows+31)/32);
    dim3 blockDim(32);
    kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* Swaps the columns of `source` listed in `indices1` with those listed in
 * `indices2`, writing through `target` (semantics defined by kSwapColumns).
 * NOTE(review): no on_device or dimension validation at all in this wrapper
 * -- confirm callers guarantee consistent inputs. */
extern int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
    const int cols = indices1->size[1]*indices1->size[0],
              h = source->size[0],
              w = source->size[1];
    kSwapColumns<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, target->data_device, indices1->data_device, indices2->data_device, cols, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* Scatter counterpart of selectRows: writes the rows of `source` into the
 * rows of `target` named by `indices` (1 x nSetRows).
 * NOTE(review): no on_device/shape validation -- callers are trusted. */
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
    const int nSetRows = indices->size[1];
    if (nSetRows==0)
        return 0;
    dim3 gridDim((nSetRows+31)/32);
    dim3 blockDim(32);
    kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* Image-translation augmentation: shifts each column (image) of `source` by
 * the per-image offsets in off_x/off_y (kernel-defined semantics), one
 * 512-thread block per column.
 * NOTE(review): there is no cudaThreadSynchronize() before checkCUDAError()
 * here, so only launch-configuration errors are caught -- asynchronous
 * execution errors surface at the caller's next sync. Confirm intentional. */
extern int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
    dim3 kernelBlockGrid(source->size[1], 1, 1);
    dim3 kernelBlockDim(512, 1, 1);
    kGenerateTranslationsBigVarOff<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Reorders each column of `source` into blocks of `blocksize` rows
 * (kBlockify), one 512-thread block per column. Same missing-sync caveat
 * as above. */
extern int blockify(cudamat* source, cudamat* target, int blocksize) {
    dim3 kernelBlockGrid(source->size[1], 1, 1);
    dim3 kernelBlockDim(512, 1, 1);
    kBlockify<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, source->size[0], blocksize);
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Column-wise softmax of `mat` (h x w) into `target` (same shape), one
 * 32-thread block per column over a near-square 2D grid with 32 floats of
 * shared scratch. Returns 0 on success or a cudamat error code. */
extern int softmax(cudamat* mat, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int shared_mem_size = 32 * sizeof(float) ;
    // NOTE(review): w1 == 0 when w == 0 would divide by zero below.
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kSoftMax<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* In-place variant of softmax(): overwrites each column of `mat` with its
 * softmax (kSoftMaxOverwrite). Same grid layout as softmax(). */
extern int softmax_overwrite(cudamat* mat) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    int shared_mem_size = 32 * sizeof(float) ;
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kSoftMaxOverwrite<<<gridDim, 32, shared_mem_size>>>(mat->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Gradient of softmax + cross-entropy: combines softmax outputs `mat`
 * (h x w) with the 1 x w `labels` row into `target` (h x w), semantics
 * defined by kSoftMaxGrad. Returns 0 or a cudamat error code.
 * NOTE(review): labels->on_device is not checked -- confirm intentional. */
extern int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (labels->size[0] != 1 || labels->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSoftMaxGrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Per-column correctness of softmax predictions in `mat` (h x w) against the
 * 1 x w `labels` row, writing a 1 x w indicator into `target`
 * (kSoftMaxCorrect). One 32-thread block per column. */
extern int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != 1 || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (labels->size[0] != 1 || labels->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kSoftMaxCorrect<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Accumulates the w columns of `mat` (h x w) into the w2 columns of `target`
 * (h x w2) according to the 1 x w `indices` map, scaled by `mult`; `avg`
 * is forwarded to the kernel (presumably switches sum vs. mean -- confirm).
 * One block per row; requires w2 <= threads-per-block. */
extern int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
    unsigned int h = mat->size[0],
                 w = mat->size[1],
                 w2 = target->size[1];
    if (!mat->on_device || !indices->on_device|| !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (indices->size[0] != 1 || indices->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAccumulateColumns<<<h, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, indices->data_device, target->data_device, w, w2, h, mult, avg);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Per-column cross entropy of softmax outputs `mat` (h x w) against the
 * 1 x w `labels` row, writing a 1 x w result into `target`; `tiny` is
 * forwarded to the kernel (presumably a log epsilon -- confirm).
 * NOTE(review): labels->on_device is not checked. */
extern int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != 1 || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (labels->size[0] != 1 || labels->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSoftMaxCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h, tiny);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* Inverse of accumulate_columns: replicates columns of `source` (h x w) into
 * the w2 columns of `target` (h x w2) according to the 1 x w2 `indices`
 * map (kExpand). Returns 0 or a cudamat error code. */
extern int expand(cudamat* source, cudamat* indices, cudamat* target){
    unsigned int h = source->size[0],
                 w = source->size[1],
                 w2 = target->size[1];
    if (!source->on_device || !indices->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (indices->size[0] != 1 || indices->size[1] != w2)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kExpand<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, indices->data_device, target->data_device, h, w, w2);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* target = source + mult * expand(mat, indices): adds columns of `mat`
 * (h x w2), selected per output column by the 1 x w `indices` map, onto
 * `source` (h x w), writing into `target` (h x w). Exact semantics defined
 * by kExpandAndAdd. Returns 0 or a cudamat error code. */
extern int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
    unsigned int h = source->size[0],
                 w = source->size[1],
                 w2 = mat->size[1];
    if (!source->on_device || !mat->on_device || !indices->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (indices->size[0] != 1 || indices->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != h)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kExpandAndAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, mat->data_device, indices->data_device, target->data_device, w, h, mult, w2);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
}
#include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include <math.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
/* True if the most recent cuBLAS status (as reported by cublasGetError)
 * is anything other than CUBLAS_STATUS_SUCCESS. */
inline bool check_cublas_error() {
    return cublasGetError() != CUBLAS_STATUS_SUCCESS;
}
/* Reads the CUDA runtime's pending error flag; prints the error string when
 * one is set. Returns true iff an error was pending. */
inline bool checkCUDAError() {
    cudaError_t status = cudaGetLastError();
    bool failed = (status != cudaSuccess);
    if (failed)
        printf("%s\n", cudaGetErrorString( status));
    return failed;
}
/* Returns the human-readable string for the CUDA runtime's pending error. */
extern const char* get_last_cuda_error() {
    return cudaGetErrorString(cudaGetLastError());
}
/* Initializes the (legacy) cuBLAS library. Returns 0 on success,
 * CUBLAS_ERROR otherwise. */
extern int cublas_init() {
    cublasInit();
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
/* Shuts down cuBLAS and then tears down the CUDA context for this host
 * thread. Always returns 0. */
extern int cublas_shutdown() {
    cublasShutdown();
    cudaThreadExit();
    return 0;
}
/* Selects which GPU subsequent CUDA calls on this thread will use.
 * Returns 0 on success, CUDA_ERROR otherwise. */
extern int cuda_set_device(int deviceId) {
    cudaSetDevice(deviceId);
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Seeds the device-side RNG: loads NUM_RND_STREAMS multiplier constants from
 * the text file at `cudamatpath`, uploads them, and runs kSeedRandom with
 * `seed`. Returns 0 on success, -1 if the multiplier file cannot be opened,
 * or CUDA_ERROR on a device failure. */
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
    unsigned int * host_mults;
    host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
    FILE * pFile;
    pFile = fopen (cudamatpath,"r");
    // Bug fix: a missing/unreadable multiplier file previously crashed in
    // fscanf on a NULL stream.
    if (pFile == NULL) {
        free(host_mults);
        return -1;
    }
    for (int i = 0; i < NUM_RND_STREAMS; i++) {
        fscanf (pFile, "%u", &host_mults[i]);
    }
    fclose (pFile);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
    cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
    free(host_mults);  // Bug fix: the host staging buffer was leaked.
    //cudaMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
    //cudaMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
    //cudaMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaThreadSynchronize();
    kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
/* Leading (storage-major) dimension of `mat`, accounting for the logical
 * transpose flag. */
extern int get_leading_dimension(cudamat* mat) {
    if (mat->is_trans)
        return mat->size[1];
    return mat->size[0];
}
/* Non-leading dimension of `mat`, accounting for the transpose flag. */
extern int get_nonleading_dimension(cudamat* mat) {
    if (mat->is_trans)
        return mat->size[0];
    return mat->size[1];
}
/* Sets the logical transpose flag on `mat` (no data movement). */
extern void set_transpose(cudamat* mat, int is_trans) {
    mat->is_trans = is_trans;
}
/* cuBLAS transpose flag ('t'/'n') corresponding to `mat`'s transpose bit. */
inline char get_transpose_char(cudamat* mat) {
    if (mat->is_trans)
        return 't';
    return 'n';
}
/* Blocks the host until all previously issued device work has completed. */
extern void cuda_sync_threads() {
    cudaThreadSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
/* Allocates device storage for `mat` (size[0]*size[1] elements) via
 * cublasAlloc and marks it on_device. Returns 0 or CUBLAS_ERROR. */
extern int allocate_device_memory(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    cublasStatus stat;
    stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        // Drain the CUDA error state too, so a stale error does not poison
        // later calls (checkCUDAError prints and clears it).
        checkCUDAError();
        return CUBLAS_ERROR;
    }
    mat->on_device = 1;
    return 0;
}
/* Allocates the three device buffers of a CSR-style sparse matrix: nnz
 * values, nnz column indices, and rows+1 row pointers. On success marks the
 * matrix on_device and returns 0; on failure frees any buffers already
 * allocated and returns CUBLAS_ERROR. */
extern int allocate_device_memory_sparse(cudamat_sparse* mat) {
    int nnz = mat->nnz, rows = mat->size[0];
    cublasStatus stat;
    stat = cublasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        return CUBLAS_ERROR;
    }
    stat = cublasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        cublasFree(mat->data_device.data);  // Bug fix: was leaked on this path.
        return CUBLAS_ERROR;
    }
    stat = cublasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        cublasFree(mat->data_device.data);     // Bug fix: was leaked on this path.
        cublasFree(mat->data_device.indices);  // Bug fix: was leaked on this path.
        return CUBLAS_ERROR;
    }
    mat->on_device = 1;
    return 0;
}
// Copies the device buffer back into the host buffer.
// Returns 0 on success, ERROR_NOT_ON_DEVICE if the matrix has no device copy,
// or CUBLAS_ERROR if the transfer failed.
extern int copy_to_host(cudamat* mat) {
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    int n_elems = mat->size[0] * mat->size[1];
    cublasGetVector(n_elems, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
// Copies the host buffer to the device, allocating device storage first if
// the matrix is not yet device-resident. Views (owns_data == 0) are
// intentionally allowed. Returns 0, an allocation error code, or CUBLAS_ERROR.
extern int copy_to_device(cudamat* mat) {
    int n_elems = mat->size[0] * mat->size[1];
    if (!mat->on_device) {
        int err = allocate_device_memory(mat);
        if (err)
            return err;
    }
    cublasSetVector(n_elems, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
// Copies a host-side CSR matrix (data, indices, indptr) to the device,
// allocating device storage first if needed. Each of the three transfers is
// checked separately; returns CUBLAS_ERROR at the first failure, else 0.
extern int copy_sparse_to_device(cudamat_sparse* mat) {
    int len = mat->nnz, rows = mat->size[0];
    int err_code = 0;
    //if (!mat->owns_data)
    //    return VIEW_ERROR;
    if (!mat->on_device) {
        err_code = allocate_device_memory_sparse(mat);
        if (err_code)
            return err_code;
    }
    cublasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    cublasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    // indptr has one entry per row plus the terminating end offset.
    cublasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
// Device-to-device copy of mat1's contents into mat2 using cublasScopy.
// Shapes must match exactly; returns ERROR_INCOMPATIBLE_DIMENSIONS otherwise,
// CUBLAS_ERROR if the copy fails, and 0 on success.
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int n_elems = mat1->size[0] * mat1->size[1];
    cublasScopy(n_elems, mat1->data_device, 1, mat2->data_device, 1);
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
// Extracts rows [start, end) of source into target via kGetRowSlice.
// target must be (end-start) x width. Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS,
// or CUDA_ERROR if the launch/execution failed.
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    int height = source->size[0];
    int width = source->size[1];
    if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // 2D grid of 32-thread blocks covering the slice; ceil so partial tiles are included.
    dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 kernelBlockDim(32, 1, 1);
    kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes source into rows [start, end) of target via kSetRowSlice (inverse of
// get_row_slice). source must be (end-start) x width. Returns 0,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    int height = target->size[0];
    int width = target->size[1];
    if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // 2D grid of 32-thread blocks covering the slice; ceil so partial tiles are included.
    dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 kernelBlockDim(32, 1, 1);
    kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes the transpose of source into target via the kTranspose kernel.
// target must be width x height when source is height x width.
// Fix: synchronize before checking for errors — kernel execution is
// asynchronous, so without the sync checkCUDAError() could only see launch
// errors, not execution failures (every sibling launcher in this file syncs).
// Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int copy_transpose(cudamat* source, cudamat* target) {
    unsigned int height = source->size[0];
    unsigned int width = source->size[1];
    if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // setup execution parameters: ceil-divide both dimensions into tiles.
    unsigned int grid_x = height / COPY_BLOCK_SIZE;
    if (height % COPY_BLOCK_SIZE)
        grid_x++;
    unsigned int grid_y = width / COPY_BLOCK_SIZE;
    if (width % COPY_BLOCK_SIZE)
        grid_y++;
    dim3 grid(grid_x, grid_y, 1);
    dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
    kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
    cudaThreadSynchronize();  // surface asynchronous execution errors
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Releases the device buffer for matrices that own their storage. Views and
// host-only matrices are a no-op. The on_device flag is cleared even if the
// free reports an error. Returns 0 or CUBLAS_ERROR.
extern int free_device_memory(cudamat* mat) {
    if (!mat->owns_data || !mat->on_device)
        return 0;
    cublasStatus status = cublasFree(mat->data_device);
    mat->on_device = 0;
    if (status != CUBLAS_STATUS_SUCCESS || check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
// Overwrites the recorded dimensions without touching storage or checking
// that the element count is preserved (see reshape for the checked variant).
extern int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    return 0;
}
// Changes the recorded shape to m x n provided the total element count is
// conserved; the data itself is untouched. Returns 0 or
// ERROR_INCOMPATIBLE_DIMENSIONS.
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
    unsigned int current_elems = mat->size[0] * mat->size[1];
    if (current_elems != m * n)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    mat->size[0] = m;
    mat->size[1] = n;
    return 0;
}
// Makes target a non-owning device view of columns [first_col, last_col) of
// source. Works because columns are contiguous in the (column-major, non-
// transposed) layout. Returns 0 or an error code; target is only written on
// the success path.
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (last_col > source->size[1] || (first_col >= last_col))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int rows = source->size[0];
    target->size[0] = source->size[0];
    target->size[1] = last_col - first_col;
    target->data_host = 0;
    target->data_device = source->data_device + first_col * rows;
    target->on_device = 1;
    target->on_host = 0;
    target->is_trans = 0;
    target->owns_data = 0;  // view: never freed by free_device_memory
    return 0;
}
// Makes target a non-owning device view of entries [first_ind, last_ind) of a
// vector (source must have a single row or a single column). The slice keeps
// the source's orientation. Fix: all validation now happens before target is
// touched, so an error return no longer leaves target half-initialized (the
// original set data_device/on_device and then could bail out on the bounds
// check). Returns 0 or an error code.
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
    // source must be a vector.
    if (source->size[0] > 1 && source->size[1] > 1)
        return ERROR_GENERIC;
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (first_ind >= last_ind)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // Bounds check against whichever dimension carries the data.
    if (source->size[0] > 1) {
        if (last_ind > source->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
    } else {
        if (last_ind > source->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
    }
    int num_rows = source->size[0];
    target->data_host = 0;
    target->data_device = source->data_device + first_ind * num_rows;
    target->on_device = 1;
    target->on_host = 0;
    target->is_trans = 0;
    target->owns_data = 0;
    if (source->size[0] > 1) {
        // Column vector: slice along rows.
        target->size[0] = last_ind - first_ind;
        target->size[1] = 1;
    } else {
        // Row vector (or 1x1): slice along columns.
        target->size[0] = 1;
        target->size[1] = last_ind - first_ind;
    }
    return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
// Wraps an existing m x n host array in a cudamat descriptor. The matrix is
// host-resident, untransposed, and owns its data; no device copy is made.
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    mat->data_host = data;
    mat->on_host = 1;
    mat->on_device = 0;
    mat->is_trans = 0;
    mat->owns_data = 1;
}
// Wraps existing host-side CSR arrays (data, indices, indptr) in a
// cudamat_sparse descriptor for an m x n matrix with nnz nonzeros.
// The matrix starts host-resident, untransposed, and owning its data.
extern void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
    mat->data_host.data = data;
    mat->data_host.indices = indices;
    mat->data_host.indptr = indptr;
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_device = 0;
    mat->on_host = 1;
    mat->is_trans = 0;
    mat->owns_data = 1;
    mat->nnz = nnz;
}
// Marks the matrix as device-resident without allocating or copying anything;
// the caller is responsible for data_device being valid.
extern void set_on_device(cudamat* mat) {
    mat->on_device = 1;
}
// Initializes an m x n matrix that lives only on the device: the descriptor
// is filled in and device storage is allocated (uninitialized). data_host is
// left untouched. Returns allocate_device_memory's result.
extern int init_empty(cudamat* mat, int m, int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_host = 0;
    mat->on_device = 0;  // allocate_device_memory flips this on success
    mat->is_trans = 0;
    mat->owns_data = 1;
    return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
// Fills mat with uniform random numbers using the persistent multiply-with-
// carry state in rnd_state (kRandomUniform). Returns 0, ERROR_NOT_ON_DEVICE,
// or CUDA_ERROR.
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Fills mat with Gaussian random numbers drawn from the shared RNG state
// (kRandomGaussian). Returns 0, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Draws Bernoulli samples into target using mat's entries (kSampleBernoulli);
// shapes must match. Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS,
// ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleBernoulli<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Bernoulli sampling variant for tanh-parameterized units
// (kSampleBernoulliTanh); shapes must match. Returns 0,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleBernoulliTanh<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Draws Poisson samples into target from mat's entries (kSamplePoisson);
// shapes must match. Returns 0, ERROR_INCOMPATIBLE_DIMENSIONS,
// ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSamplePoisson<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Draws Gaussian samples into target from mat's entries with scale factor
// mult (kSampleGaussian); shapes must match. Returns 0,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Applies random perturbation to energies in mat, writing to target
// (kPerturbEnergy); shapes must match. Returns 0,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kPerturbEnergy<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Applies random perturbation to probabilities in mat, writing to target
// (kPerturbProb); shapes must match. Returns 0,
// ERROR_INCOMPATIBLE_DIMENSIONS, ERROR_NOT_ON_DEVICE, or CUDA_ERROR.
extern int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kPerturbProb<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// In-place random dropout on mat: kRandomDropout uses the shared RNG state,
// dropping entries with probability dropprob and replacing them with val
// (exact semantics live in the kernel). Returns 0, ERROR_NOT_ON_DEVICE, or
// CUDA_ERROR.
extern int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomDropout<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
// Combines mat with column vector vec into target via kAddColVector.
// vec must be h x 1 and target must match mat's shape. Returns 0,
// ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS,
// or CUDA_ERROR.
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError()) {
        return CUDA_ERROR;
    }
    return 0;
}
// Like add_col_vec but with a scalar multiplier applied to vec (kAddColMult).
// vec must be h x 1 and target must match mat. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddColMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Scales mat's diagonal by val into target (kMultDiagonalScalar); target must
// match mat's shape. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Adds scalar val to mat's diagonal into target (kAddDiagonalScalar); target
// must match mat's shape. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Multiplies mat's diagonal element-wise by vector vec into target
// (kMultDiagonal). vec's total length must equal mat's row count; target must
// match mat. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    // vec may be a row or column vector; only its element count matters.
    if (mat->size[0] != vec->size[1] * vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Adds vector vec element-wise to mat's diagonal into target (kAddDiagonal).
// vec's total length must equal mat's row count; target must match mat.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSED,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    // vec may be a row or column vector; only its element count matters.
    if (mat->size[0] != vec->size[1] * vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Combines mat with row vector vec scaled by mult into target (kAddRowMult).
// vec must be 1 x w and target must match mat. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddRowMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Combines mat with row vector vec into target (kAddRowVector). vec must be
// 1 x w and target must match mat. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Multiplies mat by column vector vec into target (kMultByColVector). vec must
// be h x 1 and target must match mat. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Multiplies mat by row vector vec into target (kMultByRowVector). vec must be
// 1 x w and target must match mat. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Divides mat by column vector vec into target (kDivByColVector). vec must be
// h x 1 and target must match mat. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Divides mat by row vector vec into target (kDivByRowVector). vec must be
// 1 x w and target must match mat. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise comparison of mat1 against mat2 into target (kLessThanEq).
// Fix: also require target to be device-resident, matching greater_than_eq /
// upper_bound / lower_bound — the kernel writes through target->data_device,
// which is invalid when target is host-only.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLessThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise comparison of mat1 against mat2 into target (kLessThan).
// Fix: also require target to be device-resident, matching greater_than and
// the bound operations — the kernel writes through target->data_device.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise comparison of mat against scalar val into target
// (kLessThanEqScalar); target must match mat's shape and transposedness.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLessThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise comparison of mat against scalar val into target
// (kLessThanScalar); target must match mat's shape and transposedness.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise comparison of mat1 against mat2 into target (kGreaterThanEq);
// all three must be device-resident, mat1/mat2 equally transposed, and all
// shapes equal. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGreaterThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise comparison of mat1 against mat2 into target (kGreaterThan);
// all three must be device-resident, mat1/mat2 equally transposed, and all
// shapes equal. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise bounding of mat1 by mat2 into target (kUpperBound); all three
// must be device-resident, mat1/mat2 equally transposed, and all shapes equal.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kUpperBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise bounding of mat1 by mat2 into target (kLowerBound); all three
// must be device-resident, mat1/mat2 equally transposed, and all shapes equal.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLowerBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise comparison of mat against scalar val into target
// (kGreaterThanEqScalar); target must match mat's shape and transposedness.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGreaterThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise comparison of mat against scalar val into target
// (kGreaterThanScalar); target must match mat's shape and transposedness.
// Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise bounding of mat by scalar val into target (kUpperBoundScalar);
// target must match mat's shape and transposedness. Returns 0,
// ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS,
// or CUDA_ERROR.
extern int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kUpperBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise bounding of mat by scalar val into target (kLowerBoundScalar);
// target must match mat's shape and transposedness. Returns 0,
// ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS, ERROR_INCOMPATIBLE_DIMENSIONS,
// or CUDA_ERROR.
extern int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kLowerBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Column-wise max of mat into the 1 x w target (kMaxColumnwise). Only axis 0
// is implemented; other axes return ERROR_UNSUPPORTED. Returns 0 or an error
// code.
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        int shared_mem_size = 32 * sizeof(float) ;
        // Near-square 2D grid with w1*w2 >= w blocks, one 32-thread block per column.
        // NOTE(review): w1 is 0 when w == 0, which would divide by zero — callers
        // appear to never pass empty matrices; confirm.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Runs kChooseMaxAndAccumulate column-wise over mat, accumulating into acc,
// which must match mat's shape. Returns 0, ERROR_NOT_ON_DEVICE,
// ERROR_TRANSPOSED, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !acc->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // Near-square 2D grid with w1*w2 >= w blocks, one 32-thread block per column.
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kChooseMaxAndAccumulate<<<gridDim,32>>>(mat->data_device, acc->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Runs kChooseMaxColumnwise over mat into target, which must match mat's
// shape. Only axis 0 is implemented; other axes return ERROR_UNSUPPORTED.
// Returns 0 or an error code.
extern int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        int shared_mem_size = 32 * sizeof(float) ;
        // Near-square 2D grid with w1*w2 >= w blocks, one 32-thread block per column.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kChooseMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Column-wise argmax of mat into the 1 x w target (kArgMaxColumnwise). Only
// axis 0 is implemented; other axes return ERROR_UNSUPPORTED. Returns 0 or an
// error code.
extern int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        // Near-square 2D grid with w1*w2 >= w blocks, one 32-thread block per column.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kArgMaxColumnwise<<<gridDim,32>>>(mat->data_device, target->data_device, w, h);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Column-wise squared-sum reduction of mat into the 1 x w target
// (kSqSumColumnwise, with extra parameters mult and p forwarded to the
// kernel). Only axis 0 is implemented; other axes return ERROR_UNSUPPORTED.
// Returns 0 or an error code.
extern int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        int shared_mem_size = 32 * sizeof(float) ;
        // Near-square 2D grid with w1*w2 >= w blocks, one 32-thread block per column.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kSqSumColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Column-wise norm limiting of mat into target (kNormLimitColumnwise, with
// limit norm); target must match mat's shape. Only axis 0 is implemented;
// other axes return ERROR_UNSUPPORTED. Returns 0 or an error code.
extern int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
                             float norm) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (axis == 0) {
        if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        int shared_mem_size = 32 * sizeof(float) ;
        // Near-square 2D grid with w1*w2 >= w blocks, one 32-thread block per column.
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kNormLimitColumnwise<<<gridDim,32, shared_mem_size>>>(mat->data_device, target->data_device, norm, w, h);
        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise sign of mat into target (kSign); target must match mat's shape
// and transposedness. Returns 0, ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
// ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int sign(cudamat* mat, cudamat* target) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans != target->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise kApplyCos of mat into target; shapes must match. Returns 0,
// ERROR_NOT_ON_DEVICE, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int apply_cos(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplyCos<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise kApplySin of mat into target; shapes must match. Returns 0,
// ERROR_NOT_ON_DEVICE, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int apply_sin(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplySin<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// Element-wise kApplySigmoid of mat into target; shapes must match. Returns 0,
// ERROR_NOT_ON_DEVICE, ERROR_INCOMPATIBLE_DIMENSIONS, or CUDA_ERROR.
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log(cudamat* mat, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len, tiny);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_ceil(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCeil<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_floor(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kFloor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* target[i] = mat[i] ** pow element-wise (scalar exponent). */
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
    const unsigned int n = mat->size[0] * mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kPow<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* target[i] = mat[i] ** pow[i] element-wise (per-element exponents).
   All three matrices must be on the device and have identical shapes.
   Returns 0 on success or a cudamat error code. */
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
  unsigned int len = mat->size[0] * mat->size[1];
  /* Fix: also require `pow` to be on the device -- its device pointer is
     dereferenced by the kernel below, but was never validated. */
  if (!mat->on_device || !pow->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len);
  cudaThreadSynchronize();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}
/* target[i] = cross-entropy term computed by kCrossEntropy from mat[i] and
   pow[i]; `tiny` is forwarded to the kernel (presumably a log() floor --
   confirm against kCrossEntropy).  Returns 0 or a cudamat error code. */
extern int compute_cross_entropy(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
  unsigned int len = mat->size[0] * mat->size[1];
  /* Fix: also require `pow` to be on the device -- its device pointer is
     dereferenced by the kernel below, but was never validated. */
  if (!mat->on_device || !pow->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  kCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
  cudaThreadSynchronize();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}
/* Bernoulli variant of the cross-entropy above; delegates to
   kCrossEntropyBernoulli with the same argument contract. */
extern int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
  unsigned int len = mat->size[0] * mat->size[1];
  /* Fix: validate `pow` as well (see compute_cross_entropy). */
  if (!mat->on_device || !pow->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  kCrossEntropyBernoulli<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
  cudaThreadSynchronize();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}
/* Marks per-element correct predictions via kCorrectPreds, thresholding at
   `cutoff`; same shape/placement contract as the functions above. */
extern int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
  unsigned int len = mat->size[0] * mat->size[1];
  /* Fix: validate `pow` as well (see compute_cross_entropy). */
  if (!mat->on_device || !pow->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  kCorrectPreds<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, cutoff);
  cudaThreadSynchronize();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}
/* target[i] = 1 / mat[i] element-wise. */
extern int reciprocal(cudamat* mat, cudamat* target) {
    const unsigned int n = mat->size[0] * mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (!(mat->size[0] == target->size[0] && mat->size[1] == target->size[1]))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kReciprocal<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Dense matrix multiply via cuBLAS: target = alpha * mat1 * mat2 + beta * target.
   Shape compatibility is expressed through get_leading_dimension /
   get_nonleading_dimension (defined elsewhere; presumably they account for
   is_trans -- confirm against their definitions).  cuBLAS is column-major,
   and size[0] is passed as each matrix's leading dimension.
   Returns 0 on success, ERROR_NOT_ON_DEVICE, ERROR_INCOMPATIBLE_DIMENSIONS,
   or CUBLAS_ERROR. */
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  /* Logical (post-transpose) dimensions must chain: (m x k) * (k x n) = (m x n). */
  if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
      get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
      get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  }
  int m = get_leading_dimension(mat1),
      k = get_leading_dimension(mat2),
      n = get_nonleading_dimension(mat2);
  /* Transpose characters ('n'/'t') come from each matrix's is_trans flag. */
  cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
              m, n, k,
              alpha, mat1->data_device, mat1->size[0],
              mat2->data_device, mat2->size[0],
              beta, target->data_device, target->size[0]);
  /* Legacy cuBLAS reports errors through a global status, checked here
     before waiting for the asynchronous GEMM to finish. */
  if (check_cublas_error())
    return CUBLAS_ERROR;
  cudaThreadSynchronize();
  return 0;
}
/* Sparse (CSR-style: data/indptr/indices) x dense multiply:
   target = alpha * mat1 * mat2 + beta * target, computed by kSparseDot on a
   2-D grid of COPY_BLOCK_SIZE x COPY_BLOCK_SIZE thread blocks.
   Returns 0 on success or a cudamat error code. */
extern int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
  if (!mat1->on_device || !mat2->on_device || !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  int m = mat1->size[0],
      k = mat1->size[1],
      k2 = mat2->size[0],
      n = mat2->size[1];
  /* Inner dimensions must agree. */
  if (k != k2) {
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  }
  /* Ceiling division: enough blocks to cover every output element. */
  unsigned int grid_x = m / COPY_BLOCK_SIZE;
  if (m % COPY_BLOCK_SIZE)
    grid_x++;
  unsigned int grid_y = n / COPY_BLOCK_SIZE;
  if (n % COPY_BLOCK_SIZE)
    grid_y++;
  dim3 grid(grid_x, grid_y, 1);
  dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
  kSparseDot<<<grid, threads>>>(m, n, k, mat1->data_device.data,
                                mat1->data_device.indptr,
                                mat1->data_device.indices,
                                mat2->data_device, target->data_device, beta, alpha);
  /* Fix: this launches a plain CUDA kernel, not a cuBLAS call, so the old
     check_cublas_error() could never observe a failure.  Synchronize and
     inspect the CUDA error state instead, as the other kernel wrappers do. */
  cudaThreadSynchronize();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}
/* Inner product of two device vectors via cublasSdot.  On success returns the
   dot product and sets *err_code = 0; on failure sets *err_code to a cudamat
   error code (the float return value is then not meaningful). */
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
  int len = mat1->size[0]*mat1->size[1];
  float res;
  if (!mat1->on_device || !mat2->on_device) {
    /* Fix: every other error path reports through *err_code; this one
       previously returned without setting it, leaving the caller to read a
       stale value.  The float return is kept for backward compatibility. */
    *err_code = ERROR_NOT_ON_DEVICE;
    return ERROR_NOT_ON_DEVICE;
  }
  if (mat1->is_trans != mat2->is_trans) {
    *err_code = ERROR_TRANSPOSEDNESS;
    return 0;
  }
  if (mat2->size[0] * mat2->size[1] != len) {
    *err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
    return 0;
  }
  res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
  if (check_cublas_error()) {
    *err_code = CUBLAS_ERROR;
    return -1.;
  } else {
    *err_code = 0;
    return res;
  }
}
/* In-place SAXPY: mat1 = mat1 + alpha * mat2, via cuBLAS.  The operands must
   be on the device, share transposedness, and have identical shapes. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
    const int n = mat1->size[0] * mat1->size[1];
    if (!(mat1->on_device && mat2->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (!(mat1->size[0] == mat2->size[0] && mat1->size[1] == mat2->size[1]))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    cublasSaxpy(n, alpha, mat2->data_device, 1, mat1->data_device, 1);
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
/* In-place update mat1 += mult * sign(mat2), computed by kAddMultSign.
   Operands must be on the device, share transposedness, and have identical
   shapes.  Returns 0 on success or a cudamat error code. */
extern int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
  int len = mat1->size[0]*mat1->size[1];
  if (!mat1->on_device || !mat2->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat1->is_trans != mat2->is_trans)
    return ERROR_TRANSPOSEDNESS;
  if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  kAddMultSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, len, mult);
  /* Fix: this launches a CUDA kernel, not a cuBLAS call, so the old
     check_cublas_error() could never fire.  Follow the pattern of the other
     kernel wrappers: synchronize, then check the CUDA error state. */
  cudaThreadSynchronize();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}
/* Shared validation for the binary element-wise wrappers below: all three
   operands on the device, matching transposedness, identical shapes. */
static int checkBinaryArgs(cudamat* mat1, cudamat* mat2, cudamat* target) {
    if (!(mat1->on_device && mat2->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    return 0;
}
/* target = mat1 + mat2 element-wise. */
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkBinaryArgs(mat1, mat2, target);
    if (err) return err;
    kAdd<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* target = mat1 - mat2 element-wise. */
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkBinaryArgs(mat1, mat2, target);
    if (err) return err;
    kSubtract<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* target = mat1 / mat2 element-wise. */
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkBinaryArgs(mat1, mat2, target);
    if (err) return err;
    kDivide<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Elementwise multiplication of 2 matrices: target = mat1 * mat2. */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkBinaryArgs(mat1, mat2, target);
    if (err) return err;
    kMult<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Shared validation for the activation-derivative wrappers below: all three
   operands on the device, matching transposedness, identical shapes. */
static int checkDerivArgs(cudamat* mat1, cudamat* mat2, cudamat* target) {
    if (!(mat1->on_device && mat2->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    return 0;
}
/* Applies the sine derivative via kSinDeriv (mat1/mat2 roles defined by
   that kernel).  Returns 0 or a cudamat error code. */
extern int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkDerivArgs(mat1, mat2, target);
    if (err) return err;
    kSinDeriv<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Cosine-derivative counterpart, delegating to kCosDeriv. */
extern int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkDerivArgs(mat1, mat2, target);
    if (err) return err;
    kCosDeriv<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Logistic (sigmoid) derivative, delegating to kLogisticDeriv. */
extern int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkDerivArgs(mat1, mat2, target);
    if (err) return err;
    kLogisticDeriv<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Tanh derivative, delegating to kTanhDeriv. */
extern int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkDerivArgs(mat1, mat2, target);
    if (err) return err;
    kTanhDeriv<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* ReLU derivative, delegating to kRectifiedLinearDeriv. */
extern int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkDerivArgs(mat1, mat2, target);
    if (err) return err;
    kRectifiedLinearDeriv<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Smooth-ReLU (softplus) derivative, delegating to kRectifiedLinearSmoothDeriv. */
extern int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    const int n = mat1->size[0] * mat1->size[1];
    int err = checkDerivArgs(mat1, mat2, target);
    if (err) return err;
    kRectifiedLinearSmoothDeriv<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Fill every element of mat with the scalar alpha (in place). */
extern int assign_scalar(cudamat* mat, float alpha) {
    const int n = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kAssignScalar<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* target = mat * alpha element-wise. */
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    const int n = mat->size[0] * mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (!(mat->size[0] == target->size[0] && mat->size[1] == target->size[1]))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultScalar<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* target = mat / alpha element-wise. */
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    const int n = mat->size[0] * mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (!(mat->size[0] == target->size[0] && mat->size[1] == target->size[1]))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kDivideScalar<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* target = mat + alpha element-wise. */
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
    const int n = mat->size[0] * mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (!(mat->size[0] == target->size[0] && mat->size[1] == target->size[1]))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddScalar<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, n);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Euclidean (L2) norm of all elements via cublasSnrm2.  On success returns
   the norm and sets *err_code = 0; otherwise sets *err_code and returns an
   error-indicating value. */
extern float euclid_norm(cudamat* mat, int* err_code) {
  /* Fix: validate placement before handing mat->data_device to cuBLAS --
     the original called cublasSnrm2 first, passing a pointer that is not a
     valid device address when the matrix is host-only. */
  if (!mat->on_device) {
    *err_code = ERROR_NOT_ON_DEVICE;
    return ERROR_NOT_ON_DEVICE;
  }
  int len = mat->size[0]*mat->size[1];
  float res = cublasSnrm2(len, mat->data_device, 1);
  if (check_cublas_error()) {
    *err_code = CUBLAS_ERROR;
    return -1.;
  } else {
    *err_code = 0;
    return res;
  }
}
/* Gather the rows of `source` named by `indices` into `target` via
   kSelectRows.  NOTE(review): unlike most wrappers here, placement
   (on_device) is not validated -- callers must ensure it. */
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
    const int nRetRows = indices->size[1];
    if (nRetRows == 0)
        return 0;                       /* nothing to gather */
    dim3 gridDim((nRetRows + 31) / 32); /* one 32-thread block per 32 rows */
    dim3 blockDim(32);
    kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Swap pairs of columns named by indices1/indices2 via kSwapColumns.
   NOTE(review): no placement or shape validation is performed. */
extern int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
    const int nPairs = indices1->size[1] * indices1->size[0];
    const int nRows = source->size[0];
    const int nCols = source->size[1];
    kSwapColumns<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, target->data_device, indices1->data_device, indices2->data_device, nPairs, nCols, nRows);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Scatter the rows of `source` into the rows of `target` named by `indices`
   via kSetSelectedRows (inverse of selectRows). */
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
    const int nSetRows = indices->size[1];
    if (nSetRows == 0)
        return 0;                       /* nothing to scatter */
    dim3 gridDim((nSetRows + 31) / 32); /* one 32-thread block per 32 rows */
    dim3 blockDim(32);
    kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kGenerateTranslationsBigVarOff with one block per source column
   and 512 threads per block; per-image offsets come from off_x/off_y.
   No synchronization is performed before the error check (as in the
   original), so only launch-time errors are caught here. */
extern int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
    dim3 grid(source->size[1], 1, 1);
    dim3 block(512, 1, 1);
    kGenerateTranslationsBigVarOff<<<grid, block>>>(source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kBlockify with one block per source column and 512 threads per
   block; `blocksize` is forwarded to the kernel.  Only launch-time errors
   are caught (no synchronization, matching the original). */
extern int blockify(cudamat* source, cudamat* target, int blocksize) {
    dim3 grid(source->size[1], 1, 1);
    dim3 block(512, 1, 1);
    kBlockify<<<grid, block>>>(source->data_device, target->data_device, source->size[0], blocksize);
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Column-wise softmax of mat into target via kSoftMax.  The grid is a
   near-square factorization covering all w columns, with 32 threads and
   32 floats of shared memory per block. */
extern int softmax(cudamat* mat, cudamat* target) {
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (!(target->size[0] == h && target->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int sharedBytes = 32 * sizeof(float);
    const int gx = floor(sqrt(w));
    const int gy = (w + gx - 1) / gx;   /* ceil(w / gx) */
    dim3 gridDim(gx, gy, 1);
    kSoftMax<<<gridDim, 32, sharedBytes>>>(mat->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* In-place column-wise softmax via kSoftMaxOverwrite; grid layout matches
   softmax() above (near-square factorization, 32 threads + 32 floats of
   shared memory per block). */
extern int softmax_overwrite(cudamat* mat) {
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    const int sharedBytes = 32 * sizeof(float);
    const int gx = floor(sqrt(w));
    const int gy = (w + gx - 1) / gx;   /* ceil(w / gx) */
    dim3 gridDim(gx, gy, 1);
    kSoftMaxOverwrite<<<gridDim, 32, sharedBytes>>>(mat->data_device, w, h);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Softmax gradient via kSoftMaxGrad: mat is h x w, labels is a 1 x w row
   vector, target is h x w. */
extern int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (!(target->size[0] == h && target->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!(labels->size[0] == 1 && labels->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSoftMaxGrad<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Per-column correctness of softmax predictions via kSoftMaxCorrect:
   mat is h x w, labels and target are 1 x w row vectors.  The grid is a
   near-square factorization covering all w columns, 32 threads per block. */
extern int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (!(target->size[0] == 1 && target->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!(labels->size[0] == 1 && labels->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    const int gx = floor(sqrt(w));
    const int gy = (w + gx - 1) / gx;   /* ceil(w / gx) */
    dim3 gridDim(gx, gy, 1);
    kSoftMaxCorrect<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Accumulates columns of `mat` (h x w) into columns of `target` (h x w2)
   according to the 1 x w index vector `indices`, scaled by `mult`; the
   `avg` flag is forwarded to the kernel (presumably switches sum vs. mean --
   confirm against kAccumulateColumns).  Launch uses one block per row, so
   the per-block thread count must cover all w2 output columns.
   Returns 0 or a cudamat error code. */
extern int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
  unsigned int h = mat->size[0],
           w = mat->size[1],
           w2 = target->size[1];
  if (!mat->on_device || !indices->on_device|| !target->on_device)
    return ERROR_NOT_ON_DEVICE;
  if (mat->is_trans)
    return ERROR_TRANSPOSED;
  if (target->size[0] != h)
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  if (indices->size[0] != 1 || indices->size[1] != w)
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  /* One thread per output column within a block; refuse shapes the launch
     configuration cannot cover. */
  if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
    return ERROR_INCOMPATIBLE_DIMENSIONS;
  kAccumulateColumns<<<h, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, indices->data_device, target->data_device, w, w2, h, mult, avg);
  cudaThreadSynchronize();
  if (checkCUDAError())
    return CUDA_ERROR;
  return 0;
}
/* Per-column softmax cross-entropy via kSoftMaxCrossEntropy: mat is h x w,
   labels and target are 1 x w; `tiny` is forwarded to the kernel
   (presumably a log() floor -- confirm against the kernel). */
extern int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];
    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (!(target->size[0] == 1 && target->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!(labels->size[0] == 1 && labels->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kSoftMaxCrossEntropy<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h, tiny);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Expands the h x w `source` into the h x w2 `target`, picking source
   columns per the 1 x w2 index vector, via kExpand. */
extern int expand(cudamat* source, cudamat* indices, cudamat* target){
    const unsigned int h = source->size[0];
    const unsigned int w = source->size[1];
    const unsigned int w2 = target->size[1];
    if (!(source->on_device && indices->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!(indices->size[0] == 1 && indices->size[1] == w2))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kExpand<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, indices->data_device, target->data_device, h, w, w2);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* target = source + mult * expand(mat, indices), computed by kExpandAndAdd:
   source/target are h x w, mat is h x w2, indices is 1 x w. */
extern int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
    const unsigned int h = source->size[0];
    const unsigned int w = source->size[1];
    const unsigned int w2 = mat->size[1];
    if (!(source->on_device && mat->on_device && indices->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (!(target->size[0] == h && target->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!(indices->size[0] == 1 && indices->size[1] == w))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != h)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kExpandAndAdd<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, mat->data_device, indices->data_device, target->data_device, w, h, mult, w2);
    cudaThreadSynchronize();
    return checkCUDAError() ? CUDA_ERROR : 0;
}
} | #include <hip/hip_runtime.h>
#define ERROR_INCOMPATIBLE_DIMENSIONS -1
#define CUBLAS_ERROR -2
#define CUDA_ERROR -3
#define VIEW_ERROR -4
#define ERROR_TRANSPOSED -5
#define ERROR_GENERIC -6
#define ERROR_TRANSPOSEDNESS -7
#define ERROR_NOT_ON_DEVICE -8
#define ERROR_UNSUPPORTED -9
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
#ifndef MAX
#define MAX(x,y) ((x > y) ? x : y)
#endif
struct cudamat {
float* data_host;
float* data_device;
int on_device;
int on_host;
int size[2];
int is_trans; // 0 or 1
int owns_data;
};
struct rnd_struct {
unsigned int* dev_mults;
unsigned long long* dev_words;
};
struct sparse_data {
int *indices, *indptr;
float* data;
};
struct cudamat_sparse {
sparse_data data_host;
sparse_data data_device;
int on_device;
int on_host;
int size[2];
int is_trans; // 0 or 1
int owns_data;
int nnz;
}; |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#define ERROR_INCOMPATIBLE_DIMENSIONS -1
#define CUBLAS_ERROR -2
#define CUDA_ERROR -3
#define VIEW_ERROR -4
#define ERROR_TRANSPOSED -5
#define ERROR_GENERIC -6
#define ERROR_TRANSPOSEDNESS -7
#define ERROR_NOT_ON_DEVICE -8
#define ERROR_UNSUPPORTED -9
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
#ifndef MAX
#define MAX(x,y) ((x > y) ? x : y)
#endif
struct cudamat {
float* data_host;
float* data_device;
int on_device;
int on_host;
int size[2];
int is_trans; // 0 or 1
int owns_data;
};
struct rnd_struct {
unsigned int* dev_mults;
unsigned long long* dev_words;
};
struct sparse_data {
int *indices, *indptr;
float* data;
};
struct cudamat_sparse {
sparse_data data_host;
sparse_data data_device;
int on_device;
int on_host;
int size[2];
int is_trans; // 0 or 1
int owns_data;
int nnz;
}; | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#define ERROR_INCOMPATIBLE_DIMENSIONS -1
#define CUBLAS_ERROR -2
#define CUDA_ERROR -3
#define VIEW_ERROR -4
#define ERROR_TRANSPOSED -5
#define ERROR_GENERIC -6
#define ERROR_TRANSPOSEDNESS -7
#define ERROR_NOT_ON_DEVICE -8
#define ERROR_UNSUPPORTED -9
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
#ifndef MAX
#define MAX(x,y) ((x > y) ? x : y)
#endif
struct cudamat {
float* data_host;
float* data_device;
int on_device;
int on_host;
int size[2];
int is_trans; // 0 or 1
int owns_data;
};
struct rnd_struct {
unsigned int* dev_mults;
unsigned long long* dev_words;
};
struct sparse_data {
int *indices, *indptr;
float* data;
};
struct cudamat_sparse {
sparse_data data_host;
sparse_data data_device;
int on_device;
int on_host;
int size[2];
int is_trans; // 0 or 1
int owns_data;
int nnz;
}; | .text
.file "cudamat.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // Elapsed Real Time for input-4.txt: real 0m29.789s
#include <stdio.h>
#include <stdbool.h>
#include <cuda_runtime.h>
// Size of the square we're looking for.
#define SQUARE_WIDTH 6
#define SQUARE_HEIGHT 6
// Maximum width of a row. Makes it easier to allocate the whole
// grid contiguously.
#define MAX_WIDTH 16384
// Type used for a row of the grid. Makes it easier to declare the
// grid as a pointer.
typedef char Row[ MAX_WIDTH ];
// Kernel: each thread takes one candidate top row r0 and slides a 6x6
// window left-to-right across the columns, counting windows that contain
// 26 distinct characters. Launch layout: 1-D grid of 1-D blocks, one
// thread per candidate top row. output[r0] receives that thread's count;
// when report is true each hit is also printed as "row col" via device printf.
__global__ void countSquares( int rows, int cols, bool report, Row *grid, int *output) {
// Unique index for this worker.
int r0 = blockDim.x * blockIdx.x + threadIdx.x;
// Make sure I actually have something to work on.
if ( r0 + SQUARE_HEIGHT - 1 < rows ) {
int total = 0;
// Sliding-window bounds: rows [startIdxRows, endIdxRows), cols [startIdxCols, endIdxCols).
int startIdxCols = 0; //this is actually column
int endIdxCols = startIdxCols + 6;
int startIdxRows = r0; //so this is actually row
int endIdxRows = startIdxRows + 6;
int colidx = 0;
int rowidx = 0;
char square[6][6];
while (endIdxCols <= cols) {
// Copy the current 6x6 window out of the grid into local storage.
for (int i = startIdxRows; i < endIdxRows; i++) {
for (int j = startIdxCols; j < endIdxCols; j++) {
square[rowidx][colidx] = grid[i][j];
colidx++;
}
rowidx++;
colidx = 0;
}
rowidx = 0;
// Validity check: collect the distinct characters seen in the window.
bool isValid = false;
// NOTE(review): device-heap malloc/free on every window iteration is very
// slow and the malloc result is never checked for NULL — a fixed local
// char[26] would be both safer and faster; confirm before changing.
char *knownLetters = (char *)malloc(26 * sizeof(char));
for (int i = 0; i < 26; i++) {
*(knownLetters + i) = '*';
}
int counter = 0;
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
bool exists = false;
// Linear scan of the letters recorded so far (k <= counter also
// inspects the not-yet-filled slot, which still holds '*').
for (int k = 0; k <= counter; k++) {
if (square[i][j] == *(knownLetters + k)) { //if letter is found
exists = true;
}
}
if (!exists) {
// NOTE(review): if a window holds more than 26 distinct characters
// this writes past knownLetters[25]; presumably input is limited to
// lowercase letters — confirm against the problem statement.
*(knownLetters + counter) = square[i][j];
counter++;
}
}
}
free(knownLetters);
// Exactly 26 distinct characters means every letter appeared once.
if (counter == 26) {
isValid = true;
}
if (isValid) {
total++;
if (report) {
printf("%d %d\n", startIdxRows, startIdxCols);
}
}
// Slide the window one column to the right.
endIdxCols++;
startIdxCols++;
} //end of while loop
*(output + r0) = total; //save the total number of squares to the unique index
} //end of if statement
}
// Size of the grid of characters.
int rows, cols;
// Grid of letters.
Row *grid;
// Read the grid of characters from standard input into the global
// rows/cols/grid variables. Exits with failure if the grid is too wide.
// NOTE(review): the scanf and malloc return values are unchecked, and the
// unbounded "%s" conversion reads into buffer[MAX_WIDTH + 1] — an input
// token longer than MAX_WIDTH overflows the stack buffer. Worth hardening
// (e.g. a "%16384s" field width plus result checks).
void readGrid() {
// Read grid dimensions.
scanf( "%d%d", &rows, &cols );
if ( cols > MAX_WIDTH ) {
fprintf( stderr, "Input grid is too wide.\n" );
exit( EXIT_FAILURE );
}
// Make space to store the grid as a big, contiguous array.
grid = (Row *) malloc( rows * sizeof( Row ) );
// Read each row of the grid as a string, then copy everything
// but the null terminator into the grid array.
int rowCount = 0;
char buffer[ MAX_WIDTH + 1 ];
while ( rowCount < rows ) {
scanf( "%s", buffer );
memcpy( grid[ rowCount++ ], buffer, cols );
}
}
// Report a fatal error: print the given message to stderr followed by a
// newline, then terminate the process with exit status 1. Does not return.
static void fail( char const *message ) {
fprintf( stderr, "%s\n", message );
exit( 1 );
}
// Print the command-line usage message, then terminate the process with
// exit status 1. Does not return.
static void usage() {
printf( "usage: square [report]\n" );
exit( 1 );
}
// Entry point: parse the optional "report" flag, read the grid from stdin,
// copy it to the device, launch one thread per candidate top row, and sum
// the per-row window counts copied back from the device.
// NOTE(review): every CUDA API return value here except the post-launch
// cudaGetLastError() is ignored; a failed cudaMalloc/cudaMemcpy would go
// unnoticed until the results are wrong. Worth adding checks.
int main( int argc, char *argv[] ) {
// If there's an argument, it better be "report"
bool report = false;
if ( argc == 2 ) {
if ( strcmp( argv[ 1 ], "report" ) != 0 )
usage();
report = true;
}
readGrid();
// Device copy of the grid: one contiguous rows * sizeof(Row) allocation.
Row *rowGrid = NULL;
cudaMalloc((void **)&rowGrid, rows * sizeof(Row)); //allocate memory for 2 arrays
cudaMemcpy(rowGrid, grid, rows * sizeof(Row), cudaMemcpyHostToDevice);
// Block and grid dimensions.
int threadsPerBlock = 250;
// Round up.
int blocksPerGrid = ( rows + threadsPerBlock - 1 ) / threadsPerBlock;
// Per-row result array on the device, zeroed before the launch.
int *output = NULL;
cudaMalloc((void **)&output, rows * sizeof(int));
cudaMemset(output, 0x00, rows * sizeof(int));
// Run our kernel on these block/grid dimensions
countSquares<<<blocksPerGrid, threadsPerBlock>>>( rows, cols, report, rowGrid, output);
if ( cudaGetLastError() != cudaSuccess )
fail( "Failure in CUDA kernel execution." );
// Copy the per-thread counts back; this blocking cudaMemcpy also serves as
// the synchronization point for the kernel launched above.
// NOTE(review): malloc result is unchecked here as well.
int *mySquareNums = (int *)malloc(rows * sizeof(int));
cudaMemcpy(mySquareNums, output, rows * sizeof(int), cudaMemcpyDeviceToHost);
// Accumulate the grand total across all candidate top rows.
int total = 0;
for (int i = 0; i < rows; i++) {
total += mySquareNums[i];
}
printf( "Squares: %d\n", total );
cudaFree(rowGrid);
cudaFree(output);
free(mySquareNums);
// Free memory on the device and the host.
free( grid );
cudaDeviceReset(); //reset the device
return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d%d"
.LC1:
.string "Input grid is too wide.\n"
.LC2:
.string "%s"
.text
.globl _Z8readGridv
.type _Z8readGridv, @function
_Z8readGridv:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
leaq -16384(%rsp), %r11
.cfi_def_cfa 11, 16416
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $16, %rsp
.cfi_def_cfa_offset 16432
movq %fs:40, %rax
movq %rax, 16392(%rsp)
xorl %eax, %eax
leaq cols(%rip), %rdx
leaq rows(%rip), %rsi
leaq .LC0(%rip), %rdi
call __isoc23_scanf@PLT
cmpl $16384, cols(%rip)
jg .L10
movl rows(%rip), %ebx
movslq %ebx, %rdi
salq $14, %rdi
call malloc@PLT
movq %rax, grid(%rip)
testl %ebx, %ebx
jle .L3
movl $0, %ebx
leaq .LC2(%rip), %r12
.L6:
movq %rsp, %rbp
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movq %rbx, %rdi
salq $14, %rdi
addq grid(%rip), %rdi
movslq cols(%rip), %rdx
movq %rbp, %rsi
call memcpy@PLT
addq $1, %rbx
cmpl %ebx, rows(%rip)
jg .L6
.L3:
movq 16392(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $16400, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z8readGridv, .-_Z8readGridv
.globl _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi
.type _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi, @function
_Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rcx, 8(%rsp)
movq %r8, (%rsp)
movb %dl, 20(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12countSquaresiibPA16384_cPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi, .-_Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi
.globl _Z12countSquaresiibPA16384_cPi
.type _Z12countSquaresiibPA16384_cPi, @function
_Z12countSquaresiibPA16384_cPi:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movzbl %dl, %edx
call _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z12countSquaresiibPA16384_cPi, .-_Z12countSquaresiibPA16384_cPi
.section .rodata.str1.1
.LC3:
.string "report"
.LC4:
.string "usage: square [report]\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "Failure in CUDA kernel execution."
.section .rodata.str1.1
.LC6:
.string "%s\n"
.LC7:
.string "Squares: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jne .L27
movq 8(%rsi), %rdi
leaq .LC3(%rip), %rsi
call strcmp@PLT
movl $1, %ebp
testl %eax, %eax
jne .L32
.L21:
call _Z8readGridv
movq $0, (%rsp)
movslq rows(%rip), %rsi
salq $14, %rsi
movq %rsp, %rdi
call cudaMalloc@PLT
movslq rows(%rip), %rdx
salq $14, %rdx
movl $1, %ecx
movq grid(%rip), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl rows(%rip), %ebx
movq $0, 8(%rsp)
movslq %ebx, %rsi
salq $2, %rsi
leaq 8(%rsp), %rdi
call cudaMalloc@PLT
movslq rows(%rip), %rdx
salq $2, %rdx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
movl $250, 28(%rsp)
movl $1, 32(%rsp)
addl $249, %ebx
movslq %ebx, %rax
imulq $274877907, %rax, %rax
sarq $36, %rax
sarl $31, %ebx
subl %ebx, %eax
movl %eax, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L33
.L22:
call cudaGetLastError@PLT
testl %eax, %eax
jne .L34
movslq rows(%rip), %rbp
salq $2, %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %rbx
movl $2, %ecx
movq %rbp, %rdx
movq 8(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movl rows(%rip), %edx
testl %edx, %edx
jle .L29
movq %rbx, %rax
movslq %edx, %rdx
leaq (%rbx,%rdx,4), %rcx
movl $0, %edx
.L25:
addl (%rax), %edx
addq $4, %rax
cmpq %rcx, %rax
jne .L25
.L24:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq grid(%rip), %rdi
call free@PLT
call cudaDeviceReset@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L35
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L27:
movl $0, %ebp
jmp .L21
.L33:
movzbl %bpl, %edx
movq 8(%rsp), %r8
movq (%rsp), %rcx
movl cols(%rip), %esi
movl rows(%rip), %edi
call _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi
jmp .L22
.L34:
leaq .LC5(%rip), %rcx
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L29:
movl $0, %edx
jmp .L24
.L35:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC8:
.string "_Z12countSquaresiibPA16384_cPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z12countSquaresiibPA16384_cPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl grid
.bss
.align 8
.type grid, @object
.size grid, 8
grid:
.zero 8
.globl cols
.align 4
.type cols, @object
.size cols, 4
cols:
.zero 4
.globl rows
.align 4
.type rows, @object
.size rows, 4
rows:
.zero 4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // Elapsed Real Time for input-4.txt: real 0m29.789s
#include <stdio.h>
#include <stdbool.h>
#include <cuda_runtime.h>
// Size of the square we're looking for.
#define SQUARE_WIDTH 6
#define SQUARE_HEIGHT 6
// Maximum width of a row. Makes it easier to allocate the whole
// grid contiguously.
#define MAX_WIDTH 16384
// Type used for a row of the grid. Makes it easier to declare the
// grid as a pointer.
typedef char Row[ MAX_WIDTH ];
// Kernel, run by each thread to count complete squares in parallel.
__global__ void countSquares( int rows, int cols, bool report, Row *grid, int *output) {
// Unique index for this worker.
int r0 = blockDim.x * blockIdx.x + threadIdx.x;
// Make sure I actually have something to work on.
if ( r0 + SQUARE_HEIGHT - 1 < rows ) {
int total = 0;
//TODO logic goes here
int startIdxCols = 0; //this is actually column
int endIdxCols = startIdxCols + 6;
int startIdxRows = r0; //so this is actually row
int endIdxRows = startIdxRows + 6;
int colidx = 0;
int rowidx = 0;
char square[6][6];
while (endIdxCols <= cols) {
//fill in square 2d array
for (int i = startIdxRows; i < endIdxRows; i++) {
for (int j = startIdxCols; j < endIdxCols; j++) {
square[rowidx][colidx] = grid[i][j];
// printf("%c %d %d\n", grid[i][j], i, j);
// printf("%c\n", square[rowidx][colidx]);
colidx++;
}
rowidx++;
colidx = 0;
}
rowidx = 0;
//at this point square is made
//so check if it's valid square
bool isValid = false;
char *knownLetters = (char *)malloc(26 * sizeof(char));
for (int i = 0; i < 26; i++) {
*(knownLetters + i) = '*';
}
int counter = 0;
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
bool exists = false;
for (int k = 0; k <= counter; k++) {
if (square[i][j] == *(knownLetters + k)) { //if letter is found
exists = true;
}
}
if (!exists) {
*(knownLetters + counter) = square[i][j];
counter++;
}
}
}
free(knownLetters);
if (counter == 26) {
isValid = true;
}
//End of boolean function
if (isValid) {
total++;
if (report) {
printf("%d %d\n", startIdxRows, startIdxCols);
}
}
endIdxCols++;
startIdxCols++;
} //end of while loop
*(output + r0) = total; //save the total number of squares to the unique index
// printf("Total squares is %d with thread ID %d\n", total, r0);
} //end of if statement
}
// Size of the grid of characters.
int rows, cols;
// Grid of letters.
Row *grid;
// Read the grid of characters.
void readGrid() {
// Read grdi dimensions.
scanf( "%d%d", &rows, &cols );
if ( cols > MAX_WIDTH ) {
fprintf( stderr, "Input grid is too wide.\n" );
exit( EXIT_FAILURE );
}
// Make space to store the grid as a big, contiguous array.
grid = (Row *) malloc( rows * sizeof( Row ) );
// Read each row of the grid as a string, then copy everything
// but the null terminator into the grid array.
int rowCount = 0;
char buffer[ MAX_WIDTH + 1 ];
while ( rowCount < rows ) {
scanf( "%s", buffer );
memcpy( grid[ rowCount++ ], buffer, cols );
}
}
// General function to report a failure and exit.
static void fail( char const *message ) {
fprintf( stderr, "%s\n", message );
exit( 1 );
}
// Print out a usage message, then exit.
static void usage() {
printf( "usage: square [report]\n" );
exit( 1 );
}
int main( int argc, char *argv[] ) {
// If there's an argument, it better be "report"
bool report = false;
if ( argc == 2 ) {
if ( strcmp( argv[ 1 ], "report" ) != 0 )
usage();
report = true;
}
// squareFound = false;
readGrid();
/** Array used to hold each squares found in each threads */
// int *reportedSquares = (int *)malloc(rows * sizeof(int));
// TODO Need to add code to allocate memory on the device and copy the grid
// over.
Row *rowGrid = NULL;
cudaMalloc((void **)&rowGrid, rows * sizeof(Row)); //allocate memory for 2 arrays
cudaMemcpy(rowGrid, grid, rows * sizeof(Row), cudaMemcpyHostToDevice);
// Block and grid dimensions.
int threadsPerBlock = 250;
// Round up.
int blocksPerGrid = ( rows + threadsPerBlock - 1 ) / threadsPerBlock;
// int *output = (int *)malloc(rows * sizeof(int));
int *output = NULL;
cudaMalloc((void **)&output, rows * sizeof(int));
cudaMemset(output, 0x00, rows * sizeof(int));
// printf("\n\n");
// Run our kernel on these block/grid dimensions
countSquares<<<blocksPerGrid, threadsPerBlock>>>( rows, cols, report, rowGrid, output);
if ( cudaGetLastError() != cudaSuccess )
fail( "Failure in CUDA kernel execution." );
// TODO Need to add code to copy the results list back to the host and
// add them up.
// int *squareNums = NULL;
int *mySquareNums = (int *)malloc(rows * sizeof(int));
// cudaMalloc((void **)&squareNums, rows * sizeof(int));
cudaMemcpy(mySquareNums, output, rows * sizeof(int), cudaMemcpyDeviceToHost);
// for (int i = 0; i < rows; i++) {
// *(output + i) = 0;
// mySquareNums[i] = 0;
// }
int total = 0;
for (int i = 0; i < rows; i++) {
total += mySquareNums[i];
}
printf( "Squares: %d\n", total );
cudaFree(rowGrid);
cudaFree(output);
free(mySquareNums);
// Free memory on the device and the host.
free( grid );
cudaDeviceReset(); //reset the device
return 0;
} | // Elapsed Real Time for input-4.txt: real 0m29.789s
#include <stdio.h>
#include <stdbool.h>
#include <hip/hip_runtime.h>
// Size of the square we're looking for.
#define SQUARE_WIDTH 6
#define SQUARE_HEIGHT 6
// Maximum width of a row. Makes it easier to allocate the whole
// grid contiguously.
#define MAX_WIDTH 16384
// Type used for a row of the grid. Makes it easier to declare the
// grid as a pointer.
typedef char Row[ MAX_WIDTH ];
// Kernel, run by each thread to count complete squares in parallel.
__global__ void countSquares( int rows, int cols, bool report, Row *grid, int *output) {
// Unique index for this worker.
int r0 = blockDim.x * blockIdx.x + threadIdx.x;
// Make sure I actually have something to work on.
if ( r0 + SQUARE_HEIGHT - 1 < rows ) {
int total = 0;
//TODO logic goes here
int startIdxCols = 0; //this is actually column
int endIdxCols = startIdxCols + 6;
int startIdxRows = r0; //so this is actually row
int endIdxRows = startIdxRows + 6;
int colidx = 0;
int rowidx = 0;
char square[6][6];
while (endIdxCols <= cols) {
//fill in square 2d array
for (int i = startIdxRows; i < endIdxRows; i++) {
for (int j = startIdxCols; j < endIdxCols; j++) {
square[rowidx][colidx] = grid[i][j];
// printf("%c %d %d\n", grid[i][j], i, j);
// printf("%c\n", square[rowidx][colidx]);
colidx++;
}
rowidx++;
colidx = 0;
}
rowidx = 0;
//at this point square is made
//so check if it's valid square
bool isValid = false;
char *knownLetters = (char *)malloc(26 * sizeof(char));
for (int i = 0; i < 26; i++) {
*(knownLetters + i) = '*';
}
int counter = 0;
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
bool exists = false;
for (int k = 0; k <= counter; k++) {
if (square[i][j] == *(knownLetters + k)) { //if letter is found
exists = true;
}
}
if (!exists) {
*(knownLetters + counter) = square[i][j];
counter++;
}
}
}
free(knownLetters);
if (counter == 26) {
isValid = true;
}
//End of boolean function
if (isValid) {
total++;
if (report) {
printf("%d %d\n", startIdxRows, startIdxCols);
}
}
endIdxCols++;
startIdxCols++;
} //end of while loop
*(output + r0) = total; //save the total number of squares to the unique index
// printf("Total squares is %d with thread ID %d\n", total, r0);
} //end of if statement
}
// Size of the grid of characters.
int rows, cols;
// Grid of letters.
Row *grid;
// Read the grid of characters.
void readGrid() {
// Read grdi dimensions.
scanf( "%d%d", &rows, &cols );
if ( cols > MAX_WIDTH ) {
fprintf( stderr, "Input grid is too wide.\n" );
exit( EXIT_FAILURE );
}
// Make space to store the grid as a big, contiguous array.
grid = (Row *) malloc( rows * sizeof( Row ) );
// Read each row of the grid as a string, then copy everything
// but the null terminator into the grid array.
int rowCount = 0;
char buffer[ MAX_WIDTH + 1 ];
while ( rowCount < rows ) {
scanf( "%s", buffer );
memcpy( grid[ rowCount++ ], buffer, cols );
}
}
// General function to report a failure and exit.
static void fail( char const *message ) {
fprintf( stderr, "%s\n", message );
exit( 1 );
}
// Print out a usage message, then exit.
static void usage() {
printf( "usage: square [report]\n" );
exit( 1 );
}
int main( int argc, char *argv[] ) {
// If there's an argument, it better be "report"
bool report = false;
if ( argc == 2 ) {
if ( strcmp( argv[ 1 ], "report" ) != 0 )
usage();
report = true;
}
// squareFound = false;
readGrid();
/** Array used to hold each squares found in each threads */
// int *reportedSquares = (int *)malloc(rows * sizeof(int));
// TODO Need to add code to allocate memory on the device and copy the grid
// over.
Row *rowGrid = NULL;
hipMalloc((void **)&rowGrid, rows * sizeof(Row)); //allocate memory for 2 arrays
hipMemcpy(rowGrid, grid, rows * sizeof(Row), hipMemcpyHostToDevice);
// Block and grid dimensions.
int threadsPerBlock = 250;
// Round up.
int blocksPerGrid = ( rows + threadsPerBlock - 1 ) / threadsPerBlock;
// int *output = (int *)malloc(rows * sizeof(int));
int *output = NULL;
hipMalloc((void **)&output, rows * sizeof(int));
hipMemset(output, 0x00, rows * sizeof(int));
// printf("\n\n");
// Run our kernel on these block/grid dimensions
countSquares<<<blocksPerGrid, threadsPerBlock>>>( rows, cols, report, rowGrid, output);
if ( hipGetLastError() != hipSuccess )
fail( "Failure in CUDA kernel execution." );
// TODO Need to add code to copy the results list back to the host and
// add them up.
// int *squareNums = NULL;
int *mySquareNums = (int *)malloc(rows * sizeof(int));
// cudaMalloc((void **)&squareNums, rows * sizeof(int));
hipMemcpy(mySquareNums, output, rows * sizeof(int), hipMemcpyDeviceToHost);
// for (int i = 0; i < rows; i++) {
// *(output + i) = 0;
// mySquareNums[i] = 0;
// }
int total = 0;
for (int i = 0; i < rows; i++) {
total += mySquareNums[i];
}
printf( "Squares: %d\n", total );
hipFree(rowGrid);
hipFree(output);
free(mySquareNums);
// Free memory on the device and the host.
free( grid );
hipDeviceReset(); //reset the device
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // Elapsed Real Time for input-4.txt: real 0m29.789s
#include <stdio.h>
#include <stdbool.h>
#include <hip/hip_runtime.h>
// Size of the square we're looking for.
#define SQUARE_WIDTH 6
#define SQUARE_HEIGHT 6
// Maximum width of a row. Makes it easier to allocate the whole
// grid contiguously.
#define MAX_WIDTH 16384
// Type used for a row of the grid. Makes it easier to declare the
// grid as a pointer.
typedef char Row[ MAX_WIDTH ];
// Kernel, run by each thread to count complete squares in parallel.
__global__ void countSquares( int rows, int cols, bool report, Row *grid, int *output) {
// Unique index for this worker.
int r0 = blockDim.x * blockIdx.x + threadIdx.x;
// Make sure I actually have something to work on.
if ( r0 + SQUARE_HEIGHT - 1 < rows ) {
int total = 0;
//TODO logic goes here
int startIdxCols = 0; //this is actually column
int endIdxCols = startIdxCols + 6;
int startIdxRows = r0; //so this is actually row
int endIdxRows = startIdxRows + 6;
int colidx = 0;
int rowidx = 0;
char square[6][6];
while (endIdxCols <= cols) {
//fill in square 2d array
for (int i = startIdxRows; i < endIdxRows; i++) {
for (int j = startIdxCols; j < endIdxCols; j++) {
square[rowidx][colidx] = grid[i][j];
// printf("%c %d %d\n", grid[i][j], i, j);
// printf("%c\n", square[rowidx][colidx]);
colidx++;
}
rowidx++;
colidx = 0;
}
rowidx = 0;
//at this point square is made
//so check if it's valid square
bool isValid = false;
char *knownLetters = (char *)malloc(26 * sizeof(char));
for (int i = 0; i < 26; i++) {
*(knownLetters + i) = '*';
}
int counter = 0;
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
bool exists = false;
for (int k = 0; k <= counter; k++) {
if (square[i][j] == *(knownLetters + k)) { //if letter is found
exists = true;
}
}
if (!exists) {
*(knownLetters + counter) = square[i][j];
counter++;
}
}
}
free(knownLetters);
if (counter == 26) {
isValid = true;
}
//End of boolean function
if (isValid) {
total++;
if (report) {
printf("%d %d\n", startIdxRows, startIdxCols);
}
}
endIdxCols++;
startIdxCols++;
} //end of while loop
*(output + r0) = total; //save the total number of squares to the unique index
// printf("Total squares is %d with thread ID %d\n", total, r0);
} //end of if statement
}
// Size of the grid of characters.
int rows, cols;
// Grid of letters.
Row *grid;
// Read the grid of characters.
void readGrid() {
// Read grdi dimensions.
scanf( "%d%d", &rows, &cols );
if ( cols > MAX_WIDTH ) {
fprintf( stderr, "Input grid is too wide.\n" );
exit( EXIT_FAILURE );
}
// Make space to store the grid as a big, contiguous array.
grid = (Row *) malloc( rows * sizeof( Row ) );
// Read each row of the grid as a string, then copy everything
// but the null terminator into the grid array.
int rowCount = 0;
char buffer[ MAX_WIDTH + 1 ];
while ( rowCount < rows ) {
scanf( "%s", buffer );
memcpy( grid[ rowCount++ ], buffer, cols );
}
}
// General function to report a failure and exit.
static void fail( char const *message ) {
fprintf( stderr, "%s\n", message );
exit( 1 );
}
// Print out a usage message, then exit.
static void usage() {
printf( "usage: square [report]\n" );
exit( 1 );
}
int main( int argc, char *argv[] ) {
// If there's an argument, it better be "report"
bool report = false;
if ( argc == 2 ) {
if ( strcmp( argv[ 1 ], "report" ) != 0 )
usage();
report = true;
}
// squareFound = false;
readGrid();
/** Array used to hold each squares found in each threads */
// int *reportedSquares = (int *)malloc(rows * sizeof(int));
// TODO Need to add code to allocate memory on the device and copy the grid
// over.
Row *rowGrid = NULL;
hipMalloc((void **)&rowGrid, rows * sizeof(Row)); //allocate memory for 2 arrays
hipMemcpy(rowGrid, grid, rows * sizeof(Row), hipMemcpyHostToDevice);
// Block and grid dimensions.
int threadsPerBlock = 250;
// Round up.
int blocksPerGrid = ( rows + threadsPerBlock - 1 ) / threadsPerBlock;
// int *output = (int *)malloc(rows * sizeof(int));
int *output = NULL;
hipMalloc((void **)&output, rows * sizeof(int));
hipMemset(output, 0x00, rows * sizeof(int));
// printf("\n\n");
// Run our kernel on these block/grid dimensions
countSquares<<<blocksPerGrid, threadsPerBlock>>>( rows, cols, report, rowGrid, output);
if ( hipGetLastError() != hipSuccess )
fail( "Failure in CUDA kernel execution." );
// TODO Need to add code to copy the results list back to the host and
// add them up.
// int *squareNums = NULL;
int *mySquareNums = (int *)malloc(rows * sizeof(int));
// cudaMalloc((void **)&squareNums, rows * sizeof(int));
hipMemcpy(mySquareNums, output, rows * sizeof(int), hipMemcpyDeviceToHost);
// for (int i = 0; i < rows; i++) {
// *(output + i) = 0;
// mySquareNums[i] = 0;
// }
int total = 0;
for (int i = 0; i < rows; i++) {
total += mySquareNums[i];
}
printf( "Squares: %d\n", total );
hipFree(rowGrid);
hipFree(output);
free(mySquareNums);
// Free memory on the device and the host.
free( grid );
hipDeviceReset(); //reset the device
return 0;
} | .text
.file "square.hip"
.globl _Z27__device_stub__countSquaresiibPA16384_cPi # -- Begin function _Z27__device_stub__countSquaresiibPA16384_cPi
.p2align 4, 0x90
.type _Z27__device_stub__countSquaresiibPA16384_cPi,@function
_Z27__device_stub__countSquaresiibPA16384_cPi: # @_Z27__device_stub__countSquaresiibPA16384_cPi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movb %dl, 7(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 7(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12countSquaresiibPA16384_cPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z27__device_stub__countSquaresiibPA16384_cPi, .Lfunc_end0-_Z27__device_stub__countSquaresiibPA16384_cPi
.cfi_endproc
# -- End function
.globl _Z8readGridv # -- Begin function _Z8readGridv
.p2align 4, 0x90
.type _Z8readGridv,@function
_Z8readGridv: # @_Z8readGridv
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $16400, %rsp # imm = 0x4010
.cfi_def_cfa_offset 16432
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $.L.str, %edi
movl $rows, %esi
movl $cols, %edx
xorl %eax, %eax
callq __isoc23_scanf
cmpl $16385, cols(%rip) # imm = 0x4001
jge .LBB1_5
# %bb.1:
movslq rows(%rip), %rbx
movq %rbx, %rdi
shlq $14, %rdi
callq malloc
movq %rax, grid(%rip)
testq %rbx, %rbx
jle .LBB1_4
# %bb.2: # %.lr.ph.preheader
movq %rsp, %rbx
xorl %r14d, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.2, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %r15
movq grid(%rip), %rdi
addq %r14, %rdi
movslq cols(%rip), %rdx
movq %rbx, %rsi
callq memcpy@PLT
movslq rows(%rip), %rax
addq $16384, %r14 # imm = 0x4000
cmpq %rax, %r15
jl .LBB1_3
.LBB1_4: # %._crit_edge
addq $16400, %rsp # imm = 0x4010
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_5:
.cfi_def_cfa_offset 16432
movq stderr(%rip), %rcx
movl $.L.str.1, %edi
movl $24, %esi
movl $1, %edx
callq fwrite@PLT
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z8readGridv, .Lfunc_end1-_Z8readGridv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $136, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl %edi, %ebx
cmpl $2, %edi
jne .LBB2_2
# %bb.1:
movq 8(%rsi), %rdi
movl $.L.str.3, %esi
callq strcmp
testl %eax, %eax
jne .LBB2_10
.LBB2_2:
callq _Z8readGridv
movq $0, 16(%rsp)
movslq rows(%rip), %rsi
shlq $14, %rsi
leaq 16(%rsp), %rdi
callq hipMalloc
movq 16(%rsp), %rdi
movq grid(%rip), %rsi
movslq rows(%rip), %rdx
shlq $14, %rdx
movl $1, %ecx
callq hipMemcpy
movslq rows(%rip), %rsi
leal 249(%rsi), %eax
cltq
imulq $274877907, %rax, %r14 # imm = 0x10624DD3
movq %r14, %rax
shrq $63, %rax
sarq $36, %r14
addl %eax, %r14d
movq $0, 8(%rsp)
shlq $2, %rsi
leaq 8(%rsp), %rdi
callq hipMalloc
movq 8(%rsp), %rdi
movslq rows(%rip), %rdx
shlq $2, %rdx
xorl %esi, %esi
callq hipMemset
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %r14
orq $250, %rdx
movq %r14, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
cmpl $2, %ebx
movl rows(%rip), %eax
movl cols(%rip), %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movl %eax, 28(%rsp)
movl %ecx, 24(%rsp)
sete 7(%rsp)
movq %rdx, 88(%rsp)
movq %rsi, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 7(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12countSquaresiibPA16384_cPi, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
callq hipGetLastError
testl %eax, %eax
jne .LBB2_11
# %bb.5:
movslq rows(%rip), %r14
shlq $2, %r14
movq %r14, %rdi
callq malloc
movq %rax, %rbx
movq 8(%rsp), %rsi
movq %rax, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movl rows(%rip), %eax
testl %eax, %eax
jle .LBB2_6
# %bb.8: # %.lr.ph.preheader
xorl %ecx, %ecx
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_9: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addl (%rbx,%rcx,4), %esi
incq %rcx
cmpq %rcx, %rax
jne .LBB2_9
jmp .LBB2_7
.LBB2_6:
xorl %esi, %esi
.LBB2_7: # %._crit_edge
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq grid(%rip), %rdi
callq free
callq hipDeviceReset
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB2_11:
.cfi_def_cfa_offset 160
movq stderr(%rip), %rdi
movl $.L.str.7, %esi
movl $.L.str.4, %edx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.LBB2_10:
movl $.Lstr, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12countSquaresiibPA16384_cPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12countSquaresiibPA16384_cPi,@object # @_Z12countSquaresiibPA16384_cPi
.section .rodata,"a",@progbits
.globl _Z12countSquaresiibPA16384_cPi
.p2align 3, 0x0
_Z12countSquaresiibPA16384_cPi:
.quad _Z27__device_stub__countSquaresiibPA16384_cPi
.size _Z12countSquaresiibPA16384_cPi, 8
.type rows,@object # @rows
.bss
.globl rows
.p2align 2, 0x0
rows:
.long 0 # 0x0
.size rows, 4
.type cols,@object # @cols
.globl cols
.p2align 2, 0x0
cols:
.long 0 # 0x0
.size cols, 4
.type grid,@object # @grid
.globl grid
.p2align 3, 0x0
grid:
.quad 0
.size grid, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d%d"
.size .L.str, 5
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Input grid is too wide.\n"
.size .L.str.1, 25
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%s"
.size .L.str.2, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "report"
.size .L.str.3, 7
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Failure in CUDA kernel execution."
.size .L.str.4, 34
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Squares: %d\n"
.size .L.str.5, 13
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%s\n"
.size .L.str.7, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12countSquaresiibPA16384_cPi"
.size .L__unnamed_1, 31
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "usage: square [report]"
.size .Lstr, 23
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__countSquaresiibPA16384_cPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12countSquaresiibPA16384_cPi
.addrsig_sym rows
.addrsig_sym cols
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00006561_00000000-6_square.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d%d"
.LC1:
.string "Input grid is too wide.\n"
.LC2:
.string "%s"
.text
.globl _Z8readGridv
.type _Z8readGridv, @function
_Z8readGridv:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
leaq -16384(%rsp), %r11
.cfi_def_cfa 11, 16416
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $16, %rsp
.cfi_def_cfa_offset 16432
movq %fs:40, %rax
movq %rax, 16392(%rsp)
xorl %eax, %eax
leaq cols(%rip), %rdx
leaq rows(%rip), %rsi
leaq .LC0(%rip), %rdi
call __isoc23_scanf@PLT
cmpl $16384, cols(%rip)
jg .L10
movl rows(%rip), %ebx
movslq %ebx, %rdi
salq $14, %rdi
call malloc@PLT
movq %rax, grid(%rip)
testl %ebx, %ebx
jle .L3
movl $0, %ebx
leaq .LC2(%rip), %r12
.L6:
movq %rsp, %rbp
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movq %rbx, %rdi
salq $14, %rdi
addq grid(%rip), %rdi
movslq cols(%rip), %rdx
movq %rbp, %rsi
call memcpy@PLT
addq $1, %rbx
cmpl %ebx, rows(%rip)
jg .L6
.L3:
movq 16392(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $16400, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z8readGridv, .-_Z8readGridv
.globl _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi
.type _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi, @function
_Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rcx, 8(%rsp)
movq %r8, (%rsp)
movb %dl, 20(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12countSquaresiibPA16384_cPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi, .-_Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi
.globl _Z12countSquaresiibPA16384_cPi
.type _Z12countSquaresiibPA16384_cPi, @function
_Z12countSquaresiibPA16384_cPi:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movzbl %dl, %edx
call _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z12countSquaresiibPA16384_cPi, .-_Z12countSquaresiibPA16384_cPi
.section .rodata.str1.1
.LC3:
.string "report"
.LC4:
.string "usage: square [report]\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "Failure in CUDA kernel execution."
.section .rodata.str1.1
.LC6:
.string "%s\n"
.LC7:
.string "Squares: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $56, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jne .L27
movq 8(%rsi), %rdi
leaq .LC3(%rip), %rsi
call strcmp@PLT
movl $1, %ebp
testl %eax, %eax
jne .L32
.L21:
call _Z8readGridv
movq $0, (%rsp)
movslq rows(%rip), %rsi
salq $14, %rsi
movq %rsp, %rdi
call cudaMalloc@PLT
movslq rows(%rip), %rdx
salq $14, %rdx
movl $1, %ecx
movq grid(%rip), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl rows(%rip), %ebx
movq $0, 8(%rsp)
movslq %ebx, %rsi
salq $2, %rsi
leaq 8(%rsp), %rdi
call cudaMalloc@PLT
movslq rows(%rip), %rdx
salq $2, %rdx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
movl $250, 28(%rsp)
movl $1, 32(%rsp)
addl $249, %ebx
movslq %ebx, %rax
imulq $274877907, %rax, %rax
sarq $36, %rax
sarl $31, %ebx
subl %ebx, %eax
movl %eax, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L33
.L22:
call cudaGetLastError@PLT
testl %eax, %eax
jne .L34
movslq rows(%rip), %rbp
salq $2, %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %rbx
movl $2, %ecx
movq %rbp, %rdx
movq 8(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movl rows(%rip), %edx
testl %edx, %edx
jle .L29
movq %rbx, %rax
movslq %edx, %rdx
leaq (%rbx,%rdx,4), %rcx
movl $0, %edx
.L25:
addl (%rax), %edx
addq $4, %rax
cmpq %rcx, %rax
jne .L25
.L24:
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq grid(%rip), %rdi
call free@PLT
call cudaDeviceReset@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L35
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L32:
.cfi_restore_state
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L27:
movl $0, %ebp
jmp .L21
.L33:
movzbl %bpl, %edx
movq 8(%rsp), %r8
movq (%rsp), %rcx
movl cols(%rip), %esi
movl rows(%rip), %edi
call _Z44__device_stub__Z12countSquaresiibPA16384_cPiiibPA16384_cPi
jmp .L22
.L34:
leaq .LC5(%rip), %rcx
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L29:
movl $0, %edx
jmp .L24
.L35:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC8:
.string "_Z12countSquaresiibPA16384_cPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z12countSquaresiibPA16384_cPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl grid
.bss
.align 8
.type grid, @object
.size grid, 8
grid:
.zero 8
.globl cols
.align 4
.type cols, @object
.size cols, 4
cols:
.zero 4
.globl rows
.align 4
.type rows, @object
.size rows, 4
rows:
.zero 4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "square.hip"
.globl _Z27__device_stub__countSquaresiibPA16384_cPi # -- Begin function _Z27__device_stub__countSquaresiibPA16384_cPi
.p2align 4, 0x90
.type _Z27__device_stub__countSquaresiibPA16384_cPi,@function
_Z27__device_stub__countSquaresiibPA16384_cPi: # @_Z27__device_stub__countSquaresiibPA16384_cPi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movb %dl, 7(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 7(%rsp), %rax
movq %rax, 96(%rsp)
leaq 72(%rsp), %rax
movq %rax, 104(%rsp)
leaq 64(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12countSquaresiibPA16384_cPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z27__device_stub__countSquaresiibPA16384_cPi, .Lfunc_end0-_Z27__device_stub__countSquaresiibPA16384_cPi
.cfi_endproc
# -- End function
.globl _Z8readGridv # -- Begin function _Z8readGridv
.p2align 4, 0x90
.type _Z8readGridv,@function
_Z8readGridv: # @_Z8readGridv
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $16400, %rsp # imm = 0x4010
.cfi_def_cfa_offset 16432
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $.L.str, %edi
movl $rows, %esi
movl $cols, %edx
xorl %eax, %eax
callq __isoc23_scanf
cmpl $16385, cols(%rip) # imm = 0x4001
jge .LBB1_5
# %bb.1:
movslq rows(%rip), %rbx
movq %rbx, %rdi
shlq $14, %rdi
callq malloc
movq %rax, grid(%rip)
testq %rbx, %rbx
jle .LBB1_4
# %bb.2: # %.lr.ph.preheader
movq %rsp, %rbx
xorl %r14d, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.2, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %r15
movq grid(%rip), %rdi
addq %r14, %rdi
movslq cols(%rip), %rdx
movq %rbx, %rsi
callq memcpy@PLT
movslq rows(%rip), %rax
addq $16384, %r14 # imm = 0x4000
cmpq %rax, %r15
jl .LBB1_3
.LBB1_4: # %._crit_edge
addq $16400, %rsp # imm = 0x4010
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_5:
.cfi_def_cfa_offset 16432
movq stderr(%rip), %rcx
movl $.L.str.1, %edi
movl $24, %esi
movl $1, %edx
callq fwrite@PLT
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z8readGridv, .Lfunc_end1-_Z8readGridv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $136, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl %edi, %ebx
cmpl $2, %edi
jne .LBB2_2
# %bb.1:
movq 8(%rsi), %rdi
movl $.L.str.3, %esi
callq strcmp
testl %eax, %eax
jne .LBB2_10
.LBB2_2:
callq _Z8readGridv
movq $0, 16(%rsp)
movslq rows(%rip), %rsi
shlq $14, %rsi
leaq 16(%rsp), %rdi
callq hipMalloc
movq 16(%rsp), %rdi
movq grid(%rip), %rsi
movslq rows(%rip), %rdx
shlq $14, %rdx
movl $1, %ecx
callq hipMemcpy
movslq rows(%rip), %rsi
leal 249(%rsi), %eax
cltq
imulq $274877907, %rax, %r14 # imm = 0x10624DD3
movq %r14, %rax
shrq $63, %rax
sarq $36, %r14
addl %eax, %r14d
movq $0, 8(%rsp)
shlq $2, %rsi
leaq 8(%rsp), %rdi
callq hipMalloc
movq 8(%rsp), %rdi
movslq rows(%rip), %rdx
shlq $2, %rdx
xorl %esi, %esi
callq hipMemset
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %r14
orq $250, %rdx
movq %r14, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
cmpl $2, %ebx
movl rows(%rip), %eax
movl cols(%rip), %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movl %eax, 28(%rsp)
movl %ecx, 24(%rsp)
sete 7(%rsp)
movq %rdx, 88(%rsp)
movq %rsi, 80(%rsp)
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 7(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12countSquaresiibPA16384_cPi, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
callq hipGetLastError
testl %eax, %eax
jne .LBB2_11
# %bb.5:
movslq rows(%rip), %r14
shlq $2, %r14
movq %r14, %rdi
callq malloc
movq %rax, %rbx
movq 8(%rsp), %rsi
movq %rax, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movl rows(%rip), %eax
testl %eax, %eax
jle .LBB2_6
# %bb.8: # %.lr.ph.preheader
xorl %ecx, %ecx
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_9: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addl (%rbx,%rcx,4), %esi
incq %rcx
cmpq %rcx, %rax
jne .LBB2_9
jmp .LBB2_7
.LBB2_6:
xorl %esi, %esi
.LBB2_7: # %._crit_edge
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq grid(%rip), %rdi
callq free
callq hipDeviceReset
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB2_11:
.cfi_def_cfa_offset 160
movq stderr(%rip), %rdi
movl $.L.str.7, %esi
movl $.L.str.4, %edx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.LBB2_10:
movl $.Lstr, %edi
callq puts@PLT
movl $1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12countSquaresiibPA16384_cPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12countSquaresiibPA16384_cPi,@object # @_Z12countSquaresiibPA16384_cPi
.section .rodata,"a",@progbits
.globl _Z12countSquaresiibPA16384_cPi
.p2align 3, 0x0
_Z12countSquaresiibPA16384_cPi:
.quad _Z27__device_stub__countSquaresiibPA16384_cPi
.size _Z12countSquaresiibPA16384_cPi, 8
.type rows,@object # @rows
.bss
.globl rows
.p2align 2, 0x0
rows:
.long 0 # 0x0
.size rows, 4
.type cols,@object # @cols
.globl cols
.p2align 2, 0x0
cols:
.long 0 # 0x0
.size cols, 4
.type grid,@object # @grid
.globl grid
.p2align 3, 0x0
grid:
.quad 0
.size grid, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d%d"
.size .L.str, 5
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Input grid is too wide.\n"
.size .L.str.1, 25
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%s"
.size .L.str.2, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "report"
.size .L.str.3, 7
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Failure in CUDA kernel execution."
.size .L.str.4, 34
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Squares: %d\n"
.size .L.str.5, 13
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%s\n"
.size .L.str.7, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12countSquaresiibPA16384_cPi"
.size .L__unnamed_1, 31
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "usage: square [report]"
.size .Lstr, 23
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__countSquaresiibPA16384_cPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12countSquaresiibPA16384_cPi
.addrsig_sym rows
.addrsig_sym cols
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
// Part 1 of 6: implement the kernel
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
int sizeVec = blockDim.x;
__shared__ float sdata[1024];
int g_index = threadIdx.x + blockDim.x * blockIdx.x;
int s_index = threadIdx.x;
sdata[s_index] = input[g_index];
__syncthreads();
while (sizeVec!=1){
if (s_index < sizeVec/2)
sdata[s_index] += sdata[sizeVec - 1 - s_index];
__syncthreads();
sizeVec /=2;
}
if(s_index == 0)
atomicAdd(per_block_results, sdata[0]);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(void)
{
// create array of 256ki elements
const int num_elements = 1<<18;
srand(time(NULL));
// generate random input on the host
std::vector<float> h_input(num_elements);
for(int i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
int block_size = 1024;
int num_blocks = (num_elements + block_size - 1)/block_size;
//Part 1 of 6: move input to device memory
float *d_input = 0;
cudaMalloc(&d_input, num_elements * sizeof(float));
cudaMemcpy(d_input, &h_input[0], num_elements * sizeof(float), cudaMemcpyHostToDevice);
// Part 1 of 6: allocate the partial sums: How much space does it need?
float *d_partial_sums_and_total = 0;
cudaMalloc(&d_partial_sums_and_total, sizeof(float));
// Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need?
block_sum<<<num_blocks, block_size, block_size * sizeof(float)>>>(d_input, d_partial_sums_and_total, num_elements);
// Part 1 of 6: compute the sum of the partial sums
//block_sum<<<1, num_blocks, num_blocks * sizeof(float)>>>(d_partial_sums_and_total, d_partial_sums_and_total, num_blocks);
// Part 1 of 6: copy the result back to the host
float device_result = 0;
cudaMemcpy(&device_result, d_partial_sums_and_total, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// Part 1 of 6: deallocate device memory
cudaFree(d_input);
cudaFree(d_partial_sums_and_total);
return 0;
} | code for sm_80
Function : _Z9block_sumPKfPfm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R7 ; /* 0x0000000002027a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe40000000f00 */
/*0090*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003f05270 */
/*00a0*/ ISETP.NE.AND P1, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f25270 */
/*00b0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00d0*/ @!P1 BRA 0x1d0 ; /* 0x000000f000009947 */
/* 0x000fea0003800000 */
/*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x001fe200078e00ff */
/*00f0*/ LOP3.LUT R3, RZ, R7, RZ, 0x33, !PT ; /* 0x00000007ff037212 */
/* 0x000fc800078e33ff */
/*0100*/ LEA.HI R2, R0, R0, RZ, 0x1 ; /* 0x0000000000027211 */
/* 0x000fc800078f08ff */
/*0110*/ SHF.R.S32.HI R6, RZ, 0x1, R2 ; /* 0x00000001ff067819 */
/* 0x000fc80000011402 */
/*0120*/ ISETP.GE.AND P1, PT, R7, R6, PT ; /* 0x000000060700720c */
/* 0x000fda0003f26270 */
/*0130*/ @!P1 IMAD.IADD R2, R3, 0x1, R0 ; /* 0x0000000103029824 */
/* 0x000fe200078e0200 */
/*0140*/ @!P1 LDS R4, [R7.X4] ; /* 0x0000000007049984 */
/* 0x000fe20000004800 */
/*0150*/ LOP3.LUT R0, R0, 0xfffffffe, RZ, 0xc0, !PT ; /* 0xfffffffe00007812 */
/* 0x000fc600078ec0ff */
/*0160*/ @!P1 LDS R5, [R2.X4] ; /* 0x0000000002059984 */
/* 0x000e240000004800 */
/*0170*/ @!P1 FADD R4, R4, R5 ; /* 0x0000000504049221 */
/* 0x001fca0000000000 */
/*0180*/ @!P1 STS [R7.X4], R4 ; /* 0x0000000407009388 */
/* 0x0001e80000004800 */
/*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01a0*/ ISETP.NE.AND P1, PT, R0, 0x2, PT ; /* 0x000000020000780c */
/* 0x000fe40003f25270 */
/*01b0*/ MOV R0, R6 ; /* 0x0000000600007202 */
/* 0x000fd60000000f00 */
/*01c0*/ @P1 BRA 0x100 ; /* 0xffffff3000001947 */
/* 0x001fea000383ffff */
/*01d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01e0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01f0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */
/* 0x000fe200078e00ff */
/*0200*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */
/* 0x000fca0000000f00 */
/*0210*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R2.64], R5 ; /* 0x000000050200798e */
/* 0x001fe2000c10e784 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
// Part 1 of 6: implement the kernel
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
int sizeVec = blockDim.x;
__shared__ float sdata[1024];
int g_index = threadIdx.x + blockDim.x * blockIdx.x;
int s_index = threadIdx.x;
sdata[s_index] = input[g_index];
__syncthreads();
while (sizeVec!=1){
if (s_index < sizeVec/2)
sdata[s_index] += sdata[sizeVec - 1 - s_index];
__syncthreads();
sizeVec /=2;
}
if(s_index == 0)
atomicAdd(per_block_results, sdata[0]);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(void)
{
// create array of 256ki elements
const int num_elements = 1<<18;
srand(time(NULL));
// generate random input on the host
std::vector<float> h_input(num_elements);
for(int i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
int block_size = 1024;
int num_blocks = (num_elements + block_size - 1)/block_size;
//Part 1 of 6: move input to device memory
float *d_input = 0;
cudaMalloc(&d_input, num_elements * sizeof(float));
cudaMemcpy(d_input, &h_input[0], num_elements * sizeof(float), cudaMemcpyHostToDevice);
// Part 1 of 6: allocate the partial sums: How much space does it need?
float *d_partial_sums_and_total = 0;
cudaMalloc(&d_partial_sums_and_total, sizeof(float));
// Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need?
block_sum<<<num_blocks, block_size, block_size * sizeof(float)>>>(d_input, d_partial_sums_and_total, num_elements);
// Part 1 of 6: compute the sum of the partial sums
//block_sum<<<1, num_blocks, num_blocks * sizeof(float)>>>(d_partial_sums_and_total, d_partial_sums_and_total, num_blocks);
// Part 1 of 6: copy the result back to the host
float device_result = 0;
cudaMemcpy(&device_result, d_partial_sums_and_total, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// Part 1 of 6: deallocate device memory
cudaFree(d_input);
cudaFree(d_partial_sums_and_total);
return 0;
} | .file "tmpxft_000eb491_00000000-6_cumulative_sum.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4075:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12random_floatv
.type _Z12random_floatv, @function
_Z12random_floatv:
.LFB4071:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4071:
.size _Z12random_floatv, .-_Z12random_floatv
.globl _Z32__device_stub__Z9block_sumPKfPfmPKfPfm
.type _Z32__device_stub__Z9block_sumPKfPfmPKfPfm, @function
_Z32__device_stub__Z9block_sumPKfPfmPKfPfm:
.LFB4097:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9block_sumPKfPfm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4097:
.size _Z32__device_stub__Z9block_sumPKfPfmPKfPfm, .-_Z32__device_stub__Z9block_sumPKfPfmPKfPfm
.globl _Z9block_sumPKfPfm
.type _Z9block_sumPKfPfm, @function
_Z9block_sumPKfPfm:
.LFB4098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9block_sumPKfPfmPKfPfm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4098:
.size _Z9block_sumPKfPfm, .-_Z9block_sumPKfPfm
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9block_sumPKfPfm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9block_sumPKfPfm(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4100:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt6vectorIfSaIfEED2Ev,"axG",@progbits,_ZNSt6vectorIfSaIfEED5Ev,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEED2Ev
.type _ZNSt6vectorIfSaIfEED2Ev, @function
_ZNSt6vectorIfSaIfEED2Ev:
.LFB4410:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L18
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L18:
ret
.cfi_endproc
.LFE4410:
.size _ZNSt6vectorIfSaIfEED2Ev, .-_ZNSt6vectorIfSaIfEED2Ev
.weak _ZNSt6vectorIfSaIfEED1Ev
.set _ZNSt6vectorIfSaIfEED1Ev,_ZNSt6vectorIfSaIfEED2Ev
.section .rodata.str1.1
.LC3:
.string "Host sum: "
.LC4:
.string "Device sum: "
.text
.globl main
.type main, @function
main:
.LFB4072:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4072
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $104, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl $1048576, %edi
.LEHB0:
call _Znwm@PLT
.LEHE0:
movq %rax, %r13
movq %rax, 64(%rsp)
leaq 1048576(%rax), %rdx
movq %rdx, 80(%rsp)
movl $0x00000000, (%rax)
leaq 4(%rax), %rax
.L22:
movl $0x00000000, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L22
movq %rdx, 72(%rsp)
movq %r13, %rbx
leaq 1048576(%r13), %r12
movq %r13, %rbp
.L23:
call _Z12random_floatv
movss %xmm0, 0(%rbp)
addq $4, %rbp
cmpq %rbp, %r12
jne .L23
movl $0x00000000, 12(%rsp)
.L24:
movss 12(%rsp), %xmm1
addss (%rbx), %xmm1
movss %xmm1, 12(%rsp)
addq $4, %rbx
cmpq %r12, %rbx
jne .L24
leaq .LC3(%rip), %rsi
leaq _ZSt4cerr(%rip), %rdi
.LEHB1:
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq $0, 24(%rsp)
leaq 24(%rsp), %rdi
movl $1048576, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $1048576, %edx
movq %r13, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movq $0, 32(%rsp)
leaq 32(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1024, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $256, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $4096, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L25
movl $262144, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z32__device_stub__Z9block_sumPKfPfmPKfPfm
.L25:
movl $0x00000000, 52(%rsp)
leaq 52(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC4(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 52(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
.LEHE1:
leaq 64(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L35
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
endbr64
movq %rax, %rbx
leaq 64(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 88(%rsp), %rax
subq %fs:40, %rax
je .L27
call __stack_chk_fail@PLT
.L27:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L35:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4072:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4072:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4072-.LLSDACSB4072
.LLSDACSB4072:
.uleb128 .LEHB0-.LFB4072
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB4072
.uleb128 .LEHE1-.LEHB1
.uleb128 .L29-.LFB4072
.uleb128 0
.uleb128 .LEHB2-.LFB4072
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE4072:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
// Part 1 of 6: implement the kernel
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
int sizeVec = blockDim.x;
__shared__ float sdata[1024];
int g_index = threadIdx.x + blockDim.x * blockIdx.x;
int s_index = threadIdx.x;
sdata[s_index] = input[g_index];
__syncthreads();
while (sizeVec!=1){
if (s_index < sizeVec/2)
sdata[s_index] += sdata[sizeVec - 1 - s_index];
__syncthreads();
sizeVec /=2;
}
if(s_index == 0)
atomicAdd(per_block_results, sdata[0]);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(void)
{
// create array of 256ki elements
const int num_elements = 1<<18;
srand(time(NULL));
// generate random input on the host
std::vector<float> h_input(num_elements);
for(int i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
int block_size = 1024;
int num_blocks = (num_elements + block_size - 1)/block_size;
//Part 1 of 6: move input to device memory
float *d_input = 0;
cudaMalloc(&d_input, num_elements * sizeof(float));
cudaMemcpy(d_input, &h_input[0], num_elements * sizeof(float), cudaMemcpyHostToDevice);
// Part 1 of 6: allocate the partial sums: How much space does it need?
float *d_partial_sums_and_total = 0;
cudaMalloc(&d_partial_sums_and_total, sizeof(float));
// Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need?
block_sum<<<num_blocks, block_size, block_size * sizeof(float)>>>(d_input, d_partial_sums_and_total, num_elements);
// Part 1 of 6: compute the sum of the partial sums
//block_sum<<<1, num_blocks, num_blocks * sizeof(float)>>>(d_partial_sums_and_total, d_partial_sums_and_total, num_blocks);
// Part 1 of 6: copy the result back to the host
float device_result = 0;
cudaMemcpy(&device_result, d_partial_sums_and_total, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// Part 1 of 6: deallocate device memory
cudaFree(d_input);
cudaFree(d_partial_sums_and_total);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
// Part 1 of 6: implement the kernel
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
int sizeVec = blockDim.x;
__shared__ float sdata[1024];
int g_index = threadIdx.x + blockDim.x * blockIdx.x;
int s_index = threadIdx.x;
sdata[s_index] = input[g_index];
__syncthreads();
while (sizeVec!=1){
if (s_index < sizeVec/2)
sdata[s_index] += sdata[sizeVec - 1 - s_index];
__syncthreads();
sizeVec /=2;
}
if(s_index == 0)
atomicAdd(per_block_results, sdata[0]);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(void)
{
// create array of 256ki elements
const int num_elements = 1<<18;
srand(time(NULL));
// generate random input on the host
std::vector<float> h_input(num_elements);
for(int i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
int block_size = 1024;
int num_blocks = (num_elements + block_size - 1)/block_size;
//Part 1 of 6: move input to device memory
float *d_input = 0;
hipMalloc(&d_input, num_elements * sizeof(float));
hipMemcpy(d_input, &h_input[0], num_elements * sizeof(float), hipMemcpyHostToDevice);
// Part 1 of 6: allocate the partial sums: How much space does it need?
float *d_partial_sums_and_total = 0;
hipMalloc(&d_partial_sums_and_total, sizeof(float));
// Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need?
block_sum<<<num_blocks, block_size, block_size * sizeof(float)>>>(d_input, d_partial_sums_and_total, num_elements);
// Part 1 of 6: compute the sum of the partial sums
//block_sum<<<1, num_blocks, num_blocks * sizeof(float)>>>(d_partial_sums_and_total, d_partial_sums_and_total, num_blocks);
// Part 1 of 6: copy the result back to the host
float device_result = 0;
hipMemcpy(&device_result, d_partial_sums_and_total, sizeof(float), hipMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// Part 1 of 6: deallocate device memory
hipFree(d_input);
hipFree(d_partial_sums_and_total);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
// Part 1 of 6: implement the kernel
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
int sizeVec = blockDim.x;
__shared__ float sdata[1024];
int g_index = threadIdx.x + blockDim.x * blockIdx.x;
int s_index = threadIdx.x;
sdata[s_index] = input[g_index];
__syncthreads();
while (sizeVec!=1){
if (s_index < sizeVec/2)
sdata[s_index] += sdata[sizeVec - 1 - s_index];
__syncthreads();
sizeVec /=2;
}
if(s_index == 0)
atomicAdd(per_block_results, sdata[0]);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(void)
{
// create array of 256ki elements
const int num_elements = 1<<18;
srand(time(NULL));
// generate random input on the host
std::vector<float> h_input(num_elements);
for(int i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
int block_size = 1024;
int num_blocks = (num_elements + block_size - 1)/block_size;
//Part 1 of 6: move input to device memory
float *d_input = 0;
hipMalloc(&d_input, num_elements * sizeof(float));
hipMemcpy(d_input, &h_input[0], num_elements * sizeof(float), hipMemcpyHostToDevice);
// Part 1 of 6: allocate the partial sums: How much space does it need?
float *d_partial_sums_and_total = 0;
hipMalloc(&d_partial_sums_and_total, sizeof(float));
// Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need?
block_sum<<<num_blocks, block_size, block_size * sizeof(float)>>>(d_input, d_partial_sums_and_total, num_elements);
// Part 1 of 6: compute the sum of the partial sums
//block_sum<<<1, num_blocks, num_blocks * sizeof(float)>>>(d_partial_sums_and_total, d_partial_sums_and_total, num_blocks);
// Part 1 of 6: copy the result back to the host
float device_result = 0;
hipMemcpy(&device_result, d_partial_sums_and_total, sizeof(float), hipMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// Part 1 of 6: deallocate device memory
hipFree(d_input);
hipFree(d_partial_sums_and_total);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9block_sumPKfPfm
.globl _Z9block_sumPKfPfm
.p2align 8
.type _Z9block_sumPKfPfm,@function
_Z9block_sumPKfPfm:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_cmp_eq_u32 s2, 1
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_5
v_not_b32_e32 v2, v0
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s4
s_and_b32 s2, s2, 0xfffe
s_waitcnt lgkmcnt(0)
s_cmp_lg_u32 s2, 2
s_mov_b32 s2, s3
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_5
.LBB0_3:
s_lshr_b32 s3, s2, 1
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB0_2
v_add_lshl_u32 v3, s2, v2, 2
ds_load_b32 v3, v3
ds_load_b32 v4, v1
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v3, v3, v4
ds_store_b32 v1, v3
s_branch .LBB0_2
.LBB0_5:
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_9
s_mov_b32 s2, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v0, s2, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_and_b32 s3, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s3
s_cbranch_execz .LBB0_9
s_load_b64 s[0:1], s[0:1], 0x8
s_bcnt1_i32_b32 s2, s2
s_delay_alu instid0(SALU_CYCLE_1)
v_cvt_f32_ubyte0_e32 v1, s2
v_mov_b32_e32 v2, 0
s_mov_b32 s2, 0
ds_load_b32 v0, v2
s_waitcnt lgkmcnt(0)
s_load_b32 s3, s[0:1], 0x0
v_mul_f32_e32 v3, v0, v1
s_waitcnt lgkmcnt(0)
v_mov_b32_e32 v1, s3
.LBB0_8:
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v0, v1, v3
global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v0, v1
v_mov_b32_e32 v1, v0
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_8
.LBB0_9:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9block_sumPKfPfm
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9block_sumPKfPfm, .Lfunc_end0-_Z9block_sumPKfPfm
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9block_sumPKfPfm
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9block_sumPKfPfm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
// Part 1 of 6: implement the kernel
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
int sizeVec = blockDim.x;
__shared__ float sdata[1024];
int g_index = threadIdx.x + blockDim.x * blockIdx.x;
int s_index = threadIdx.x;
sdata[s_index] = input[g_index];
__syncthreads();
while (sizeVec!=1){
if (s_index < sizeVec/2)
sdata[s_index] += sdata[sizeVec - 1 - s_index];
__syncthreads();
sizeVec /=2;
}
if(s_index == 0)
atomicAdd(per_block_results, sdata[0]);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(void)
{
// create array of 256ki elements
const int num_elements = 1<<18;
srand(time(NULL));
// generate random input on the host
std::vector<float> h_input(num_elements);
for(int i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
int block_size = 1024;
int num_blocks = (num_elements + block_size - 1)/block_size;
//Part 1 of 6: move input to device memory
float *d_input = 0;
hipMalloc(&d_input, num_elements * sizeof(float));
hipMemcpy(d_input, &h_input[0], num_elements * sizeof(float), hipMemcpyHostToDevice);
// Part 1 of 6: allocate the partial sums: How much space does it need?
float *d_partial_sums_and_total = 0;
hipMalloc(&d_partial_sums_and_total, sizeof(float));
// Part 1 of 6: launch one kernel to compute, per-block, a partial sum. How much shared memory does it need?
block_sum<<<num_blocks, block_size, block_size * sizeof(float)>>>(d_input, d_partial_sums_and_total, num_elements);
// Part 1 of 6: compute the sum of the partial sums
//block_sum<<<1, num_blocks, num_blocks * sizeof(float)>>>(d_partial_sums_and_total, d_partial_sums_and_total, num_blocks);
// Part 1 of 6: copy the result back to the host
float device_result = 0;
hipMemcpy(&device_result, d_partial_sums_and_total, sizeof(float), hipMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// Part 1 of 6: deallocate device memory
hipFree(d_input);
hipFree(d_partial_sums_and_total);
return 0;
} | .text
.file "cumulative_sum.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
	# random_float(): returns rand() scaled by the constant below.
	# 0x30000000 encodes the float 2^-31 (4.65661287e-10), so the result
	# maps rand()'s integer range into a small [0, ~1) interval.
	.section	.rodata.cst4,"aM",@progbits,4
	.p2align	2, 0x0                          # -- Begin function _Z12random_floatv
.LCPI0_0:
	.long	0x30000000                      # float 4.65661287E-10
	.text
	.globl	_Z12random_floatv
	.p2align	4, 0x90
	.type	_Z12random_floatv,@function
_Z12random_floatv:                      # @_Z12random_floatv
	.cfi_startproc
# %bb.0:
	pushq	%rax                            # 16-byte align the stack for the call
	.cfi_def_cfa_offset 16
	callq	rand
	cvtsi2ss	%eax, %xmm0             # int result -> float
	mulss	.LCPI0_0(%rip), %xmm0           # scale by 2^-31
	popq	%rax                            # undo alignment push
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end0:
	.size	_Z12random_floatv, .Lfunc_end0-_Z12random_floatv
	.cfi_endproc
                                        # -- End function
	# HIP device stub for block_sum(const float*, float*, size_t):
	# spills the three kernel arguments to the stack, builds the
	# pointer-to-argument array hipLaunchKernel expects, pops the launch
	# configuration pushed by the <<<...>>> site, and launches the kernel.
	.globl	_Z24__device_stub__block_sumPKfPfm # -- Begin function _Z24__device_stub__block_sumPKfPfm
	.p2align	4, 0x90
	.type	_Z24__device_stub__block_sumPKfPfm,@function
_Z24__device_stub__block_sumPKfPfm:     # @_Z24__device_stub__block_sumPKfPfm
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	# Spill the three kernel arguments (input ptr, output ptr, n).
	movq	%rdi, 72(%rsp)
	movq	%rsi, 64(%rsp)
	movq	%rdx, 56(%rsp)
	# Build the args[] array of pointers to the spilled values.
	leaq	72(%rsp), %rax
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 96(%rsp)
	# Retrieve grid dim, block dim, shared-mem size and stream pushed by
	# __hipPushCallConfiguration at the launch site.
	leaq	40(%rsp), %rdi
	leaq	24(%rsp), %rsi
	leaq	16(%rsp), %rdx
	leaq	8(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	40(%rsp), %rsi
	movl	48(%rsp), %edx
	movq	24(%rsp), %rcx
	movl	32(%rsp), %r8d
	leaq	80(%rsp), %r9
	movl	$_Z9block_sumPKfPfm, %edi
	pushq	8(%rsp)                         # stream (stack arg)
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)                        # shared-memory bytes (stack arg)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end1:
	.size	_Z24__device_stub__block_sumPKfPfm, .Lfunc_end1-_Z24__device_stub__block_sumPKfPfm
	.cfi_endproc
                                        # -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI2_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0: # %_ZNSt6vectorIfSaIfEEC2EmRKS0_.exit
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
xorl %edi, %edi
callq time
.cfi_escape 0x2e, 0x00
movl %eax, %edi
callq srand
.cfi_escape 0x2e, 0x00
movl $1048576, %edi # imm = 0x100000
callq _Znwm
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
.cfi_escape 0x2e, 0x00
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI2_0(%rip), %xmm0
movss %xmm0, (%rbx,%r14,4)
incq %r14
cmpq $262144, %r14 # imm = 0x40000
jne .LBB2_1
# %bb.2: # %.lr.ph.i.preheader
xorps %xmm0, %xmm0
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addss (%rbx,%rax), %xmm0
addq $4, %rax
cmpq $1048576, %rax # imm = 0x100000
jne .LBB2_3
# %bb.4: # %_ZSt10accumulateIN9__gnu_cxx17__normal_iteratorIPfSt6vectorIfSaIfEEEEfET0_T_S8_S7_.exit
.Ltmp0:
movss %xmm0, 28(%rsp) # 4-byte Spill
.cfi_escape 0x2e, 0x00
movl $_ZSt4cerr, %edi
movl $.L.str, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp1:
# %bb.5: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movss 28(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
.Ltmp2:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cerr, %edi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp3:
# %bb.6: # %_ZNSolsEf.exit
movq %rax, %r14
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r14,%rax), %r15
testq %r15, %r15
je .LBB2_7
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r15)
je .LBB2_11
# %bb.10:
movzbl 67(%r15), %eax
jmp .LBB2_13
.LBB2_11:
.Ltmp4:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp5:
# %bb.12: # %.noexc37
movq (%r15), %rax
.Ltmp6:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.Ltmp7:
.LBB2_13: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i
.Ltmp8:
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movq %r14, %rdi
callq _ZNSo3putEc
.Ltmp9:
# %bb.14: # %.noexc39
.Ltmp10:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.Ltmp11:
# %bb.15: # %_ZNSolsEPFRSoS_E.exit
movq $0, 16(%rsp)
.Ltmp12:
.cfi_escape 0x2e, 0x00
leaq 16(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
callq hipMalloc
.Ltmp13:
# %bb.16: # %_ZL9hipMallocIfE10hipError_tPPT_m.exit
movq 16(%rsp), %rdi
.Ltmp14:
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
.Ltmp15:
# %bb.17:
movq $0, 8(%rsp)
.Ltmp17:
.cfi_escape 0x2e, 0x00
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
.Ltmp18:
# %bb.18: # %_ZL9hipMallocIfE10hipError_tPPT_m.exit25
.Ltmp19:
.cfi_escape 0x2e, 0x00
movabsq $4294967552, %rdi # imm = 0x100000100
movabsq $4294968320, %rdx # imm = 0x100000400
movl $4096, %r8d # imm = 0x1000
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
.Ltmp20:
# %bb.19:
testl %eax, %eax
jne .LBB2_22
# %bb.20:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq $262144, 104(%rsp) # imm = 0x40000
leaq 120(%rsp), %rax
movq %rax, 32(%rsp)
leaq 112(%rsp), %rax
movq %rax, 40(%rsp)
leaq 104(%rsp), %rax
movq %rax, 48(%rsp)
.Ltmp21:
.cfi_escape 0x2e, 0x00
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp22:
# %bb.21: # %.noexc
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
.Ltmp23:
.cfi_escape 0x2e, 0x10
leaq 32(%rsp), %r9
movl $_Z9block_sumPKfPfm, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp24:
.LBB2_22:
movl $0, 32(%rsp)
movq 8(%rsp), %rsi
.Ltmp26:
.cfi_escape 0x2e, 0x00
leaq 32(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
.Ltmp27:
# %bb.23:
.Ltmp28:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp29:
# %bb.24: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit28
movss 32(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
.Ltmp30:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp31:
# %bb.25: # %_ZNSolsEf.exit30
movq %rax, %r14
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r14,%rax), %r15
testq %r15, %r15
je .LBB2_26
# %bb.31: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i42
cmpb $0, 56(%r15)
je .LBB2_33
# %bb.32:
movzbl 67(%r15), %eax
jmp .LBB2_35
.LBB2_33:
.Ltmp32:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp33:
# %bb.34: # %.noexc47
movq (%r15), %rax
.Ltmp34:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.Ltmp35:
.LBB2_35: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i44
.Ltmp36:
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movq %r14, %rdi
callq _ZNSo3putEc
.Ltmp37:
# %bb.36: # %.noexc49
.Ltmp38:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.Ltmp39:
# %bb.37: # %_ZNSolsEPFRSoS_E.exit32
movq 16(%rsp), %rdi
.Ltmp40:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp41:
# %bb.38:
movq 8(%rsp), %rdi
.Ltmp42:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp43:
# %bb.39: # %_ZNSt6vectorIfSaIfEED2Ev.exit
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZdlPv
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB2_7:
.cfi_def_cfa_offset 160
.Ltmp47:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.Ltmp48:
# %bb.8: # %.noexc36
.LBB2_26:
.Ltmp44:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.Ltmp45:
# %bb.30: # %.noexc46
.LBB2_28:
.Ltmp16:
jmp .LBB2_41
.LBB2_29:
.Ltmp25:
jmp .LBB2_41
.LBB2_27:
.Ltmp49:
jmp .LBB2_41
.LBB2_40:
.Ltmp46:
.LBB2_41:
movq %rax, %r14
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZdlPv
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table2:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp11-.Ltmp0 # Call between .Ltmp0 and .Ltmp11
.uleb128 .Ltmp49-.Lfunc_begin0 # jumps to .Ltmp49
.byte 0 # On action: cleanup
.uleb128 .Ltmp12-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp15-.Ltmp12 # Call between .Ltmp12 and .Ltmp15
.uleb128 .Ltmp16-.Lfunc_begin0 # jumps to .Ltmp16
.byte 0 # On action: cleanup
.uleb128 .Ltmp17-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp24-.Ltmp17 # Call between .Ltmp17 and .Ltmp24
.uleb128 .Ltmp25-.Lfunc_begin0 # jumps to .Ltmp25
.byte 0 # On action: cleanup
.uleb128 .Ltmp26-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp43-.Ltmp26 # Call between .Ltmp26 and .Ltmp43
.uleb128 .Ltmp46-.Lfunc_begin0 # jumps to .Ltmp46
.byte 0 # On action: cleanup
.uleb128 .Ltmp47-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp48-.Ltmp47 # Call between .Ltmp47 and .Ltmp48
.uleb128 .Ltmp49-.Lfunc_begin0 # jumps to .Ltmp49
.byte 0 # On action: cleanup
.uleb128 .Ltmp44-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Ltmp45-.Ltmp44 # Call between .Ltmp44 and .Ltmp45
.uleb128 .Ltmp46-.Lfunc_begin0 # jumps to .Ltmp46
.byte 0 # On action: cleanup
.uleb128 .Ltmp45-.Lfunc_begin0 # >> Call Site 8 <<
.uleb128 .Lfunc_end2-.Ltmp45 # Call between .Ltmp45 and .Lfunc_end2
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
	# Module constructor (runs from .init_array): registers the embedded
	# fat binary exactly once (guarded by __hip_gpubin_handle), registers
	# the block_sum kernel symbol with the HIP runtime, and arranges for
	# __hip_module_dtor to run at exit.
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB3_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB3_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the trailing stack args
	movups	%xmm0, (%rsp)
	movl	$_Z9block_sumPKfPfm, %esi       # host-side kernel handle
	movl	$.L__unnamed_1, %edx            # device-side kernel name string
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end3:
	.size	__hip_module_ctor, .Lfunc_end3-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	# Module destructor (registered via atexit): unregisters the fat binary
	# if it was registered and clears the handle so the dtor is idempotent.
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB4_2
# %bb.1:
	pushq	%rax                            # align stack for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB4_2:
	retq
.Lfunc_end4:
	.size	__hip_module_dtor, .Lfunc_end4-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z9block_sumPKfPfm,@object # @_Z9block_sumPKfPfm
.section .rodata,"a",@progbits
.globl _Z9block_sumPKfPfm
.p2align 3, 0x0
_Z9block_sumPKfPfm:
.quad _Z24__device_stub__block_sumPKfPfm
.size _Z9block_sumPKfPfm, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Host sum: "
.size .L.str, 11
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Device sum: "
.size .L.str.1, 13
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9block_sumPKfPfm"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__block_sumPKfPfm
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z9block_sumPKfPfm
.addrsig_sym _ZSt4cerr
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9block_sumPKfPfm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R7 ; /* 0x0000000002027a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe40000000f00 */
/*0090*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003f05270 */
/*00a0*/ ISETP.NE.AND P1, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fe20003f25270 */
/*00b0*/ STS [R7.X4], R2 ; /* 0x0000000207007388 */
/* 0x0041e80000004800 */
/*00c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00d0*/ @!P1 BRA 0x1d0 ; /* 0x000000f000009947 */
/* 0x000fea0003800000 */
/*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff007624 */
/* 0x001fe200078e00ff */
/*00f0*/ LOP3.LUT R3, RZ, R7, RZ, 0x33, !PT ; /* 0x00000007ff037212 */
/* 0x000fc800078e33ff */
/*0100*/ LEA.HI R2, R0, R0, RZ, 0x1 ; /* 0x0000000000027211 */
/* 0x000fc800078f08ff */
/*0110*/ SHF.R.S32.HI R6, RZ, 0x1, R2 ; /* 0x00000001ff067819 */
/* 0x000fc80000011402 */
/*0120*/ ISETP.GE.AND P1, PT, R7, R6, PT ; /* 0x000000060700720c */
/* 0x000fda0003f26270 */
/*0130*/ @!P1 IMAD.IADD R2, R3, 0x1, R0 ; /* 0x0000000103029824 */
/* 0x000fe200078e0200 */
/*0140*/ @!P1 LDS R4, [R7.X4] ; /* 0x0000000007049984 */
/* 0x000fe20000004800 */
/*0150*/ LOP3.LUT R0, R0, 0xfffffffe, RZ, 0xc0, !PT ; /* 0xfffffffe00007812 */
/* 0x000fc600078ec0ff */
/*0160*/ @!P1 LDS R5, [R2.X4] ; /* 0x0000000002059984 */
/* 0x000e240000004800 */
/*0170*/ @!P1 FADD R4, R4, R5 ; /* 0x0000000504049221 */
/* 0x001fca0000000000 */
/*0180*/ @!P1 STS [R7.X4], R4 ; /* 0x0000000407009388 */
/* 0x0001e80000004800 */
/*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*01a0*/ ISETP.NE.AND P1, PT, R0, 0x2, PT ; /* 0x000000020000780c */
/* 0x000fe40003f25270 */
/*01b0*/ MOV R0, R6 ; /* 0x0000000600007202 */
/* 0x000fd60000000f00 */
/*01c0*/ @P1 BRA 0x100 ; /* 0xffffff3000001947 */
/* 0x001fea000383ffff */
/*01d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x001fea0003800000 */
/*01e0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*01f0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */
/* 0x000fe200078e00ff */
/*0200*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */
/* 0x000fca0000000f00 */
/*0210*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R2.64], R5 ; /* 0x000000050200798e */
/* 0x001fe2000c10e784 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9block_sumPKfPfm
.globl _Z9block_sumPKfPfm
.p2align 8
.type _Z9block_sumPKfPfm,@function
_Z9block_sumPKfPfm:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_cmp_eq_u32 s2, 1
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
v_lshlrev_b32_e32 v1, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_5
v_not_b32_e32 v2, v0
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s4
s_and_b32 s2, s2, 0xfffe
s_waitcnt lgkmcnt(0)
s_cmp_lg_u32 s2, 2
s_mov_b32 s2, s3
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_5
.LBB0_3:
s_lshr_b32 s3, s2, 1
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB0_2
v_add_lshl_u32 v3, s2, v2, 2
ds_load_b32 v3, v3
ds_load_b32 v4, v1
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v3, v3, v4
ds_store_b32 v1, v3
s_branch .LBB0_2
.LBB0_5:
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_9
s_mov_b32 s2, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v0, s2, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_and_b32 s3, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s3
s_cbranch_execz .LBB0_9
s_load_b64 s[0:1], s[0:1], 0x8
s_bcnt1_i32_b32 s2, s2
s_delay_alu instid0(SALU_CYCLE_1)
v_cvt_f32_ubyte0_e32 v1, s2
v_mov_b32_e32 v2, 0
s_mov_b32 s2, 0
ds_load_b32 v0, v2
s_waitcnt lgkmcnt(0)
s_load_b32 s3, s[0:1], 0x0
v_mul_f32_e32 v3, v0, v1
s_waitcnt lgkmcnt(0)
v_mov_b32_e32 v1, s3
.LBB0_8:
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v0, v1, v3
global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v0, v1
v_mov_b32_e32 v1, v0
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_8
.LBB0_9:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9block_sumPKfPfm
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9block_sumPKfPfm, .Lfunc_end0-_Z9block_sumPKfPfm
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 8
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9block_sumPKfPfm
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9block_sumPKfPfm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000eb491_00000000-6_cumulative_sum.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
	# atexit helper: unregisters the CUDA fat binary recorded in
	# _ZL20__cudaFatCubinHandle during static registration.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4075:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # align stack for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE4075:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12random_floatv
.type _Z12random_floatv, @function
_Z12random_floatv:
.LFB4071:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4071:
.size _Z12random_floatv, .-_Z12random_floatv
.globl _Z32__device_stub__Z9block_sumPKfPfmPKfPfm
.type _Z32__device_stub__Z9block_sumPKfPfmPKfPfm, @function
_Z32__device_stub__Z9block_sumPKfPfmPKfPfm:
.LFB4097:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9block_sumPKfPfm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4097:
.size _Z32__device_stub__Z9block_sumPKfPfmPKfPfm, .-_Z32__device_stub__Z9block_sumPKfPfmPKfPfm
# Host-side trampoline for the block_sum kernel symbol: forwards its
	# three register arguments unchanged to the device stub, which performs
	# the actual cudaLaunchKernel call.
	.globl	_Z9block_sumPKfPfm
	.type	_Z9block_sumPKfPfm, @function
_Z9block_sumPKfPfm:
.LFB4098:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # align stack for the call
	.cfi_def_cfa_offset 16
	call	_Z32__device_stub__Z9block_sumPKfPfmPKfPfm
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE4098:
	.size	_Z9block_sumPKfPfm, .-_Z9block_sumPKfPfm
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9block_sumPKfPfm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9block_sumPKfPfm(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4100:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt6vectorIfSaIfEED2Ev,"axG",@progbits,_ZNSt6vectorIfSaIfEED5Ev,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEED2Ev
.type _ZNSt6vectorIfSaIfEED2Ev, @function
_ZNSt6vectorIfSaIfEED2Ev:
.LFB4410:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L18
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L18:
ret
.cfi_endproc
.LFE4410:
.size _ZNSt6vectorIfSaIfEED2Ev, .-_ZNSt6vectorIfSaIfEED2Ev
.weak _ZNSt6vectorIfSaIfEED1Ev
.set _ZNSt6vectorIfSaIfEED1Ev,_ZNSt6vectorIfSaIfEED2Ev
.section .rodata.str1.1
.LC3:
.string "Host sum: "
.LC4:
.string "Device sum: "
.text
.globl main
.type main, @function
main:
.LFB4072:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4072
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $104, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movl $1048576, %edi
.LEHB0:
call _Znwm@PLT
.LEHE0:
movq %rax, %r13
movq %rax, 64(%rsp)
leaq 1048576(%rax), %rdx
movq %rdx, 80(%rsp)
movl $0x00000000, (%rax)
leaq 4(%rax), %rax
.L22:
movl $0x00000000, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L22
movq %rdx, 72(%rsp)
movq %r13, %rbx
leaq 1048576(%r13), %r12
movq %r13, %rbp
.L23:
call _Z12random_floatv
movss %xmm0, 0(%rbp)
addq $4, %rbp
cmpq %rbp, %r12
jne .L23
movl $0x00000000, 12(%rsp)
.L24:
movss 12(%rsp), %xmm1
addss (%rbx), %xmm1
movss %xmm1, 12(%rsp)
addq $4, %rbx
cmpq %r12, %rbx
jne .L24
leaq .LC3(%rip), %rsi
leaq _ZSt4cerr(%rip), %rdi
.LEHB1:
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq $0, 24(%rsp)
leaq 24(%rsp), %rdi
movl $1048576, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $1048576, %edx
movq %r13, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movq $0, 32(%rsp)
leaq 32(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1024, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $256, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $4096, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L25
movl $262144, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z32__device_stub__Z9block_sumPKfPfmPKfPfm
.L25:
movl $0x00000000, 52(%rsp)
leaq 52(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC4(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 52(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
.LEHE1:
leaq 64(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L35
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
endbr64
movq %rax, %rbx
leaq 64(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 88(%rsp), %rax
subq %fs:40, %rax
je .L27
call __stack_chk_fail@PLT
.L27:
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L35:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4072:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.LLSDA4072:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4072-.LLSDACSB4072
.LLSDACSB4072:
.uleb128 .LEHB0-.LFB4072
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB4072
.uleb128 .LEHE1-.LEHB1
.uleb128 .L29-.LFB4072
.uleb128 0
.uleb128 .LEHB2-.LFB4072
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.LLSDACSE4072:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cumulative_sum.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z12random_floatv
.LCPI0_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl _Z12random_floatv
.p2align 4, 0x90
.type _Z12random_floatv,@function
_Z12random_floatv: # @_Z12random_floatv
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
callq rand
cvtsi2ss %eax, %xmm0
mulss .LCPI0_0(%rip), %xmm0
popq %rax
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z12random_floatv, .Lfunc_end0-_Z12random_floatv
.cfi_endproc
# -- End function
.globl _Z24__device_stub__block_sumPKfPfm # -- Begin function _Z24__device_stub__block_sumPKfPfm
.p2align 4, 0x90
.type _Z24__device_stub__block_sumPKfPfm,@function
_Z24__device_stub__block_sumPKfPfm: # @_Z24__device_stub__block_sumPKfPfm
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9block_sumPKfPfm, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z24__device_stub__block_sumPKfPfm, .Lfunc_end1-_Z24__device_stub__block_sumPKfPfm
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI2_0:
.long 0x30000000 # float 4.65661287E-10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# %bb.0: # %_ZNSt6vectorIfSaIfEEC2EmRKS0_.exit
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
.cfi_escape 0x2e, 0x00
xorl %r14d, %r14d
xorl %edi, %edi
callq time
.cfi_escape 0x2e, 0x00
movl %eax, %edi
callq srand
.cfi_escape 0x2e, 0x00
movl $1048576, %edi # imm = 0x100000
callq _Znwm
movq %rax, %rbx
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
movq %rax, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
.cfi_escape 0x2e, 0x00
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI2_0(%rip), %xmm0
movss %xmm0, (%rbx,%r14,4)
incq %r14
cmpq $262144, %r14 # imm = 0x40000
jne .LBB2_1
# %bb.2: # %.lr.ph.i.preheader
xorps %xmm0, %xmm0
xorl %eax, %eax
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
addss (%rbx,%rax), %xmm0
addq $4, %rax
cmpq $1048576, %rax # imm = 0x100000
jne .LBB2_3
# %bb.4: # %_ZSt10accumulateIN9__gnu_cxx17__normal_iteratorIPfSt6vectorIfSaIfEEEEfET0_T_S8_S7_.exit
.Ltmp0:
movss %xmm0, 28(%rsp) # 4-byte Spill
.cfi_escape 0x2e, 0x00
movl $_ZSt4cerr, %edi
movl $.L.str, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp1:
# %bb.5: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movss 28(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
.Ltmp2:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cerr, %edi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp3:
# %bb.6: # %_ZNSolsEf.exit
movq %rax, %r14
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r14,%rax), %r15
testq %r15, %r15
je .LBB2_7
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r15)
je .LBB2_11
# %bb.10:
movzbl 67(%r15), %eax
jmp .LBB2_13
.LBB2_11:
.Ltmp4:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp5:
# %bb.12: # %.noexc37
movq (%r15), %rax
.Ltmp6:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.Ltmp7:
.LBB2_13: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i
.Ltmp8:
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movq %r14, %rdi
callq _ZNSo3putEc
.Ltmp9:
# %bb.14: # %.noexc39
.Ltmp10:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.Ltmp11:
# %bb.15: # %_ZNSolsEPFRSoS_E.exit
movq $0, 16(%rsp)
.Ltmp12:
.cfi_escape 0x2e, 0x00
leaq 16(%rsp), %rdi
movl $1048576, %esi # imm = 0x100000
callq hipMalloc
.Ltmp13:
# %bb.16: # %_ZL9hipMallocIfE10hipError_tPPT_m.exit
movq 16(%rsp), %rdi
.Ltmp14:
.cfi_escape 0x2e, 0x00
movl $1048576, %edx # imm = 0x100000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
.Ltmp15:
# %bb.17:
movq $0, 8(%rsp)
.Ltmp17:
.cfi_escape 0x2e, 0x00
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
.Ltmp18:
# %bb.18: # %_ZL9hipMallocIfE10hipError_tPPT_m.exit25
.Ltmp19:
.cfi_escape 0x2e, 0x00
movabsq $4294967552, %rdi # imm = 0x100000100
movabsq $4294968320, %rdx # imm = 0x100000400
movl $4096, %r8d # imm = 0x1000
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
.Ltmp20:
# %bb.19:
testl %eax, %eax
jne .LBB2_22
# %bb.20:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq $262144, 104(%rsp) # imm = 0x40000
leaq 120(%rsp), %rax
movq %rax, 32(%rsp)
leaq 112(%rsp), %rax
movq %rax, 40(%rsp)
leaq 104(%rsp), %rax
movq %rax, 48(%rsp)
.Ltmp21:
.cfi_escape 0x2e, 0x00
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
.Ltmp22:
# %bb.21: # %.noexc
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
.Ltmp23:
.cfi_escape 0x2e, 0x10
leaq 32(%rsp), %r9
movl $_Z9block_sumPKfPfm, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.Ltmp24:
.LBB2_22:
movl $0, 32(%rsp)
movq 8(%rsp), %rsi
.Ltmp26:
.cfi_escape 0x2e, 0x00
leaq 32(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
.Ltmp27:
# %bb.23:
.Ltmp28:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $12, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp29:
# %bb.24: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit28
movss 32(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
.Ltmp30:
.cfi_escape 0x2e, 0x00
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
.Ltmp31:
# %bb.25: # %_ZNSolsEf.exit30
movq %rax, %r14
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r14,%rax), %r15
testq %r15, %r15
je .LBB2_26
# %bb.31: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i42
cmpb $0, 56(%r15)
je .LBB2_33
# %bb.32:
movzbl 67(%r15), %eax
jmp .LBB2_35
.LBB2_33:
.Ltmp32:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp33:
# %bb.34: # %.noexc47
movq (%r15), %rax
.Ltmp34:
.cfi_escape 0x2e, 0x00
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
.Ltmp35:
.LBB2_35: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i44
.Ltmp36:
.cfi_escape 0x2e, 0x00
movsbl %al, %esi
movq %r14, %rdi
callq _ZNSo3putEc
.Ltmp37:
# %bb.36: # %.noexc49
.Ltmp38:
.cfi_escape 0x2e, 0x00
movq %rax, %rdi
callq _ZNSo5flushEv
.Ltmp39:
# %bb.37: # %_ZNSolsEPFRSoS_E.exit32
movq 16(%rsp), %rdi
.Ltmp40:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp41:
# %bb.38:
movq 8(%rsp), %rdi
.Ltmp42:
.cfi_escape 0x2e, 0x00
callq hipFree
.Ltmp43:
# %bb.39: # %_ZNSt6vectorIfSaIfEED2Ev.exit
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZdlPv
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB2_7:
.cfi_def_cfa_offset 160
.Ltmp47:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.Ltmp48:
# %bb.8: # %.noexc36
.LBB2_26:
.Ltmp44:
.cfi_escape 0x2e, 0x00
callq _ZSt16__throw_bad_castv
.Ltmp45:
# %bb.30: # %.noexc46
.LBB2_28:
.Ltmp16:
jmp .LBB2_41
.LBB2_29:
.Ltmp25:
jmp .LBB2_41
.LBB2_27:
.Ltmp49:
jmp .LBB2_41
.LBB2_40:
.Ltmp46:
.LBB2_41:
movq %rax, %r14
.cfi_escape 0x2e, 0x00
movq %rbx, %rdi
callq _ZdlPv
.cfi_escape 0x2e, 0x00
movq %r14, %rdi
callq _Unwind_Resume@PLT
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.p2align 2, 0x0
GCC_except_table2:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 255 # @TType Encoding = omit
.byte 1 # Call site Encoding = uleb128
.uleb128 .Lcst_end0-.Lcst_begin0
.Lcst_begin0:
.uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 <<
.uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 <<
.uleb128 .Ltmp11-.Ltmp0 # Call between .Ltmp0 and .Ltmp11
.uleb128 .Ltmp49-.Lfunc_begin0 # jumps to .Ltmp49
.byte 0 # On action: cleanup
.uleb128 .Ltmp12-.Lfunc_begin0 # >> Call Site 3 <<
.uleb128 .Ltmp15-.Ltmp12 # Call between .Ltmp12 and .Ltmp15
.uleb128 .Ltmp16-.Lfunc_begin0 # jumps to .Ltmp16
.byte 0 # On action: cleanup
.uleb128 .Ltmp17-.Lfunc_begin0 # >> Call Site 4 <<
.uleb128 .Ltmp24-.Ltmp17 # Call between .Ltmp17 and .Ltmp24
.uleb128 .Ltmp25-.Lfunc_begin0 # jumps to .Ltmp25
.byte 0 # On action: cleanup
.uleb128 .Ltmp26-.Lfunc_begin0 # >> Call Site 5 <<
.uleb128 .Ltmp43-.Ltmp26 # Call between .Ltmp26 and .Ltmp43
.uleb128 .Ltmp46-.Lfunc_begin0 # jumps to .Ltmp46
.byte 0 # On action: cleanup
.uleb128 .Ltmp47-.Lfunc_begin0 # >> Call Site 6 <<
.uleb128 .Ltmp48-.Ltmp47 # Call between .Ltmp47 and .Ltmp48
.uleb128 .Ltmp49-.Lfunc_begin0 # jumps to .Ltmp49
.byte 0 # On action: cleanup
.uleb128 .Ltmp44-.Lfunc_begin0 # >> Call Site 7 <<
.uleb128 .Ltmp45-.Ltmp44 # Call between .Ltmp44 and .Ltmp45
.uleb128 .Ltmp46-.Lfunc_begin0 # jumps to .Ltmp46
.byte 0 # On action: cleanup
.uleb128 .Ltmp45-.Lfunc_begin0 # >> Call Site 8 <<
.uleb128 .Lfunc_end2-.Ltmp45 # Call between .Ltmp45 and .Lfunc_end2
.byte 0 # has no landing pad
.byte 0 # On action: cleanup
.Lcst_end0:
.p2align 2, 0x0
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9block_sumPKfPfm, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9block_sumPKfPfm,@object # @_Z9block_sumPKfPfm
.section .rodata,"a",@progbits
.globl _Z9block_sumPKfPfm
.p2align 3, 0x0
_Z9block_sumPKfPfm:
.quad _Z24__device_stub__block_sumPKfPfm
.size _Z9block_sumPKfPfm, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Host sum: "
.size .L.str, 11
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Device sum: "
.size .L.str.1, 13
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9block_sumPKfPfm"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__block_sumPKfPfm
.addrsig_sym __gxx_personality_v0
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Unwind_Resume
.addrsig_sym _Z9block_sumPKfPfm
.addrsig_sym _ZSt4cerr
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#define MAXN 8000 /* Max value of N */
int N; /* Matrix Dimension*/
int numThreads; /* Number of Threads */
/*Random*/
#define randm() 4|2[uid]&3
/*CUDA Function for calculating mean column-wise and then reducing each column's totals*/
/*This Function will be called Number of blocks times*/
/* Per-block mean/variance normalization of one column-chunk of an N x N matrix.
 *
 * Launch contract (set up by main): grid = (N, 1), block = (1, min(N, numThreads)),
 * dynamic shared memory = N * sizeof(float). Each block blockIdx.x normalizes
 * one stripe of the matrix; each thread owns element thread_id plus every
 * numThread-th element after it.
 *
 * NOTE(review): `mean` and `sd` are both `extern __shared__` declarations, so
 * they ALIAS the same dynamic shared-memory buffer (CUDA places every
 * extern __shared__ array at the same base address). The host allocates only
 * N*sizeof(float) of dynamic shared memory, so this aliasing is load-bearing;
 * it is safe only because every read of mean[] is completed (and fenced by
 * __syncthreads) before sd[] overwrites the buffer.
 */
__global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil)
{
    extern __shared__ float mean[];  /* per-thread partial sums (dynamic shared) */
    extern __shared__ float sd[];    /* per-thread squared deviations; aliases mean[], see NOTE */
    __shared__ float meansum;        /* block-wide sum, written by thread 0 only */
    __shared__ float sdsum;          /* block-wide squared-deviation sum, thread 0 only */
    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
    int eva_block, index;
    unsigned int thread_id = threadIdx.y;          /* lane within the block */
    unsigned int j = idx_y * dim1 + idx_x;         /* flat index of this thread's primary element */

    /* Phase 1: each thread accumulates its strided share of the input. */
    mean[thread_id] = input[j];
    for (int i = 0; i < dim1; i += numThread)
    {
        index = dim1 * (numThread + thread_id + i);  /* index of a remaining element */
        eva_block = index + blockIdx.x;
        if (eva_block < dim1 * dim1)
        {
            mean[thread_id] += input[index];
        }
    }
    /* BUGFIX: barrier before thread 0 reads partial sums written by the other
     * threads — previously this reduction raced against the loop above. */
    __syncthreads();
    if (thread_id == 0)
    {
        meansum = 0.0f;  /* BUGFIX: meansum was accumulated without initialization */
        for (int i = 0; i < numThread; i++)
        {
            meansum += mean[thread_id + i];
        }
        mean_out[blockIdx.x] = meansum / dim1;  /* mean of this block's stripe */
    }
    __syncthreads();

    /* Phase 2: squared deviation from the block mean (d*d instead of the
     * original powf(..., 2.0), which promoted through double). */
    float d = input[j] - mean_out[blockIdx.x];
    sd[thread_id] = d * d;  /* overwrites mean[thread_id] — same buffer, see NOTE */
    for (int i = 0; i < dim1; i += numThread)
    {
        index = dim1 * (numThread + thread_id + i);
        eva_block = index + blockIdx.x;
        if (eva_block < dim1 * dim1)
        {
            d = input[index] - mean_out[blockIdx.x];
            sd[thread_id] += d * d;
        }
    }
    /* BUGFIX: barrier before thread 0 reduces sd[] (same race as above). */
    __syncthreads();
    if (thread_id == 0)
    {
        sdsum = 0;
        for (int i = 0; i < numThread; i++)
        {
            sdsum += sd[thread_id + i];
        }
        /* NOTE(review): despite the name this stores the variance — there is
         * no sqrtf — matching the original behavior downstream code expects. */
        sd_out[blockIdx.x] = sdsum / dim1;
    }
    __syncthreads();  /* ensure mean_out/sd_out are visible to every thread */

    /* Phase 3: normalize this thread's primary element... */
    output[blockIdx.x * dim1 + thread_id] =
        (input[thread_id + blockIdx.x * dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
    /* ...and the remaining chunks when dim1 > numThread.
     * BUGFIX: the original loop body did not depend on i, so it rewrote the
     * single element (numThread + thread_id) eval_ceil times and never touched
     * chunks 2..eval_ceil. It now advances one chunk of numThread per
     * iteration and is additionally bounded to the current row. */
    for (int i = 0; i < eval_ceil; i++)
    {
        unsigned int col = (unsigned int)(i + 1) * numThread + thread_id;
        unsigned int g = col + blockIdx.x * dim1;
        if (col < (unsigned int)dim1 && g < (unsigned int)(dim1 * dim1))
        {
            output[g] = (input[g] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
        }
    }
}
/* returns a seed for srand based on the time */
/* Produce a seed for srand() from the sub-second part of the wall clock. */
unsigned int time_seed() {
  struct timeval now;
  struct timezone tz_unused;
  gettimeofday(&now, &tz_unused);
  /* tv_usec is always in [0, 999999], which is plenty of entropy for srand. */
  return (unsigned int)now.tv_usec;
}
/* Set the program parameters from the command-line arguments */
/* Parse command-line arguments into the globals N and numThreads.
 *
 * Usage: prog <matrix_dimension> <number_of_threads> [random_seed]
 * Seeds the C RNG from the clock, or from argv[3] when four arguments are
 * given. Exits the process on any invalid or missing argument.
 * (Removed the unused locals `char uid[32]` from the original.) */
void parameters(int argc, char **argv) {
  int seed = 0; /* Random seed */

  srand(time_seed()); /* Randomize by default */
  if (argc == 4) {
    seed = atoi(argv[3]);
    srand(seed); /* explicit seed overrides the clock-based one */
    printf("Random seed = %i\n", seed);
  }
  if (argc >= 3) {
    N = atoi(argv[1]);
    numThreads = atoi(argv[2]);
    if (N < 1 || N > MAXN) {
      printf("N = %i is out of range.\n", N);
      exit(0);
    }
    /* A CUDA block cannot exceed 1024 threads. */
    if (numThreads > 1024) {
      printf("Number of threads cannot be more than %i.\n", 1024);
      exit(0);
    }
  } else {
    printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n", argv[0]);
    exit(0);
  }
  printf("\nMatrix dimension N = %i.\n", N);
}
/* Driver: fill an N x N host matrix with random floats, normalize it on the
 * GPU with Mean_SD_Norm, and report CPU/GPU timings plus derived bandwidth
 * and throughput figures. Exits with status 0. */
int main(int argc, char **argv)
{
  /* GPU timing events. */
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
  struct timezone tzdummy;
  clock_t etstart2, etstop2;      /* Elapsed times using times() */
  unsigned long long usecstart, usecstop;
  struct tms cputstart, cputstop; /* CPU times for my processes */

  /* Process program parameters (sets globals N and numThreads, may exit). */
  parameters(argc, argv);

  float* Host_Input = new float[N * N];  /* Input Matrix */
  float* Host_Output = new float[N * N]; /* Output Matrix */
  int i, j;

  /* Initialize the input matrix (column-major fill) with values in [0, ~65536). */
  printf("\nInitializing...\n");
  for (i = 0; i < N; i++)
  {
    for (j = 0; j < N; j++)
    {
      Host_Input[j * N + i] = (float)rand() / 32768.0f; /* float literal: avoid double math */
    }
  }

  float* input;    /* Device Input Matrix */
  float* output;   /* Device Output Matrix */
  float* mean_out; /* Device Mean Matrix */
  float* sd_out;   /* Device SD Matrix */
  /* BUGFIX: widen before multiplying — N*N*sizeof(float) overflows int for
   * large N (MAXN is 8000, so N*N can reach 64e6 elements / 256 MB). */
  size_t matrix_size_2d = (size_t)N * N * sizeof(float); /* Size of 2D Matrix */
  size_t matrix_size_1d = (size_t)N * sizeof(float);     /* Size of 1D Matrix */

  cudaMalloc(&input, matrix_size_2d);
  cudaMemcpy(input, Host_Input, matrix_size_2d, cudaMemcpyHostToDevice);
  cudaMalloc(&output, matrix_size_2d);
  cudaMalloc(&mean_out, matrix_size_1d);
  cudaMalloc(&sd_out, matrix_size_1d);

  /* One block per matrix row/stripe; threads along y, capped by N. */
  dim3 dimBlock;
  dim3 dimGrid;
  dimBlock.x = 1;
  dimBlock.y = (N < numThreads) ? N : numThreads;
  dimGrid.x = N;
  dimGrid.y = 1;

  /* Start Clock */
  printf("\nStarting clock.\n");
  cudaEventRecord(start);
  gettimeofday(&etstart, &tzdummy);
  etstart2 = times(&cputstart);

  /* Chunks of numThread needed to cover a full row of length N. */
  double d_ceil = (double)N / (double)numThreads;
  int c = (int)ceil(d_ceil);

  /* Normalize the matrix; dynamic shared memory holds N floats. */
  Mean_SD_Norm<<<dimGrid, dimBlock, matrix_size_1d>>>(input, output, mean_out, sd_out, N, numThreads, c);
  /* BUGFIX: surface launch-configuration errors — a failed launch was
   * previously silent and produced garbage output. */
  cudaError_t launch_err = cudaGetLastError();
  if (launch_err != cudaSuccess)
  {
    fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(launch_err));
  }
  cudaDeviceSynchronize();

  /* Stop clocks. */
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  float milliseconds = 0;
  cudaEventElapsedTime(&milliseconds, start, stop);
  gettimeofday(&etstop, &tzdummy);
  etstop2 = times(&cputstop);
  printf("Stopped clock.\n");

  /* Copy the normalized matrix back to the host. */
  cudaMemcpy(Host_Output, output, matrix_size_2d, cudaMemcpyDeviceToHost);

  usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
  usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;

  /* Display result time */
  printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart) / (float)1000);
  printf("Elapsed GPU Time = %g ms \n", milliseconds);
  printf("Effective Bandwidth in (GB/s): %f \n", (2 * matrix_size_2d / milliseconds) / 1e6);

  /* Rough FLOP model: mean = N log2 N + N; sd = N log2 N + 2N + 2N^2; norm = 2N^2. */
  float mean = N * log2((float)N) + N;
  float sd = N * log2((float)N) + (2 * N) + (2 * N * N);
  float norm = 2 * N * N;
  printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean + sd + norm) * 1e-9) / (milliseconds * 1e-3));

  /* Release device memory. */
  cudaFree(input);
  cudaFree(output);
  cudaFree(mean_out);
  cudaFree(sd_out);
  /* BUGFIX: memory came from new[]; releasing it with free() is undefined
   * behavior — use delete[]. */
  delete[] Host_Input;
  delete[] Host_Output;
  exit(0);
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9time_seedv
.type _Z9time_seedv, @function
_Z9time_seedv:
.LFB2070:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rsi
leaq 16(%rsp), %rdi
call gettimeofday@PLT
movl 24(%rsp), %eax
movq 40(%rsp), %rdx
subq %fs:40, %rdx
jne .L6
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size _Z9time_seedv, .-_Z9time_seedv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Random seed = %i\n"
.LC1:
.string "N = %i is out of range.\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "Number of threads cannot be more than %i.\n"
.align 8
.LC3:
.string "Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n"
.section .rodata.str1.1
.LC4:
.string "\nMatrix dimension N = %i.\n"
.text
.globl _Z10parametersiPPc
.type _Z10parametersiPPc, @function
_Z10parametersiPPc:
.LFB2071:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movl %edi, %ebx
movq %rsi, %rbp
call _Z9time_seedv
movl %eax, %edi
call srand@PLT
cmpl $4, %ebx
je .L14
cmpl $2, %ebx
jg .L9
movq 0(%rbp), %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call exit@PLT
.L14:
movq 24(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %ebx
movl %eax, %edi
call srand@PLT
movl %ebx, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L9:
movq 8(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, N(%rip)
movq 16(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, numThreads(%rip)
movl N(%rip), %edx
leal -1(%rdx), %ecx
cmpl $7999, %ecx
ja .L15
cmpl $1024, %eax
jle .L12
movl $1024, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call exit@PLT
.L15:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call exit@PLT
.L12:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2071:
.size _Z10parametersiPPc, .-_Z10parametersiPPc
.globl _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii
.type _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii, @function
_Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii:
.LFB2097:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z12Mean_SD_NormPfS_S_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii, .-_Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii
.globl _Z12Mean_SD_NormPfS_S_S_iii
.type _Z12Mean_SD_NormPfS_S_S_iii, @function
_Z12Mean_SD_NormPfS_S_S_iii:
.LFB2098:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z12Mean_SD_NormPfS_S_S_iii, .-_Z12Mean_SD_NormPfS_S_S_iii
.section .rodata.str1.1
.LC5:
.string "\nInitializing...\n"
.LC7:
.string "\nStarting clock.\n"
.LC9:
.string "Stopped clock.\n"
.section .rodata.str1.8
.align 8
.LC11:
.string "\nElapsed time CPU Time = %g ms.\n"
.section .rodata.str1.1
.LC12:
.string "Elapsed GPU Time = %g ms \n"
.section .rodata.str1.8
.align 8
.LC14:
.string "Effective Bandwidth in (GB/s): %f \n"
.align 8
.LC17:
.string "Effective Throughput in (GFLOPS/s): %f \n"
.text
.globl main
.type main, @function
main:
.LFB2072:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $232, %rsp
.cfi_def_cfa_offset 288
movl %edi, %ebx
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movq %rbp, %rsi
movl %ebx, %edi
call _Z10parametersiPPc
movl N(%rip), %eax
imull %eax, %eax
movslq %eax, %rdi
salq $2, %rdi
call _Znam@PLT
movq %rax, %r12
movl N(%rip), %eax
imull %eax, %eax
movslq %eax, %rdi
salq $2, %rdi
call _Znam@PLT
movq %rax, %r13
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl N(%rip), %eax
movl $0, %ebp
testl %eax, %eax
jg .L25
.L26:
movl %eax, %ebp
imull %eax, %ebp
movslq %ebp, %rbp
leaq 0(,%rbp,4), %r14
cltq
leaq 0(,%rax,4), %rbx
leaq 48(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
leaq 56(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, 88(%rsp)
movl $1, 96(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl N(%rip), %edx
movl numThreads(%rip), %eax
movl %edx, %ecx
cmpl %eax, %edx
jge .L30
movl %edx, %eax
.L30:
movl %eax, 92(%rsp)
movl %ecx, 100(%rsp)
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
leaq 80(%rsp), %rsi
leaq 112(%rsp), %rdi
call gettimeofday@PLT
leaq 144(%rsp), %rdi
call times@PLT
movl N(%rip), %r15d
movl numThreads(%rip), %r14d
movl 96(%rsp), %ecx
movl $0, %r9d
movq %rbx, %r8
movq 88(%rsp), %rdx
movq 100(%rsp), %rdi
movl 108(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L31:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 28(%rsp)
leaq 28(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 80(%rsp), %rsi
leaq 128(%rsp), %rdi
call gettimeofday@PLT
leaq 176(%rsp), %rdi
call times@PLT
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl N(%rip), %eax
imull %eax, %eax
movslq %eax, %rdx
salq $2, %rdx
movl $2, %ecx
movq 56(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
imulq $1000000, 128(%rsp), %rax
subq 120(%rsp), %rax
addq 136(%rsp), %rax
imulq $1000000, 112(%rsp), %rdx
subq %rdx, %rax
js .L32
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
.L33:
divss .LC10(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
salq $3, %rbp
js .L34
pxor %xmm0, %xmm0
cvtsi2ssq %rbp, %xmm0
.L35:
divss 28(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
divsd .LC13(%rip), %xmm0
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl N(%rip), %ebx
pxor %xmm3, %xmm3
cvtsi2ssl %ebx, %xmm3
movss %xmm3, 8(%rsp)
movaps %xmm3, %xmm0
call log2f@PLT
movss %xmm0, 12(%rsp)
movss 8(%rsp), %xmm0
call log2f@PLT
movl %ebx, %eax
imull %ebx, %eax
addl %eax, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
addl %ebx, %ebx
pxor %xmm1, %xmm1
cvtsi2ssl %ebx, %xmm1
movss 8(%rsp), %xmm3
mulss %xmm3, %xmm0
addss %xmm1, %xmm0
addss %xmm2, %xmm0
movss 12(%rsp), %xmm1
mulss %xmm3, %xmm1
addss %xmm3, %xmm1
addss %xmm1, %xmm0
addss %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LC15(%rip), %xmm0
pxor %xmm1, %xmm1
cvtss2sd 28(%rsp), %xmm1
mulsd .LC16(%rip), %xmm1
divsd %xmm1, %xmm0
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movl $0, %edi
call exit@PLT
.L27:
call rand@PLT
movl %eax, %ecx
movl N(%rip), %edx
movl %edx, %eax
imull %ebx, %eax
addl %ebp, %eax
cltq
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
mulss .LC6(%rip), %xmm0
movss %xmm0, (%r12,%rax,4)
addl $1, %ebx
cmpl %ebx, %edx
jg .L27
.L28:
addl $1, %ebp
movl N(%rip), %eax
cmpl %ebp, %eax
jle .L26
.L25:
movl $0, %ebx
cmpl $0, N(%rip)
jg .L27
jmp .L28
.L43:
pxor %xmm0, %xmm0
cvtsi2sdl %r15d, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl %r14d, %xmm1
divsd %xmm1, %xmm0
call ceil@PLT
subq $8, %rsp
.cfi_def_cfa_offset 296
cvttsd2sil %xmm0, %eax
pushq %rax
.cfi_def_cfa_offset 304
movl numThreads(%rip), %r9d
movl N(%rip), %r8d
movq 88(%rsp), %rcx
movq 80(%rsp), %rdx
movq 72(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii
addq $16, %rsp
.cfi_def_cfa_offset 288
jmp .L31
.L32:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
addss %xmm0, %xmm0
jmp .L33
.L34:
shrq %rbp
pxor %xmm0, %xmm0
cvtsi2ssq %rbp, %xmm0
addss %xmm0, %xmm0
jmp .L35
.cfi_endproc
.LFE2072:
.size main, .-main
.section .rodata.str1.1
.LC18:
.string "_Z12Mean_SD_NormPfS_S_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z12Mean_SD_NormPfS_S_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl numThreads
.bss
.align 4
.type numThreads, @object
.size numThreads, 4
numThreads:
.zero 4
.globl N
.align 4
.type N, @object
.size N, 4
N:
.zero 4
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC6:
.long 939524096
.align 4
.LC10:
.long 1148846080
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC13:
.long 0
.long 1093567616
.align 8
.LC15:
.long -400107883
.long 1041313291
.align 8
.LC16:
.long -755914244
.long 1062232653
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#define MAXN 8000 /* Max value of N */
int N; /* Matrix Dimension*/
int numThreads; /* Number of Threads */
/*Random*/
#define randm() 4|2[uid]&3
/*CUDA Function for calculating mean column-wise and then reducing each column's totals*/
/*This Function will be called Number of blocks times*/
__global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil)
{
extern __shared__ float mean[];//shared 1D-matrix for storing temporary results for mean of each threads
extern __shared__ float sd[];//shared 1D-matrix for storing temporary results for sd of each threads
__shared__ float meansum;//shared 1D-matrix for storing mean total of each threads
__shared__ float sdsum;//shared 1D-matrix for storing SD total of each threads
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//Getting Thread X Index for Particular Block
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//Getting Thread Y Index for Particular Block
int eva_block,index;
unsigned int thread_id = threadIdx.y;//Getting Id of thread
unsigned int j = idx_y * dim1 + idx_x;//calculating index for input matrix
__syncthreads();//waiting for all threads
mean[thread_id]=input[j];//Assigned each column element of matrix to each thread
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);//calculating index of remaining element
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
mean[thread_id]+=input[index];
}
}
/*Reducing sum of each thread to final block sum*/
if(thread_id==0)
{
for(int i=0;i<numThread;i++)
{
meansum+=mean[thread_id+i];
}
mean_out[blockIdx.x]=meansum/dim1;//Mean of block
}
__syncthreads();
sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//evaluating SD for each thread for particular block
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0);
}
}
/*Reducing SD Sum of each thread to final block SD sum*/
if(thread_id==0)
{
sdsum=0;
for(int i=0;i<numThread;i++)
{
sdsum+=sd[thread_id+i];//calculating index of remaining element
}
sd_out[blockIdx.x]=sdsum/dim1;//SD of block
}
__syncthreads();//waiting for threads
/*Normalization of each block data on basis of mean and sd of each block*/
output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
/*Reducing Normalized Sum for remaining elements*/
for(int i=0;i<eval_ceil;i++){
if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1)
{
output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];//Normalizing the Matrix Indexes
}
}
}
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 4) {
seed = atoi(argv[3]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 3) {
N = atoi(argv[1]);
numThreads = atoi(argv[2]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
/*Number of Threads should be less than or equal to 1024 else exit*/
if (numThreads > 1024)
{
printf("Number of threads cannot be more than %i.\n", 1024);
exit(0);
}
}
else
{
printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n",argv[0]);
exit(0);
}
printf("\nMatrix dimension N = %i.\n", N);
}
int main(int argc, char **argv)
{
/* Timing variables */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
float* Host_Input = new float [N * N];//Input Matrix
float* Host_Output = new float [N * N];//Output Matrix
int i,j;
/*Initializing Input Matrix with random values*/
printf("\nInitializing...\n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
//Host_Input[j* N + i] = j+1;
Host_Input[j* N + i] = (float)rand() / 32768.0;
}
}
float* input;//Device Input Matrix
float* output;//Device Output Matrix
float* mean_out;//Device Mean Matrix
float* sd_out;//Device SD Matrix
size_t matrix_size_2d = N * N * sizeof(float);//Size of 2D Matrix
size_t matrix_size_1d = N * sizeof(float);//Size of 1D Matrix
//allocated the device memory for source array
cudaMalloc(&input, matrix_size_2d);
cudaMemcpy(input, Host_Input, matrix_size_2d, cudaMemcpyHostToDevice);
//allocate the device memory for destination array
cudaMalloc(&output, matrix_size_2d);
//allocate the device memory for mean array
cudaMalloc(&mean_out, matrix_size_1d);
//allocate the device memory for sd array
cudaMalloc(&sd_out, matrix_size_1d);
dim3 dimBlock;
dim3 dimGrid;
/* Designing Decisions for number of blocks and number of threads in each block */
if( N < numThreads)
{
dimBlock.x = 1;
dimBlock.y = N;
dimGrid.x = N;
dimGrid.y = 1;
}
else
{
dimBlock.x = 1;
dimBlock.y = numThreads;
dimGrid.x = N;
dimGrid.y = 1;
}
/* Start Clock */
printf("\nStarting clock.\n");
cudaEventRecord(start);
gettimeofday(&etstart,&tzdummy);
etstart2 = times(&cputstart);
double d_ceil=(double)N/(double)numThreads;
int c=ceil(d_ceil);
//printf("nt=%d\t c1=%ld\tc=%d\n",nt,c1,c);
//Calling CUDA Kernel Function For Normalizing Matrix
Mean_SD_Norm<<<dimGrid, dimBlock, matrix_size_1d>>>(input,output,mean_out,sd_out,N,numThreads,c);
cudaDeviceSynchronize();
/* Stop Clock code below*/
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
/*Copying Output Device Matrix to Output Host Matrix*/
cudaMemcpy(Host_Output, output, N * N * sizeof(float), cudaMemcpyDeviceToHost);
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
/* if (N < 10) {
printf("\nB1 =\n\t");
for (i= 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%1.10f%s", Host_Output[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}*/
/* Display result time */
printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
printf("Elapsed GPU Time = %g ms \n",milliseconds);
printf("Effective Bandwidth in (GB/s): %f \n", (2*matrix_size_2d/milliseconds)/1e6);
float mean = N * log2((float)N) + N;
float sd = N * log2((float)N) + (2*N) + (2*N*N);
float norm = 2 * N * N;
printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean+sd+norm)*1e-9)/(milliseconds*1e-3));
//deallocate device memory below
cudaFree(input);
cudaFree(output);
cudaFree(mean_out);
cudaFree(sd_out);
//deallocate Host Input and Host Output Matrix
free(Host_Input);
free(Host_Output);
exit(0);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#define MAXN 8000 /* Max value of N */
int N; /* Matrix Dimension*/
int numThreads; /* Number of Threads */
/*Random*/
#define randm() 4|2[uid]&3
/*CUDA Function for calculating mean column-wise and then reducing each column's totals*/
/*This Function will be called Number of blocks times*/
__global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil)
{
extern __shared__ float mean[];//shared 1D-matrix for storing temporary results for mean of each threads
extern __shared__ float sd[];//shared 1D-matrix for storing temporary results for sd of each threads
__shared__ float meansum;//shared 1D-matrix for storing mean total of each threads
__shared__ float sdsum;//shared 1D-matrix for storing SD total of each threads
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//Getting Thread X Index for Particular Block
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//Getting Thread Y Index for Particular Block
int eva_block,index;
unsigned int thread_id = threadIdx.y;//Getting Id of thread
unsigned int j = idx_y * dim1 + idx_x;//calculating index for input matrix
__syncthreads();//waiting for all threads
mean[thread_id]=input[j];//Assigned each column element of matrix to each thread
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);//calculating index of remaining element
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
mean[thread_id]+=input[index];
}
}
/*Reducing sum of each thread to final block sum*/
if(thread_id==0)
{
for(int i=0;i<numThread;i++)
{
meansum+=mean[thread_id+i];
}
mean_out[blockIdx.x]=meansum/dim1;//Mean of block
}
__syncthreads();
sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//evaluating SD for each thread for particular block
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0);
}
}
/*Reducing SD Sum of each thread to final block SD sum*/
if(thread_id==0)
{
sdsum=0;
for(int i=0;i<numThread;i++)
{
sdsum+=sd[thread_id+i];//calculating index of remaining element
}
sd_out[blockIdx.x]=sdsum/dim1;//SD of block
}
__syncthreads();//waiting for threads
/*Normalization of each block data on basis of mean and sd of each block*/
output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
/*Reducing Normalized Sum for remaining elements*/
for(int i=0;i<eval_ceil;i++){
if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1)
{
output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];//Normalizing the Matrix Indexes
}
}
}
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 4) {
seed = atoi(argv[3]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 3) {
N = atoi(argv[1]);
numThreads = atoi(argv[2]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
/*Number of Threads should be less than or equal to 1024 else exit*/
if (numThreads > 1024)
{
printf("Number of threads cannot be more than %i.\n", 1024);
exit(0);
}
}
else
{
printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n",argv[0]);
exit(0);
}
printf("\nMatrix dimension N = %i.\n", N);
}
int main(int argc, char **argv)
{
/* Timing variables */
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
float* Host_Input = new float [N * N];//Input Matrix
float* Host_Output = new float [N * N];//Output Matrix
int i,j;
/*Initializing Input Matrix with random values*/
printf("\nInitializing...\n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
//Host_Input[j* N + i] = j+1;
Host_Input[j* N + i] = (float)rand() / 32768.0;
}
}
float* input;//Device Input Matrix
float* output;//Device Output Matrix
float* mean_out;//Device Mean Matrix
float* sd_out;//Device SD Matrix
size_t matrix_size_2d = N * N * sizeof(float);//Size of 2D Matrix
size_t matrix_size_1d = N * sizeof(float);//Size of 1D Matrix
//allocated the device memory for source array
hipMalloc(&input, matrix_size_2d);
hipMemcpy(input, Host_Input, matrix_size_2d, hipMemcpyHostToDevice);
//allocate the device memory for destination array
hipMalloc(&output, matrix_size_2d);
//allocate the device memory for mean array
hipMalloc(&mean_out, matrix_size_1d);
//allocate the device memory for sd array
hipMalloc(&sd_out, matrix_size_1d);
dim3 dimBlock;
dim3 dimGrid;
/* Designing Decisions for number of blocks and number of threads in each block */
if( N < numThreads)
{
dimBlock.x = 1;
dimBlock.y = N;
dimGrid.x = N;
dimGrid.y = 1;
}
else
{
dimBlock.x = 1;
dimBlock.y = numThreads;
dimGrid.x = N;
dimGrid.y = 1;
}
/* Start Clock */
printf("\nStarting clock.\n");
hipEventRecord(start);
gettimeofday(&etstart,&tzdummy);
etstart2 = times(&cputstart);
double d_ceil=(double)N/(double)numThreads;
int c=ceil(d_ceil);
//printf("nt=%d\t c1=%ld\tc=%d\n",nt,c1,c);
//Calling CUDA Kernel Function For Normalizing Matrix
Mean_SD_Norm<<<dimGrid, dimBlock, matrix_size_1d>>>(input,output,mean_out,sd_out,N,numThreads,c);
hipDeviceSynchronize();
/* Stop Clock code below*/
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
/*Copying Output Device Matrix to Output Host Matrix*/
hipMemcpy(Host_Output, output, N * N * sizeof(float), hipMemcpyDeviceToHost);
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
/* if (N < 10) {
printf("\nB1 =\n\t");
for (i= 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%1.10f%s", Host_Output[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}*/
/* Display result time */
printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
printf("Elapsed GPU Time = %g ms \n",milliseconds);
printf("Effective Bandwidth in (GB/s): %f \n", (2*matrix_size_2d/milliseconds)/1e6);
float mean = N * log2((float)N) + N;
float sd = N * log2((float)N) + (2*N) + (2*N*N);
float norm = 2 * N * N;
printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean+sd+norm)*1e-9)/(milliseconds*1e-3));
//deallocate device memory below
hipFree(input);
hipFree(output);
hipFree(mean_out);
hipFree(sd_out);
//deallocate Host Input and Host Output Matrix
free(Host_Input);
free(Host_Output);
exit(0);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#define MAXN 8000 /* Max value of N */
int N; /* Matrix Dimension*/
int numThreads; /* Number of Threads */
/*Random*/
#define randm() 4|2[uid]&3
/*CUDA Function for calculating mean column-wise and then reducing each column's totals*/
/*This Function will be called Number of blocks times*/
__global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil)
{
extern __shared__ float mean[];//shared 1D-matrix for storing temporary results for mean of each threads
extern __shared__ float sd[];//shared 1D-matrix for storing temporary results for sd of each threads
__shared__ float meansum;//shared 1D-matrix for storing mean total of each threads
__shared__ float sdsum;//shared 1D-matrix for storing SD total of each threads
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//Getting Thread X Index for Particular Block
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//Getting Thread Y Index for Particular Block
int eva_block,index;
unsigned int thread_id = threadIdx.y;//Getting Id of thread
unsigned int j = idx_y * dim1 + idx_x;//calculating index for input matrix
__syncthreads();//waiting for all threads
mean[thread_id]=input[j];//Assigned each column element of matrix to each thread
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);//calculating index of remaining element
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
mean[thread_id]+=input[index];
}
}
/*Reducing sum of each thread to final block sum*/
if(thread_id==0)
{
for(int i=0;i<numThread;i++)
{
meansum+=mean[thread_id+i];
}
mean_out[blockIdx.x]=meansum/dim1;//Mean of block
}
__syncthreads();
sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//evaluating SD for each thread for particular block
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0);
}
}
/*Reducing SD Sum of each thread to final block SD sum*/
if(thread_id==0)
{
sdsum=0;
for(int i=0;i<numThread;i++)
{
sdsum+=sd[thread_id+i];//calculating index of remaining element
}
sd_out[blockIdx.x]=sdsum/dim1;//SD of block
}
__syncthreads();//waiting for threads
/*Normalization of each block data on basis of mean and sd of each block*/
output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
/*Reducing Normalized Sum for remaining elements*/
for(int i=0;i<eval_ceil;i++){
if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1)
{
output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];//Normalizing the Matrix Indexes
}
}
}
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 4) {
seed = atoi(argv[3]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 3) {
N = atoi(argv[1]);
numThreads = atoi(argv[2]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
/*Number of Threads should be less than or equal to 1024 else exit*/
if (numThreads > 1024)
{
printf("Number of threads cannot be more than %i.\n", 1024);
exit(0);
}
}
else
{
printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n",argv[0]);
exit(0);
}
printf("\nMatrix dimension N = %i.\n", N);
}
int main(int argc, char **argv)
{
/* Timing variables */
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
float* Host_Input = new float [N * N];//Input Matrix
float* Host_Output = new float [N * N];//Output Matrix
int i,j;
/*Initializing Input Matrix with random values*/
printf("\nInitializing...\n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
//Host_Input[j* N + i] = j+1;
Host_Input[j* N + i] = (float)rand() / 32768.0;
}
}
float* input;//Device Input Matrix
float* output;//Device Output Matrix
float* mean_out;//Device Mean Matrix
float* sd_out;//Device SD Matrix
size_t matrix_size_2d = N * N * sizeof(float);//Size of 2D Matrix
size_t matrix_size_1d = N * sizeof(float);//Size of 1D Matrix
//allocated the device memory for source array
hipMalloc(&input, matrix_size_2d);
hipMemcpy(input, Host_Input, matrix_size_2d, hipMemcpyHostToDevice);
//allocate the device memory for destination array
hipMalloc(&output, matrix_size_2d);
//allocate the device memory for mean array
hipMalloc(&mean_out, matrix_size_1d);
//allocate the device memory for sd array
hipMalloc(&sd_out, matrix_size_1d);
dim3 dimBlock;
dim3 dimGrid;
/* Designing Decisions for number of blocks and number of threads in each block */
if( N < numThreads)
{
dimBlock.x = 1;
dimBlock.y = N;
dimGrid.x = N;
dimGrid.y = 1;
}
else
{
dimBlock.x = 1;
dimBlock.y = numThreads;
dimGrid.x = N;
dimGrid.y = 1;
}
/* Start Clock */
printf("\nStarting clock.\n");
hipEventRecord(start);
gettimeofday(&etstart,&tzdummy);
etstart2 = times(&cputstart);
double d_ceil=(double)N/(double)numThreads;
int c=ceil(d_ceil);
//printf("nt=%d\t c1=%ld\tc=%d\n",nt,c1,c);
//Calling CUDA Kernel Function For Normalizing Matrix
Mean_SD_Norm<<<dimGrid, dimBlock, matrix_size_1d>>>(input,output,mean_out,sd_out,N,numThreads,c);
hipDeviceSynchronize();
/* Stop Clock code below*/
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
/*Copying Output Device Matrix to Output Host Matrix*/
hipMemcpy(Host_Output, output, N * N * sizeof(float), hipMemcpyDeviceToHost);
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
/* if (N < 10) {
printf("\nB1 =\n\t");
for (i= 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%1.10f%s", Host_Output[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}*/
/* Display result time */
printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
printf("Elapsed GPU Time = %g ms \n",milliseconds);
printf("Effective Bandwidth in (GB/s): %f \n", (2*matrix_size_2d/milliseconds)/1e6);
float mean = N * log2((float)N) + N;
float sd = N * log2((float)N) + (2*N) + (2*N*N);
float norm = 2 * N * N;
printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean+sd+norm)*1e-9)/(milliseconds*1e-3));
//deallocate device memory below
hipFree(input);
hipFree(output);
hipFree(mean_out);
hipFree(sd_out);
//deallocate Host Input and Host Output Matrix
free(Host_Input);
free(Host_Output);
exit(0);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12Mean_SD_NormPfS_S_S_iii
.globl _Z12Mean_SD_NormPfS_S_S_iii
.p2align 8
.type _Z12Mean_SD_NormPfS_S_S_iii,@function
_Z12Mean_SD_NormPfS_S_S_iii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x3c
s_load_b64 s[6:7], s[0:1], 0x20
v_bfe_u32 v1, v0, 10, 10
s_load_b64 s[8:9], s[0:1], 0x0
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_lshr_b32 s3, s2, 16
s_and_b32 s2, s2, 0xffff
v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2]
v_mov_b32_e32 v3, 0
s_mul_i32 s2, s14, s2
v_add_nc_u32_e32 v6, s7, v1
s_cmp_lt_i32 s6, 1
s_mul_i32 s12, s6, s6
s_mul_i32 s13, s7, s6
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, v2, s6
v_add3_u32 v2, s2, v0, v2
v_lshl_add_u32 v0, v1, 2, 8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s8, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
global_load_b32 v4, v[2:3], off
s_waitcnt vmcnt(0)
ds_store_b32 v0, v4
s_cbranch_scc1 .LBB0_5
v_mul_lo_u32 v4, s6, v6
s_mov_b32 s2, 0
s_branch .LBB0_3
.p2align 6
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
v_add_nc_u32_e32 v4, s13, v4
s_add_i32 s2, s2, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s2, s6
s_cbranch_scc1 .LBB0_5
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, s14, v4
s_mov_b32 s3, exec_lo
v_cmpx_gt_i32_e64 s12, v5
s_cbranch_execz .LBB0_2
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[7:8], 2, v[4:5]
v_add_co_u32 v7, vcc_lo, s8, v7
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v8, vcc_lo, s9, v8, vcc_lo
global_load_b32 v5, v[7:8], off
ds_load_b32 v7, v0
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_f32_e32 v5, v5, v7
ds_store_b32 v0, v5
s_branch .LBB0_2
.LBB0_5:
s_load_b64 s[4:5], s[0:1], 0x10
v_cmp_eq_u32_e64 s2, 0, v1
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_11
s_cmp_lt_i32 s7, 1
s_cbranch_scc1 .LBB0_10
v_mov_b32_e32 v0, 0
s_mov_b32 s10, 8
s_mov_b32 s11, s7
ds_load_b32 v0, v0 offset:4
.LBB0_8:
v_mov_b32_e32 v4, s10
s_add_i32 s11, s11, -1
s_add_i32 s10, s10, 4
s_cmp_eq_u32 s11, 0
ds_load_b32 v4, v4
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v0, v4, v0
s_cbranch_scc0 .LBB0_8
v_mov_b32_e32 v4, 0
ds_store_b32 v4, v0 offset:4
.LBB0_10:
v_mov_b32_e32 v0, 0
v_cvt_f32_i32_e32 v5, s6
s_mov_b32 s15, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[10:11], s[14:15], 2
ds_load_b32 v4, v0 offset:4
s_waitcnt lgkmcnt(0)
s_add_u32 s10, s4, s10
s_addc_u32 s11, s5, s11
v_div_scale_f32 v7, null, v5, v5, v4
v_div_scale_f32 v10, vcc_lo, v4, v5, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v8, v7
s_waitcnt_depctr 0xfff
v_fma_f32 v9, -v7, v8, 1.0
v_fmac_f32_e32 v8, v9, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v9, v10, v8
v_fma_f32 v11, -v7, v9, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v9, v11, v8
v_fma_f32 v7, -v7, v9, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v7, v7, v8, v9
v_div_fixup_f32 v4, v7, v5, v4
global_store_b32 v0, v4, s[10:11]
.LBB0_11:
s_or_b32 exec_lo, exec_lo, s3
s_mov_b32 s15, 0
v_mov_b32_e32 v0, 0
s_lshl_b64 s[10:11], s[14:15], 2
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_add_u32 s10, s4, s10
s_addc_u32 s11, s5, s11
s_barrier
buffer_gl0_inv
global_load_b32 v2, v[2:3], off
global_load_b32 v0, v0, s[10:11]
s_mov_b32 s5, 0x3e76c4e1
s_waitcnt vmcnt(0)
v_sub_f32_e32 v2, v2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_frexp_mant_f32_e64 v3, |v2|
v_cmp_gt_f32_e32 vcc_lo, 0x3f2aaaab, v3
v_cndmask_b32_e64 v4, 0, 1, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ldexp_f32 v3, v3, v4
v_add_f32_e32 v7, -1.0, v3
v_add_f32_e32 v4, 1.0, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v5, v4
v_add_f32_e32 v9, -1.0, v4
v_sub_f32_e32 v3, v3, v9
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v8, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v10, v4, v8
v_fma_f32 v4, v8, v4, -v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v8, v3
v_add_f32_e32 v3, v10, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_sub_f32_e32 v9, v7, v3
v_sub_f32_e32 v10, v3, v10
v_dual_sub_f32 v7, v7, v9 :: v_dual_sub_f32 v4, v10, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v3, v7, v3
v_add_f32_e32 v3, v4, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v3, v9, v3
v_mul_f32_e32 v3, v5, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v4, v8, v3
v_sub_f32_e32 v5, v4, v8
v_mul_f32_e32 v7, v4, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v3, v3, v5
v_fma_f32 v5, v4, v4, -v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v8, v3, v3
v_fmac_f32_e32 v5, v4, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v8, v7, v5
v_fmaak_f32 v9, s5, v8, 0x3e91f4c4
v_mul_f32_e32 v12, v4, v8
v_sub_f32_e32 v7, v8, v7
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fmaak_f32 v9, v8, v9, 0x3ecccdef
v_fma_f32 v13, v8, v4, -v12
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_sub_f32 v5, v5, v7 :: v_dual_mul_f32 v10, v8, v9
v_fmac_f32_e32 v13, v8, v3
v_ldexp_f32 v3, v3, 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fma_f32 v7, v8, v9, -v10
v_fmac_f32_e32 v13, v5, v4
v_ldexp_f32 v4, v4, 1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_f32_e32 v8, v12, v13
v_fmac_f32_e32 v7, v5, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v9, v10, v7
v_dual_sub_f32 v10, v9, v10 :: v_dual_add_f32 v11, 0x3f2aaaaa, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_sub_f32 v7, v7, v10 :: v_dual_add_f32 v10, 0xbf2aaaaa, v11
v_add_f32_e32 v7, 0x31739010, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v9, v9, v10
v_add_f32_e32 v7, v7, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v5, v11, v7
v_sub_f32_e32 v9, v11, v5
v_mul_f32_e32 v10, v8, v5
v_sub_f32_e32 v11, v8, v12
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f32_e32 v7, v7, v9
v_fma_f32 v9, v8, v5, -v10
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v11, v13, v11
v_fmac_f32_e32 v9, v8, v7
v_frexp_exp_i32_f32_e32 v7, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v9, v11, v5
v_subrev_co_ci_u32_e32 v5, vcc_lo, 0, v7, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 1.0, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f32_e32 v7, v10, v9
v_cvt_f32_i32_e32 v5, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f32_e32 v8, v4, v7
v_dual_sub_f32 v10, v7, v10 :: v_dual_mul_f32 v11, 0x3f317218, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_sub_f32 v4, v8, v4 :: v_dual_sub_f32 v9, v9, v10
v_fma_f32 v10, v5, 0x3f317218, -v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_sub_f32_e32 v4, v7, v4
v_add_f32_e32 v3, v3, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_fmac_f32 v10, 0xb102e308, v5 :: v_dual_add_f32 v3, v3, v4
v_dual_add_f32 v4, v11, v10 :: v_dual_add_f32 v5, v8, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v11, v4, v11
v_dual_add_f32 v7, v4, v5 :: v_dual_sub_f32 v8, v5, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_sub_f32 v10, v10, v11 :: v_dual_sub_f32 v9, v7, v4
v_sub_f32_e32 v3, v3, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v12, v7, v9
v_dual_sub_f32 v5, v5, v9 :: v_dual_add_f32 v8, v10, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v4, v4, v12
v_dual_add_f32 v4, v5, v4 :: v_dual_sub_f32 v5, v8, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f32_e32 v4, v8, v4
v_sub_f32_e32 v8, v8, v5
v_sub_f32_e32 v3, v3, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f32_e32 v9, v7, v4
v_sub_f32_e32 v5, v10, v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v7, v9, v7
v_add_f32_e32 v3, v3, v5
v_cndmask_b32_e64 v5, 2.0, 1.0, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v4, v4, v7
v_add_f32_e32 v3, v3, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v4, v9, v3
v_dual_sub_f32 v7, v4, v9 :: v_dual_mul_f32 v8, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v3, v3, v7
v_fma_f32 v4, v5, v4, -v8
v_cmp_class_f32_e64 vcc_lo, v8, 0x204
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v4, v5, v3
v_add_f32_e32 v3, v8, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v7, v3, v8, vcc_lo
v_sub_f32_e32 v3, v3, v8
v_cmp_eq_f32_e32 vcc_lo, 0x42b17218, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_sub_f32_e32 v3, v4, v3
v_cndmask_b32_e64 v9, 0, 0x37000000, vcc_lo
v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v7|
v_dual_sub_f32 v10, v7, v9 :: v_dual_cndmask_b32 v3, 0, v3
v_trunc_f32_e32 v7, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_mul_f32_e32 v11, 0x3fb8aa3b, v10
v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v10
v_add_f32_e32 v3, v9, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f32 v12, v10, 0x3fb8aa3b, -v11
v_rndne_f32_e32 v13, v11
v_dual_fmac_f32 v12, 0x32a5705f, v10 :: v_dual_sub_f32 v11, v11, v13
v_cvt_i32_f32_e32 v8, v13
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v11, v11, v12
v_exp_f32_e32 v11, v11
s_waitcnt_depctr 0xfff
v_ldexp_f32 v4, v11, v8
v_mul_f32_e32 v8, 0.5, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v4, 0, v4, vcc_lo
v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v10
v_trunc_f32_e32 v11, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v4, 0x7f800000, v4, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, v7, v5
v_cmp_neq_f32_e64 s3, v11, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_fma_f32 v3, v4, v3, v4
v_cmp_eq_f32_e64 s4, 0x7f800000, v4
s_and_b32 s3, vcc_lo, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v5, 1.0, v2, s3
v_cndmask_b32_e64 v3, v3, v4, s4
v_cmp_eq_f32_e64 s4, 0, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_bfi_b32 v3, 0x7fffffff, v3, v5
v_cndmask_b32_e64 v4, 0x7f800000, 0, s4
v_cndmask_b32_e64 v5, 0, v2, s3
v_cmp_class_f32_e64 s3, v2, 0x204
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_cndmask_b32_e32 v7, 0x7fc00000, v3, vcc_lo
v_cmp_gt_f32_e32 vcc_lo, 0, v2
v_bfi_b32 v4, 0x7fffffff, v4, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v3, v3, v7, vcc_lo
s_or_b32 vcc_lo, s4, s3
s_cmp_lt_i32 s6, 1
v_cndmask_b32_e32 v3, v3, v4, vcc_lo
v_cmp_o_f32_e32 vcc_lo, v2, v2
v_lshl_add_u32 v4, v1, 2, 8
s_delay_alu instid0(VALU_DEP_3)
v_cndmask_b32_e32 v2, 0x7fc00000, v3, vcc_lo
ds_store_b32 v4, v2
s_cbranch_scc1 .LBB0_16
v_mul_lo_u32 v2, s6, v6
s_mov_b32 s16, s15
s_branch .LBB0_14
.LBB0_13:
s_or_b32 exec_lo, exec_lo, s17
v_add_nc_u32_e32 v2, s13, v2
s_add_i32 s16, s16, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s16, s6
s_cbranch_scc1 .LBB0_16
.LBB0_14:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v3, s14, v2
s_mov_b32 s17, exec_lo
v_cmpx_gt_i32_e64 s12, v3
s_cbranch_execz .LBB0_13
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[2:3]
v_add_co_u32 v5, vcc_lo, s8, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v6, vcc_lo, s9, v6, vcc_lo
global_load_b32 v3, v[5:6], off
s_waitcnt vmcnt(0)
v_sub_f32_e32 v3, v3, v0
v_frexp_mant_f32_e64 v5, |v3|
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_gt_f32_e32 vcc_lo, 0x3f2aaaab, v5
v_cndmask_b32_e64 v6, 0, 1, vcc_lo
v_ldexp_f32 v5, v5, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v6, 1.0, v5
v_rcp_f32_e32 v7, v6
v_add_f32_e32 v10, -1.0, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_dual_add_f32 v8, -1.0, v5 :: v_dual_sub_f32 v5, v5, v10
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v9, v8, v7
v_mul_f32_e32 v11, v6, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v6, v9, v6, -v11
v_fmac_f32_e32 v6, v9, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v5, v11, v6
v_dual_sub_f32 v10, v8, v5 :: v_dual_sub_f32 v11, v5, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v6, v11, v6
v_sub_f32_e32 v8, v8, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v5, v8, v5
v_add_f32_e32 v5, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v5, v10, v5
v_mul_f32_e32 v5, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v6, v9, v5
v_sub_f32_e32 v7, v6, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mul_f32 v8, v6, v6 :: v_dual_sub_f32 v5, v5, v7
v_fma_f32 v7, v6, v6, -v8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v9, v5, v5
v_fmac_f32_e32 v7, v6, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v9, v8, v7
v_fmaak_f32 v10, s5, v9, 0x3e91f4c4
v_sub_f32_e32 v8, v9, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_dual_fmaak_f32 v10, v9, v10, 0x3ecccdef :: v_dual_sub_f32 v7, v7, v8
v_mul_f32_e32 v13, v6, v9
v_mul_f32_e32 v11, v9, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v14, v9, v6, -v13
v_fma_f32 v8, v9, v10, -v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_fmac_f32_e32 v8, v7, v10
v_fmac_f32_e32 v14, v9, v5
v_ldexp_f32 v5, v5, 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f32_e32 v10, v11, v8
v_fmac_f32_e32 v14, v7, v6
v_ldexp_f32 v6, v6, 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_sub_f32 v11, v10, v11 :: v_dual_add_f32 v12, 0x3f2aaaaa, v10
v_dual_sub_f32 v8, v8, v11 :: v_dual_add_f32 v11, 0xbf2aaaaa, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f32_e32 v8, 0x31739010, v8
v_sub_f32_e32 v10, v10, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v8, v8, v10
v_add_f32_e32 v7, v12, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_add_f32 v9, v13, v14 :: v_dual_sub_f32 v10, v12, v7
v_mul_f32_e32 v11, v9, v7
v_sub_f32_e32 v12, v9, v13
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f32_e32 v8, v8, v10
v_fma_f32 v10, v9, v7, -v11
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v12, v14, v12
v_fmac_f32_e32 v10, v9, v8
v_frexp_exp_i32_f32_e32 v8, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v10, v12, v7
v_subrev_co_ci_u32_e32 v7, vcc_lo, 0, v8, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 1.0, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f32_e32 v8, v11, v10
v_cvt_f32_i32_e32 v7, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v9, v6, v8
v_dual_sub_f32 v11, v8, v11 :: v_dual_sub_f32 v6, v9, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_sub_f32_e32 v10, v10, v11
v_mul_f32_e32 v12, 0x3f317218, v7
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_sub_f32_e32 v6, v8, v6
v_add_f32_e32 v5, v5, v10
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v11, v7, 0x3f317218, -v12
v_add_f32_e32 v5, v5, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v11, 0xb102e308, v7
v_dual_add_f32 v7, v9, v5 :: v_dual_add_f32 v6, v12, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v9, v7, v9
v_dual_sub_f32 v5, v5, v9 :: v_dual_add_f32 v8, v6, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v10, v8, v6
v_dual_sub_f32 v7, v7, v10 :: v_dual_sub_f32 v12, v6, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v11, v11, v12
v_add_f32_e32 v9, v11, v5
v_sub_f32_e32 v13, v8, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v6, v6, v13
v_dual_add_f32 v6, v7, v6 :: v_dual_sub_f32 v7, v9, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_f32_e32 v5, v5, v7
v_add_f32_e32 v6, v9, v6
v_sub_f32_e32 v9, v9, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_sub_f32 v7, v11, v9 :: v_dual_add_f32 v10, v8, v6
v_dual_add_f32 v5, v5, v7 :: v_dual_sub_f32 v8, v10, v8
v_cndmask_b32_e64 v7, 2.0, 1.0, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v6, v6, v8
v_add_f32_e32 v5, v5, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v6, v10, v5
v_sub_f32_e32 v8, v6, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_sub_f32_e32 v5, v5, v8
v_mul_f32_e32 v9, v7, v6
v_fma_f32 v6, v7, v6, -v9
v_cmp_class_f32_e64 vcc_lo, v9, 0x204
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v6, v7, v5
v_add_f32_e32 v5, v9, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v8, v5, v9, vcc_lo
v_cmp_eq_f32_e32 vcc_lo, 0x42b17218, v8
v_cndmask_b32_e64 v10, 0, 0x37000000, vcc_lo
v_cmp_neq_f32_e64 vcc_lo, 0x7f800000, |v8|
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_f32_e32 v11, v8, v10
v_trunc_f32_e32 v8, v7
v_mul_f32_e32 v12, 0x3fb8aa3b, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f32 v13, v11, 0x3fb8aa3b, -v12
v_rndne_f32_e32 v14, v12
v_dual_fmac_f32 v13, 0x32a5705f, v11 :: v_dual_sub_f32 v12, v12, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_f32_e32 v12, v12, v13
v_sub_f32_e32 v5, v5, v9
v_cvt_i32_f32_e32 v9, v14
v_exp_f32_e32 v12, v12
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_f32_e32 v5, v6, v5
v_cndmask_b32_e32 v5, 0, v5, vcc_lo
v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v11
s_waitcnt_depctr 0xfff
v_ldexp_f32 v6, v12, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_dual_mul_f32 v9, 0.5, v7 :: v_dual_cndmask_b32 v6, 0, v6
v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v11
v_trunc_f32_e32 v12, v9
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_dual_add_f32 v5, v10, v5 :: v_dual_cndmask_b32 v6, 0x7f800000, v6
v_cmp_eq_f32_e32 vcc_lo, v8, v7
v_cmp_neq_f32_e64 s3, v12, v9
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_fma_f32 v5, v6, v5, v6
v_cmp_eq_f32_e64 s4, 0x7f800000, v6
s_and_b32 s3, vcc_lo, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v7, 1.0, v3, s3
v_cndmask_b32_e64 v9, 0, v3, s3
v_cndmask_b32_e64 v5, v5, v6, s4
v_cmp_eq_f32_e64 s4, 0, v3
ds_load_b32 v6, v4
v_cmp_class_f32_e64 s3, v3, 0x204
v_bfi_b32 v5, 0x7fffffff, v5, v7
v_cndmask_b32_e64 v7, 0x7f800000, 0, s4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v8, 0x7fc00000, v5, vcc_lo
v_cmp_gt_f32_e32 vcc_lo, 0, v3
v_bfi_b32 v7, 0x7fffffff, v7, v9
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v5, v5, v8, vcc_lo
s_or_b32 vcc_lo, s4, s3
v_cndmask_b32_e32 v5, v5, v7, vcc_lo
v_cmp_o_f32_e32 vcc_lo, v3, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v3, 0x7fc00000, v5, vcc_lo
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v3, v6, v3
ds_store_b32 v4, v3
s_branch .LBB0_13
.LBB0_16:
s_load_b64 s[4:5], s[0:1], 0x18
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_22
v_mov_b32_e32 v0, 0
s_cmp_lt_i32 s7, 1
ds_store_b32 v0, v0
s_cbranch_scc1 .LBB0_21
v_mov_b32_e32 v2, 0
s_mov_b32 s2, 8
s_mov_b32 s13, s7
.LBB0_19:
v_mov_b32_e32 v3, s2
s_add_i32 s13, s13, -1
s_add_i32 s2, s2, 4
s_cmp_eq_u32 s13, 0
ds_load_b32 v3, v3
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v2, v3, v2
s_cbranch_scc0 .LBB0_19
v_mov_b32_e32 v3, 0
ds_store_b32 v3, v2
.LBB0_21:
ds_load_b32 v2, v0
v_cvt_f32_i32_e32 v3, s6
s_lshl_b64 s[16:17], s[14:15], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s16, s4, s16
s_addc_u32 s17, s5, s17
v_div_scale_f32 v4, null, v3, v3, v2
v_div_scale_f32 v7, vcc_lo, v2, v3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v5, v4
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v4, v5, 1.0
v_fmac_f32_e32 v5, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v6, v7, v5
v_fma_f32 v8, -v4, v6, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v6, v8, v5
v_fma_f32 v4, -v4, v6, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v4, v4, v5, v6
v_div_fixup_f32 v2, v4, v3, v2
global_store_b32 v0, v2, s[16:17]
.LBB0_22:
s_or_b32 exec_lo, exec_lo, s3
s_mul_i32 s6, s14, s6
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v2, s6, v1
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_lshl_b64 s[2:3], s[14:15], 2
v_lshlrev_b64 v[4:5], 2, v[2:3]
s_add_u32 s2, s4, s2
s_addc_u32 s3, s5, s3
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b32 s1, s[0:1], 0x28
v_add_co_u32 v6, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v5, vcc_lo
global_load_b32 v0, v[6:7], off
s_clause 0x1
global_load_b32 v2, v3, s[10:11]
global_load_b32 v6, v3, s[2:3]
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s1, 1
s_waitcnt vmcnt(1)
v_sub_f32_e32 v0, v0, v2
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v2, null, v6, v6, v0
v_div_scale_f32 v9, vcc_lo, v0, v6, v0
v_rcp_f32_e32 v7, v2
s_waitcnt_depctr 0xfff
v_fma_f32 v8, -v2, v7, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v7, v8, v7
v_mul_f32_e32 v8, v9, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v10, -v2, v8, v9
v_fmac_f32_e32 v8, v10, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v2, -v2, v8, v9
v_div_fmas_f32 v2, v2, v7, v8
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_div_fixup_f32 v0, v2, v6, v0
global_store_b32 v[4:5], v0, off
s_cbranch_scc1 .LBB0_27
v_add3_u32 v2, s6, s7, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 2, v[2:3]
v_cmp_gt_u32_e64 s0, s12, v2
v_add_co_u32 v0, vcc_lo, s8, v4
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_25
.p2align 6
.LBB0_24:
s_or_b32 exec_lo, exec_lo, s4
s_add_i32 s1, s1, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_27
.LBB0_25:
s_and_saveexec_b32 s4, s0
s_cbranch_execz .LBB0_24
global_load_b32 v2, v[0:1], off
s_clause 0x1
global_load_b32 v6, v3, s[10:11]
global_load_b32 v7, v3, s[2:3]
s_waitcnt vmcnt(1)
v_sub_f32_e32 v2, v2, v6
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f32 v6, null, v7, v7, v2
v_rcp_f32_e32 v8, v6
s_waitcnt_depctr 0xfff
v_fma_f32 v9, -v6, v8, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v8, v9, v8
v_div_scale_f32 v9, vcc_lo, v2, v7, v2
v_mul_f32_e32 v10, v9, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v11, -v6, v10, v9
v_fmac_f32_e32 v10, v11, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v6, -v6, v10, v9
v_div_fmas_f32 v6, v6, v8, v10
s_delay_alu instid0(VALU_DEP_1)
v_div_fixup_f32 v2, v6, v7, v2
global_store_b32 v[4:5], v2, off
s_branch .LBB0_24
.LBB0_27:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12Mean_SD_NormPfS_S_S_iii
.amdhsa_group_segment_fixed_size 8
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12Mean_SD_NormPfS_S_S_iii, .Lfunc_end0-_Z12Mean_SD_NormPfS_S_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
- .offset: 168
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 8
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12Mean_SD_NormPfS_S_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z12Mean_SD_NormPfS_S_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#define MAXN 8000 /* Max value of N */
int N; /* Matrix Dimension*/
int numThreads; /* Number of Threads */
/*Random*/
#define randm() 4|2[uid]&3
/*CUDA Function for calculating mean column-wise and then reducing each column's totals*/
/*This Function will be called Number of blocks times*/
__global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil)
{
extern __shared__ float mean[];//shared 1D-matrix for storing temporary results for mean of each threads
extern __shared__ float sd[];//shared 1D-matrix for storing temporary results for sd of each threads
__shared__ float meansum;//shared 1D-matrix for storing mean total of each threads
__shared__ float sdsum;//shared 1D-matrix for storing SD total of each threads
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//Getting Thread X Index for Particular Block
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//Getting Thread Y Index for Particular Block
int eva_block,index;
unsigned int thread_id = threadIdx.y;//Getting Id of thread
unsigned int j = idx_y * dim1 + idx_x;//calculating index for input matrix
__syncthreads();//waiting for all threads
mean[thread_id]=input[j];//Assigned each column element of matrix to each thread
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);//calculating index of remaining element
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
mean[thread_id]+=input[index];
}
}
/*Reducing sum of each thread to final block sum*/
if(thread_id==0)
{
for(int i=0;i<numThread;i++)
{
meansum+=mean[thread_id+i];
}
mean_out[blockIdx.x]=meansum/dim1;//Mean of block
}
__syncthreads();
sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//evaluating SD for each thread for particular block
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0);
}
}
/*Reducing SD Sum of each thread to final block SD sum*/
if(thread_id==0)
{
sdsum=0;
for(int i=0;i<numThread;i++)
{
sdsum+=sd[thread_id+i];//calculating index of remaining element
}
sd_out[blockIdx.x]=sdsum/dim1;//SD of block
}
__syncthreads();//waiting for threads
/*Normalization of each block data on basis of mean and sd of each block*/
output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
/*Reducing Normalized Sum for remaining elements*/
for(int i=0;i<eval_ceil;i++){
if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1)
{
output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];//Normalizing the Matrix Indexes
}
}
}
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 4) {
seed = atoi(argv[3]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 3) {
N = atoi(argv[1]);
numThreads = atoi(argv[2]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
/*Number of Threads should be less than or equal to 1024 else exit*/
if (numThreads > 1024)
{
printf("Number of threads cannot be more than %i.\n", 1024);
exit(0);
}
}
else
{
printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n",argv[0]);
exit(0);
}
printf("\nMatrix dimension N = %i.\n", N);
}
int main(int argc, char **argv)
{
/* Timing variables */
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
float* Host_Input = new float [N * N];//Input Matrix
float* Host_Output = new float [N * N];//Output Matrix
int i,j;
/*Initializing Input Matrix with random values*/
printf("\nInitializing...\n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
//Host_Input[j* N + i] = j+1;
Host_Input[j* N + i] = (float)rand() / 32768.0;
}
}
float* input;//Device Input Matrix
float* output;//Device Output Matrix
float* mean_out;//Device Mean Matrix
float* sd_out;//Device SD Matrix
size_t matrix_size_2d = N * N * sizeof(float);//Size of 2D Matrix
size_t matrix_size_1d = N * sizeof(float);//Size of 1D Matrix
//allocated the device memory for source array
hipMalloc(&input, matrix_size_2d);
hipMemcpy(input, Host_Input, matrix_size_2d, hipMemcpyHostToDevice);
//allocate the device memory for destination array
hipMalloc(&output, matrix_size_2d);
//allocate the device memory for mean array
hipMalloc(&mean_out, matrix_size_1d);
//allocate the device memory for sd array
hipMalloc(&sd_out, matrix_size_1d);
dim3 dimBlock;
dim3 dimGrid;
/* Designing Decisions for number of blocks and number of threads in each block */
if( N < numThreads)
{
dimBlock.x = 1;
dimBlock.y = N;
dimGrid.x = N;
dimGrid.y = 1;
}
else
{
dimBlock.x = 1;
dimBlock.y = numThreads;
dimGrid.x = N;
dimGrid.y = 1;
}
/* Start Clock */
printf("\nStarting clock.\n");
hipEventRecord(start);
gettimeofday(&etstart,&tzdummy);
etstart2 = times(&cputstart);
double d_ceil=(double)N/(double)numThreads;
int c=ceil(d_ceil);
//printf("nt=%d\t c1=%ld\tc=%d\n",nt,c1,c);
//Calling CUDA Kernel Function For Normalizing Matrix
Mean_SD_Norm<<<dimGrid, dimBlock, matrix_size_1d>>>(input,output,mean_out,sd_out,N,numThreads,c);
hipDeviceSynchronize();
/* Stop Clock code below*/
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
/*Copying Output Device Matrix to Output Host Matrix*/
hipMemcpy(Host_Output, output, N * N * sizeof(float), hipMemcpyDeviceToHost);
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
/* if (N < 10) {
printf("\nB1 =\n\t");
for (i= 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%1.10f%s", Host_Output[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}*/
/* Display result time */
printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
printf("Elapsed GPU Time = %g ms \n",milliseconds);
printf("Effective Bandwidth in (GB/s): %f \n", (2*matrix_size_2d/milliseconds)/1e6);
float mean = N * log2((float)N) + N;
float sd = N * log2((float)N) + (2*N) + (2*N*N);
float norm = 2 * N * N;
printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean+sd+norm)*1e-9)/(milliseconds*1e-3));
//deallocate device memory below
hipFree(input);
hipFree(output);
hipFree(mean_out);
hipFree(sd_out);
//deallocate Host Input and Host Output Matrix
free(Host_Input);
free(Host_Output);
exit(0);
} | .text
.file "matrix_cuda_norm.hip"
.globl _Z27__device_stub__Mean_SD_NormPfS_S_S_iii # -- Begin function _Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.p2align 4, 0x90
.type _Z27__device_stub__Mean_SD_NormPfS_S_S_iii,@function
_Z27__device_stub__Mean_SD_NormPfS_S_S_iii: # @_Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12Mean_SD_NormPfS_S_S_iii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z27__device_stub__Mean_SD_NormPfS_S_S_iii, .Lfunc_end0-_Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.cfi_endproc
# -- End function
.globl _Z9time_seedv # -- Begin function _Z9time_seedv
.p2align 4, 0x90
.type _Z9time_seedv,@function
_Z9time_seedv: # @_Z9time_seedv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %rsp, %rdi
leaq 16(%rsp), %rsi
callq gettimeofday
movl 8(%rsp), %eax
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9time_seedv, .Lfunc_end1-_Z9time_seedv
.cfi_endproc
# -- End function
.globl _Z10parametersiPPc # -- Begin function _Z10parametersiPPc
.p2align 4, 0x90
.type _Z10parametersiPPc,@function
_Z10parametersiPPc: # @_Z10parametersiPPc
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $32, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
leaq 8(%rsp), %rdi
leaq 24(%rsp), %rsi
callq gettimeofday
movl 16(%rsp), %edi
callq srand
cmpl $4, %ebp
jne .LBB2_2
# %bb.1:
movq 24(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movl %r14d, %edi
callq srand
movl $.L.str, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
.LBB2_2:
cmpl $3, %ebp
jl .LBB2_8
# %bb.3:
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movl %r14d, N(%rip)
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, numThreads(%rip)
leal -8001(%r14), %ecx
cmpl $-8001, %ecx # imm = 0xE0BF
jbe .LBB2_4
# %bb.6:
cmpl $1025, %eax # imm = 0x401
jge .LBB2_7
# %bb.9:
movl $.L.str.4, %edi
movl %r14d, %esi
xorl %eax, %eax
addq $32, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.LBB2_8:
.cfi_def_cfa_offset 64
movq (%rbx), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
xorl %edi, %edi
callq exit
.LBB2_4:
movl $.L.str.1, %edi
movl %r14d, %esi
jmp .LBB2_5
.LBB2_7:
movl $.L.str.2, %edi
movl $1024, %esi # imm = 0x400
.LBB2_5:
xorl %eax, %eax
callq printf
xorl %edi, %edi
callq exit
.Lfunc_end2:
.size _Z10parametersiPPc, .Lfunc_end2-_Z10parametersiPPc
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x38000000 # float 3.05175781E-5
.LCPI3_1:
.long 0x447a0000 # float 1000
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_2:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI3_3:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.LCPI3_4:
.quad 0x3f50624dd2f1a9fc # double 0.001
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 336
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
leaq 104(%rsp), %rdi
callq hipEventCreate
leaq 48(%rsp), %rdi
callq hipEventCreate
movl %ebp, %edi
movq %rbx, %rsi
callq _Z10parametersiPPc
movl N(%rip), %r14d
imull %r14d, %r14d
shlq $2, %r14
movq %r14, %rdi
callq _Znam
movq %rax, %rbx
movq %r14, %rdi
callq _Znam
movq %rax, %r14
movl $.Lstr, %edi
callq puts@PLT
movl N(%rip), %ebp
testl %ebp, %ebp
jle .LBB3_6
# %bb.1: # %.preheader.preheader
xorl %r15d, %r15d
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_5: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
incl %r15d
movl N(%rip), %ebp
cmpl %ebp, %r15d
jge .LBB3_6
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
cmpl $0, N(%rip)
jle .LBB3_5
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB3_2 Depth=1
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB3_4: # %.lr.ph
# Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI3_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movl N(%rip), %eax
imull %ebp, %eax
addl %r15d, %eax
cltq
movss %xmm0, (%rbx,%rax,4)
incl %ebp
cmpl N(%rip), %ebp
jl .LBB3_4
jmp .LBB3_5
.LBB3_6: # %._crit_edge68
movq %r14, 112(%rsp) # 8-byte Spill
movslq %ebp, %r15
imull %ebp, %ebp
leaq (,%rbp,4), %r12
shlq $2, %r15
leaq 40(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq 40(%rsp), %rdi
movq %rbx, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 32(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq 80(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 72(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movl N(%rip), %eax
movl numThreads(%rip), %r12d
cmpl %r12d, %eax
cmovll %eax, %r12d
movabsq $4294967296, %r13 # imm = 0x100000000
orq %rax, %r13
shlq $32, %r12
orq $1, %r12
movl $.Lstr.1, %edi
callq puts@PLT
movq 104(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
leaq 176(%rsp), %rdi
leaq 168(%rsp), %rsi
callq gettimeofday
leaq 248(%rsp), %rdi
callq times
movl N(%rip), %r14d
movl numThreads(%rip), %eax
movl %eax, 12(%rsp) # 4-byte Spill
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
movq %r15, %r8
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_8
# %bb.7:
cvtsi2sd %r14d, %xmm0
cvtsi2sdl 12(%rsp), %xmm1 # 4-byte Folded Reload
divsd %xmm1, %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %eax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
movl N(%rip), %r8d
movl numThreads(%rip), %r9d
movq %rcx, 160(%rsp)
movq %rdx, 152(%rsp)
movq %rsi, 144(%rsp)
movq %rdi, 136(%rsp)
movl %r8d, 68(%rsp)
movl %r9d, 64(%rsp)
movl %eax, 60(%rsp)
leaq 160(%rsp), %rax
movq %rax, 192(%rsp)
leaq 152(%rsp), %rax
movq %rax, 200(%rsp)
leaq 144(%rsp), %rax
movq %rax, 208(%rsp)
leaq 136(%rsp), %rax
movq %rax, 216(%rsp)
leaq 68(%rsp), %rax
movq %rax, 224(%rsp)
leaq 64(%rsp), %rax
movq %rax, 232(%rsp)
leaq 60(%rsp), %rax
movq %rax, 240(%rsp)
leaq 88(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 128(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 192(%rsp), %r9
movl $_Z12Mean_SD_NormPfS_S_S_iii, %edi
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_8:
callq hipDeviceSynchronize
movq 48(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 48(%rsp), %rdi
callq hipEventSynchronize
movl $0, 16(%rsp)
movq 104(%rsp), %rsi
movq 48(%rsp), %rdx
leaq 16(%rsp), %rdi
callq hipEventElapsedTime
leaq 88(%rsp), %rdi
leaq 168(%rsp), %rsi
callq gettimeofday
leaq 192(%rsp), %rdi
callq times
movl $.Lstr.2, %edi
callq puts@PLT
movq 32(%rsp), %rsi
movl N(%rip), %edx
imull %edx, %edx
shlq $2, %rdx
movq 112(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
movq 88(%rsp), %rcx
movq 96(%rsp), %rax
subq 176(%rsp), %rcx
imulq $1000000, %rcx, %rcx # imm = 0xF4240
subq 184(%rsp), %rax
addq %rcx, %rax
js .LBB3_9
# %bb.10:
cvtsi2ss %rax, %xmm0
jmp .LBB3_11
.LBB3_9:
movq %rax, %rcx
shrq %rcx
andl $1, %eax
orq %rcx, %rax
cvtsi2ss %rax, %xmm0
addss %xmm0, %xmm0
.LBB3_11:
divss .LCPI3_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
movss 16(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.9, %edi
movb $1, %al
callq printf
shlq $3, %rbp
xorps %xmm0, %xmm0
cvtsi2ss %rbp, %xmm0
divss 16(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
divsd .LCPI3_2(%rip), %xmm0
movl $.L.str.10, %edi
movb $1, %al
callq printf
xorps %xmm0, %xmm0
cvtsi2ssl N(%rip), %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
callq _ZSt4log2f
mulss 12(%rsp), %xmm0 # 4-byte Folded Reload
cvtsi2ssl N(%rip), %xmm1
movss %xmm1, 56(%rsp) # 4-byte Spill
addss %xmm1, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
movaps %xmm1, %xmm0
callq _ZSt4log2f
mulss 56(%rsp), %xmm0 # 4-byte Folded Reload
movl N(%rip), %eax
leal (%rax,%rax), %ecx
xorps %xmm1, %xmm1
cvtsi2ss %ecx, %xmm1
imull %eax, %ecx
cvtsi2ss %ecx, %xmm2
addss %xmm0, %xmm1
addss %xmm2, %xmm1
addss 12(%rsp), %xmm1 # 4-byte Folded Reload
addss %xmm2, %xmm1
xorps %xmm0, %xmm0
cvtss2sd %xmm1, %xmm0
mulsd .LCPI3_3(%rip), %xmm0
movss 16(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
mulsd .LCPI3_4(%rip), %xmm1
divsd %xmm1, %xmm0
movl $.L.str.11, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 80(%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %edi, %edi
callq exit
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.section .text._ZSt4log2f,"axG",@progbits,_ZSt4log2f,comdat
.weak _ZSt4log2f # -- Begin function _ZSt4log2f
.p2align 4, 0x90
.type _ZSt4log2f,@function
_ZSt4log2f: # @_ZSt4log2f
.cfi_startproc
# %bb.0:
jmp log2f # TAILCALL
.Lfunc_end4:
.size _ZSt4log2f, .Lfunc_end4-_ZSt4log2f
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12Mean_SD_NormPfS_S_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type N,@object # @N
.bss
.globl N
.p2align 2, 0x0
N:
.long 0 # 0x0
.size N, 4
.type numThreads,@object # @numThreads
.globl numThreads
.p2align 2, 0x0
numThreads:
.long 0 # 0x0
.size numThreads, 4
.type _Z12Mean_SD_NormPfS_S_S_iii,@object # @_Z12Mean_SD_NormPfS_S_S_iii
.section .rodata,"a",@progbits
.globl _Z12Mean_SD_NormPfS_S_S_iii
.p2align 3, 0x0
_Z12Mean_SD_NormPfS_S_S_iii:
.quad _Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.size _Z12Mean_SD_NormPfS_S_S_iii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Random seed = %i\n"
.size .L.str, 18
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "N = %i is out of range.\n"
.size .L.str.1, 25
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Number of threads cannot be more than %i.\n"
.size .L.str.2, 43
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n"
.size .L.str.3, 64
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nMatrix dimension N = %i.\n"
.size .L.str.4, 27
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\nElapsed time CPU Time = %g ms.\n"
.size .L.str.8, 33
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Elapsed GPU Time = %g ms \n"
.size .L.str.9, 27
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Effective Bandwidth in (GB/s): %f \n"
.size .L.str.10, 36
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Effective Throughput in (GFLOPS/s): %f \n"
.size .L.str.11, 42
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12Mean_SD_NormPfS_S_S_iii"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\nInitializing..."
.size .Lstr, 17
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "\nStarting clock."
.size .Lstr.1, 17
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Stopped clock."
.size .Lstr.2, 15
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12Mean_SD_NormPfS_S_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00051273_00000000-6_matrix_cuda_norm.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9time_seedv
.type _Z9time_seedv, @function
_Z9time_seedv:
.LFB2070:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rsi
leaq 16(%rsp), %rdi
call gettimeofday@PLT
movl 24(%rsp), %eax
movq 40(%rsp), %rdx
subq %fs:40, %rdx
jne .L6
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size _Z9time_seedv, .-_Z9time_seedv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Random seed = %i\n"
.LC1:
.string "N = %i is out of range.\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "Number of threads cannot be more than %i.\n"
.align 8
.LC3:
.string "Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n"
.section .rodata.str1.1
.LC4:
.string "\nMatrix dimension N = %i.\n"
.text
.globl _Z10parametersiPPc
.type _Z10parametersiPPc, @function
_Z10parametersiPPc:
.LFB2071:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movl %edi, %ebx
movq %rsi, %rbp
call _Z9time_seedv
movl %eax, %edi
call srand@PLT
cmpl $4, %ebx
je .L14
cmpl $2, %ebx
jg .L9
movq 0(%rbp), %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call exit@PLT
.L14:
movq 24(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %ebx
movl %eax, %edi
call srand@PLT
movl %ebx, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L9:
movq 8(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, N(%rip)
movq 16(%rbp), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, numThreads(%rip)
movl N(%rip), %edx
leal -1(%rdx), %ecx
cmpl $7999, %ecx
ja .L15
cmpl $1024, %eax
jle .L12
movl $1024, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call exit@PLT
.L15:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call exit@PLT
.L12:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2071:
.size _Z10parametersiPPc, .-_Z10parametersiPPc
.globl _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii
.type _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii, @function
_Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii:
.LFB2097:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 192(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z12Mean_SD_NormPfS_S_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii, .-_Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii
.globl _Z12Mean_SD_NormPfS_S_S_iii
.type _Z12Mean_SD_NormPfS_S_S_iii, @function
_Z12Mean_SD_NormPfS_S_S_iii:
.LFB2098:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z12Mean_SD_NormPfS_S_S_iii, .-_Z12Mean_SD_NormPfS_S_S_iii
.section .rodata.str1.1
.LC5:
.string "\nInitializing...\n"
.LC7:
.string "\nStarting clock.\n"
.LC9:
.string "Stopped clock.\n"
.section .rodata.str1.8
.align 8
.LC11:
.string "\nElapsed time CPU Time = %g ms.\n"
.section .rodata.str1.1
.LC12:
.string "Elapsed GPU Time = %g ms \n"
.section .rodata.str1.8
.align 8
.LC14:
.string "Effective Bandwidth in (GB/s): %f \n"
.align 8
.LC17:
.string "Effective Throughput in (GFLOPS/s): %f \n"
.text
.globl main
.type main, @function
main:
.LFB2072:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $232, %rsp
.cfi_def_cfa_offset 288
movl %edi, %ebx
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movq %rbp, %rsi
movl %ebx, %edi
call _Z10parametersiPPc
movl N(%rip), %eax
imull %eax, %eax
movslq %eax, %rdi
salq $2, %rdi
call _Znam@PLT
movq %rax, %r12
movl N(%rip), %eax
imull %eax, %eax
movslq %eax, %rdi
salq $2, %rdi
call _Znam@PLT
movq %rax, %r13
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl N(%rip), %eax
movl $0, %ebp
testl %eax, %eax
jg .L25
.L26:
movl %eax, %ebp
imull %eax, %ebp
movslq %ebp, %rbp
leaq 0(,%rbp,4), %r14
cltq
leaq 0(,%rax,4), %rbx
leaq 48(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r14, %rdx
movq %r12, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
leaq 56(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, 88(%rsp)
movl $1, 96(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl N(%rip), %edx
movl numThreads(%rip), %eax
movl %edx, %ecx
cmpl %eax, %edx
jge .L30
movl %edx, %eax
.L30:
movl %eax, 92(%rsp)
movl %ecx, 100(%rsp)
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
leaq 80(%rsp), %rsi
leaq 112(%rsp), %rdi
call gettimeofday@PLT
leaq 144(%rsp), %rdi
call times@PLT
movl N(%rip), %r15d
movl numThreads(%rip), %r14d
movl 96(%rsp), %ecx
movl $0, %r9d
movq %rbx, %r8
movq 88(%rsp), %rdx
movq 100(%rsp), %rdi
movl 108(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L31:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 28(%rsp)
leaq 28(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 80(%rsp), %rsi
leaq 128(%rsp), %rdi
call gettimeofday@PLT
leaq 176(%rsp), %rdi
call times@PLT
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl N(%rip), %eax
imull %eax, %eax
movslq %eax, %rdx
salq $2, %rdx
movl $2, %ecx
movq 56(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
imulq $1000000, 128(%rsp), %rax
subq 120(%rsp), %rax
addq 136(%rsp), %rax
imulq $1000000, 112(%rsp), %rdx
subq %rdx, %rax
js .L32
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
.L33:
divss .LC10(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
salq $3, %rbp
js .L34
pxor %xmm0, %xmm0
cvtsi2ssq %rbp, %xmm0
.L35:
divss 28(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
divsd .LC13(%rip), %xmm0
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl N(%rip), %ebx
pxor %xmm3, %xmm3
cvtsi2ssl %ebx, %xmm3
movss %xmm3, 8(%rsp)
movaps %xmm3, %xmm0
call log2f@PLT
movss %xmm0, 12(%rsp)
movss 8(%rsp), %xmm0
call log2f@PLT
movl %ebx, %eax
imull %ebx, %eax
addl %eax, %eax
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
addl %ebx, %ebx
pxor %xmm1, %xmm1
cvtsi2ssl %ebx, %xmm1
movss 8(%rsp), %xmm3
mulss %xmm3, %xmm0
addss %xmm1, %xmm0
addss %xmm2, %xmm0
movss 12(%rsp), %xmm1
mulss %xmm3, %xmm1
addss %xmm3, %xmm1
addss %xmm1, %xmm0
addss %xmm2, %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LC15(%rip), %xmm0
pxor %xmm1, %xmm1
cvtss2sd 28(%rsp), %xmm1
mulsd .LC16(%rip), %xmm1
divsd %xmm1, %xmm0
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movl $0, %edi
call exit@PLT
.L27:
call rand@PLT
movl %eax, %ecx
movl N(%rip), %edx
movl %edx, %eax
imull %ebx, %eax
addl %ebp, %eax
cltq
pxor %xmm0, %xmm0
cvtsi2ssl %ecx, %xmm0
mulss .LC6(%rip), %xmm0
movss %xmm0, (%r12,%rax,4)
addl $1, %ebx
cmpl %ebx, %edx
jg .L27
.L28:
addl $1, %ebp
movl N(%rip), %eax
cmpl %ebp, %eax
jle .L26
.L25:
movl $0, %ebx
cmpl $0, N(%rip)
jg .L27
jmp .L28
.L43:
pxor %xmm0, %xmm0
cvtsi2sdl %r15d, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl %r14d, %xmm1
divsd %xmm1, %xmm0
call ceil@PLT
subq $8, %rsp
.cfi_def_cfa_offset 296
cvttsd2sil %xmm0, %eax
pushq %rax
.cfi_def_cfa_offset 304
movl numThreads(%rip), %r9d
movl N(%rip), %r8d
movq 88(%rsp), %rcx
movq 80(%rsp), %rdx
movq 72(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z41__device_stub__Z12Mean_SD_NormPfS_S_S_iiiPfS_S_S_iii
addq $16, %rsp
.cfi_def_cfa_offset 288
jmp .L31
.L32:
movq %rax, %rdx
shrq %rdx
andl $1, %eax
orq %rax, %rdx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
addss %xmm0, %xmm0
jmp .L33
.L34:
shrq %rbp
pxor %xmm0, %xmm0
cvtsi2ssq %rbp, %xmm0
addss %xmm0, %xmm0
jmp .L35
.cfi_endproc
.LFE2072:
.size main, .-main
.section .rodata.str1.1
.LC18:
.string "_Z12Mean_SD_NormPfS_S_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z12Mean_SD_NormPfS_S_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl numThreads
.bss
.align 4
.type numThreads, @object
.size numThreads, 4
numThreads:
.zero 4
.globl N
.align 4
.type N, @object
.size N, 4
N:
.zero 4
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC6:
.long 939524096
.align 4
.LC10:
.long 1148846080
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC13:
.long 0
.long 1093567616
.align 8
.LC15:
.long -400107883
.long 1041313291
.align 8
.LC16:
.long -755914244
.long 1062232653
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matrix_cuda_norm.hip"
.globl _Z27__device_stub__Mean_SD_NormPfS_S_S_iii # -- Begin function _Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.p2align 4, 0x90
.type _Z27__device_stub__Mean_SD_NormPfS_S_S_iii,@function
_Z27__device_stub__Mean_SD_NormPfS_S_S_iii: # @_Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 160(%rsp), %rax
movq %rax, 144(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12Mean_SD_NormPfS_S_S_iii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z27__device_stub__Mean_SD_NormPfS_S_S_iii, .Lfunc_end0-_Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.cfi_endproc
# -- End function
.globl _Z9time_seedv # -- Begin function _Z9time_seedv
.p2align 4, 0x90
.type _Z9time_seedv,@function
_Z9time_seedv: # @_Z9time_seedv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %rsp, %rdi
leaq 16(%rsp), %rsi
callq gettimeofday
movl 8(%rsp), %eax
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9time_seedv, .Lfunc_end1-_Z9time_seedv
.cfi_endproc
# -- End function
.globl _Z10parametersiPPc # -- Begin function _Z10parametersiPPc
.p2align 4, 0x90
.type _Z10parametersiPPc,@function
_Z10parametersiPPc: # @_Z10parametersiPPc
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $32, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
leaq 8(%rsp), %rdi
leaq 24(%rsp), %rsi
callq gettimeofday
movl 16(%rsp), %edi
callq srand
cmpl $4, %ebp
jne .LBB2_2
# %bb.1:
movq 24(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movl %r14d, %edi
callq srand
movl $.L.str, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
.LBB2_2:
cmpl $3, %ebp
jl .LBB2_8
# %bb.3:
movq 8(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r14
movl %r14d, N(%rip)
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, numThreads(%rip)
leal -8001(%r14), %ecx
cmpl $-8001, %ecx # imm = 0xE0BF
jbe .LBB2_4
# %bb.6:
cmpl $1025, %eax # imm = 0x401
jge .LBB2_7
# %bb.9:
movl $.L.str.4, %edi
movl %r14d, %esi
xorl %eax, %eax
addq $32, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.LBB2_8:
.cfi_def_cfa_offset 64
movq (%rbx), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
xorl %edi, %edi
callq exit
.LBB2_4:
movl $.L.str.1, %edi
movl %r14d, %esi
jmp .LBB2_5
.LBB2_7:
movl $.L.str.2, %edi
movl $1024, %esi # imm = 0x400
.LBB2_5:
xorl %eax, %eax
callq printf
xorl %edi, %edi
callq exit
.Lfunc_end2:
.size _Z10parametersiPPc, .Lfunc_end2-_Z10parametersiPPc
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x38000000 # float 3.05175781E-5
.LCPI3_1:
.long 0x447a0000 # float 1000
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI3_2:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI3_3:
.quad 0x3e112e0be826d695 # double 1.0000000000000001E-9
.LCPI3_4:
.quad 0x3f50624dd2f1a9fc # double 0.001
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $280, %rsp # imm = 0x118
.cfi_def_cfa_offset 336
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
leaq 104(%rsp), %rdi
callq hipEventCreate
leaq 48(%rsp), %rdi
callq hipEventCreate
movl %ebp, %edi
movq %rbx, %rsi
callq _Z10parametersiPPc
movl N(%rip), %r14d
imull %r14d, %r14d
shlq $2, %r14
movq %r14, %rdi
callq _Znam
movq %rax, %rbx
movq %r14, %rdi
callq _Znam
movq %rax, %r14
movl $.Lstr, %edi
callq puts@PLT
movl N(%rip), %ebp
testl %ebp, %ebp
jle .LBB3_6
# %bb.1: # %.preheader.preheader
xorl %r15d, %r15d
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_5: # %._crit_edge
# in Loop: Header=BB3_2 Depth=1
incl %r15d
movl N(%rip), %ebp
cmpl %ebp, %r15d
jge .LBB3_6
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_4 Depth 2
cmpl $0, N(%rip)
jle .LBB3_5
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB3_2 Depth=1
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB3_4: # %.lr.ph
# Parent Loop BB3_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI3_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movl N(%rip), %eax
imull %ebp, %eax
addl %r15d, %eax
cltq
movss %xmm0, (%rbx,%rax,4)
incl %ebp
cmpl N(%rip), %ebp
jl .LBB3_4
jmp .LBB3_5
.LBB3_6: # %._crit_edge68
movq %r14, 112(%rsp) # 8-byte Spill
movslq %ebp, %r15
imull %ebp, %ebp
leaq (,%rbp,4), %r12
shlq $2, %r15
leaq 40(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
movq 40(%rsp), %rdi
movq %rbx, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 32(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
leaq 80(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 72(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movl N(%rip), %eax
movl numThreads(%rip), %r12d
cmpl %r12d, %eax
cmovll %eax, %r12d
movabsq $4294967296, %r13 # imm = 0x100000000
orq %rax, %r13
shlq $32, %r12
orq $1, %r12
movl $.Lstr.1, %edi
callq puts@PLT
movq 104(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
leaq 176(%rsp), %rdi
leaq 168(%rsp), %rsi
callq gettimeofday
leaq 248(%rsp), %rdi
callq times
movl N(%rip), %r14d
movl numThreads(%rip), %eax
movl %eax, 12(%rsp) # 4-byte Spill
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
movq %r15, %r8
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_8
# %bb.7:
cvtsi2sd %r14d, %xmm0
cvtsi2sdl 12(%rsp), %xmm1 # 4-byte Folded Reload
divsd %xmm1, %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %eax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 80(%rsp), %rsi
movq 72(%rsp), %rdi
movl N(%rip), %r8d
movl numThreads(%rip), %r9d
movq %rcx, 160(%rsp)
movq %rdx, 152(%rsp)
movq %rsi, 144(%rsp)
movq %rdi, 136(%rsp)
movl %r8d, 68(%rsp)
movl %r9d, 64(%rsp)
movl %eax, 60(%rsp)
leaq 160(%rsp), %rax
movq %rax, 192(%rsp)
leaq 152(%rsp), %rax
movq %rax, 200(%rsp)
leaq 144(%rsp), %rax
movq %rax, 208(%rsp)
leaq 136(%rsp), %rax
movq %rax, 216(%rsp)
leaq 68(%rsp), %rax
movq %rax, 224(%rsp)
leaq 64(%rsp), %rax
movq %rax, 232(%rsp)
leaq 60(%rsp), %rax
movq %rax, 240(%rsp)
leaq 88(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 128(%rsp), %rdx
leaq 120(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 192(%rsp), %r9
movl $_Z12Mean_SD_NormPfS_S_S_iii, %edi
pushq 120(%rsp)
.cfi_adjust_cfa_offset 8
pushq 136(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_8:
callq hipDeviceSynchronize
movq 48(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 48(%rsp), %rdi
callq hipEventSynchronize
movl $0, 16(%rsp)
movq 104(%rsp), %rsi
movq 48(%rsp), %rdx
leaq 16(%rsp), %rdi
callq hipEventElapsedTime
leaq 88(%rsp), %rdi
leaq 168(%rsp), %rsi
callq gettimeofday
leaq 192(%rsp), %rdi
callq times
movl $.Lstr.2, %edi
callq puts@PLT
movq 32(%rsp), %rsi
movl N(%rip), %edx
imull %edx, %edx
shlq $2, %rdx
movq 112(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
movq 88(%rsp), %rcx
movq 96(%rsp), %rax
subq 176(%rsp), %rcx
imulq $1000000, %rcx, %rcx # imm = 0xF4240
subq 184(%rsp), %rax
addq %rcx, %rax
js .LBB3_9
# %bb.10:
cvtsi2ss %rax, %xmm0
jmp .LBB3_11
.LBB3_9:
movq %rax, %rcx
shrq %rcx
andl $1, %eax
orq %rcx, %rax
cvtsi2ss %rax, %xmm0
addss %xmm0, %xmm0
.LBB3_11:
divss .LCPI3_1(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.8, %edi
movb $1, %al
callq printf
movss 16(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.9, %edi
movb $1, %al
callq printf
shlq $3, %rbp
xorps %xmm0, %xmm0
cvtsi2ss %rbp, %xmm0
divss 16(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
divsd .LCPI3_2(%rip), %xmm0
movl $.L.str.10, %edi
movb $1, %al
callq printf
xorps %xmm0, %xmm0
cvtsi2ssl N(%rip), %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
callq _ZSt4log2f
mulss 12(%rsp), %xmm0 # 4-byte Folded Reload
cvtsi2ssl N(%rip), %xmm1
movss %xmm1, 56(%rsp) # 4-byte Spill
addss %xmm1, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
movaps %xmm1, %xmm0
callq _ZSt4log2f
mulss 56(%rsp), %xmm0 # 4-byte Folded Reload
movl N(%rip), %eax
leal (%rax,%rax), %ecx
xorps %xmm1, %xmm1
cvtsi2ss %ecx, %xmm1
imull %eax, %ecx
cvtsi2ss %ecx, %xmm2
addss %xmm0, %xmm1
addss %xmm2, %xmm1
addss 12(%rsp), %xmm1 # 4-byte Folded Reload
addss %xmm2, %xmm1
xorps %xmm0, %xmm0
cvtss2sd %xmm1, %xmm0
mulsd .LCPI3_3(%rip), %xmm0
movss 16(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
mulsd .LCPI3_4(%rip), %xmm1
divsd %xmm1, %xmm0
movl $.L.str.11, %edi
movb $1, %al
callq printf
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 80(%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %edi, %edi
callq exit
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.section .text._ZSt4log2f,"axG",@progbits,_ZSt4log2f,comdat
.weak _ZSt4log2f # -- Begin function _ZSt4log2f
.p2align 4, 0x90
.type _ZSt4log2f,@function
_ZSt4log2f: # @_ZSt4log2f
.cfi_startproc
# %bb.0:
jmp log2f # TAILCALL
.Lfunc_end4:
.size _ZSt4log2f, .Lfunc_end4-_ZSt4log2f
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12Mean_SD_NormPfS_S_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type N,@object # @N
.bss
.globl N
.p2align 2, 0x0
N:
.long 0 # 0x0
.size N, 4
.type numThreads,@object # @numThreads
.globl numThreads
.p2align 2, 0x0
numThreads:
.long 0 # 0x0
.size numThreads, 4
.type _Z12Mean_SD_NormPfS_S_S_iii,@object # @_Z12Mean_SD_NormPfS_S_S_iii
.section .rodata,"a",@progbits
.globl _Z12Mean_SD_NormPfS_S_S_iii
.p2align 3, 0x0
_Z12Mean_SD_NormPfS_S_S_iii:
.quad _Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.size _Z12Mean_SD_NormPfS_S_S_iii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Random seed = %i\n"
.size .L.str, 18
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "N = %i is out of range.\n"
.size .L.str.1, 25
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Number of threads cannot be more than %i.\n"
.size .L.str.2, 43
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n"
.size .L.str.3, 64
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "\nMatrix dimension N = %i.\n"
.size .L.str.4, 27
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\nElapsed time CPU Time = %g ms.\n"
.size .L.str.8, 33
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Elapsed GPU Time = %g ms \n"
.size .L.str.9, 27
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Effective Bandwidth in (GB/s): %f \n"
.size .L.str.10, 36
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Effective Throughput in (GFLOPS/s): %f \n"
.size .L.str.11, 42
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12Mean_SD_NormPfS_S_S_iii"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\nInitializing..."
.size .Lstr, 17
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "\nStarting clock."
.size .Lstr.1, 17
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Stopped clock."
.size .Lstr.2, 15
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__Mean_SD_NormPfS_S_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12Mean_SD_NormPfS_S_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda_runtime.h>
#include <cuda.h>
__global__
void cudaGrayScale(float *R, float *G, float *B, float* gray, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n){
int size = n * sizeof(float);
float *d_R, *d_G, *d_B, *d_gray;
cudaMalloc((void **) &d_R, size);
cudaMemcpy(d_R, R, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_G, size);
cudaMemcpy(d_G, G, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_gray, size);
cudaGrayScale<<<ceil(n/1024.0), 1024>>>(d_R, d_G, d_B, d_gray, n);
cudaMemcpy(grayscale, d_gray, size, cudaMemcpyDeviceToHost);
cudaFree(d_R);
cudaFree(d_G);
cudaFree(d_B);
cudaFree(d_gray);
} | code for sm_80
Function : _Z13cudaGrayScalePfS_S_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R6, R0, R11, c[0x0][0x168] ; /* 0x00005a0000067625 */
/* 0x000fc800078e020b */
/*0090*/ IMAD.WIDE R2, R0.reuse, R11.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe200078e020b */
/*00a0*/ LDG.E R19, [R6.64] ; /* 0x0000000406137981 */
/* 0x0000a6000c1e1900 */
/*00b0*/ IMAD.WIDE R10, R0, R11, c[0x0][0x170] ; /* 0x00005c00000a7625 */
/* 0x000fe200078e020b */
/*00c0*/ LDG.E R18, [R2.64] ; /* 0x0000000402127981 */
/* 0x000eea000c1e1900 */
/*00d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000f22000c1e1900 */
/*00e0*/ MUFU.RCP64H R13, 350 ; /* 0x4075e000000d7908 */
/* 0x000e620000001800 */
/*00f0*/ IMAD.MOV.U32 R16, RZ, RZ, 0x0 ; /* 0x00000000ff107424 */
/* 0x000fe200078e00ff */
/*0100*/ BSSY B0, 0x280 ; /* 0x0000017000007945 */
/* 0x000fe20003800000 */
/*0110*/ IMAD.MOV.U32 R17, RZ, RZ, 0x4075e000 ; /* 0x4075e000ff117424 */
/* 0x000fc400078e00ff */
/*0120*/ IMAD.MOV.U32 R12, RZ, RZ, 0x1 ; /* 0x00000001ff0c7424 */
/* 0x000fcc00078e00ff */
/*0130*/ DFMA R14, R12, -R16, 1 ; /* 0x3ff000000c0e742b */
/* 0x002e4c0000000810 */
/*0140*/ DFMA R14, R14, R14, R14 ; /* 0x0000000e0e0e722b */
/* 0x002e4c000000000e */
/*0150*/ DFMA R14, R12, R14, R12 ; /* 0x0000000e0c0e722b */
/* 0x002e0c000000000c */
/*0160*/ DFMA R6, R14, -R16, 1 ; /* 0x3ff000000e06742b */
/* 0x001e0c0000000810 */
/*0170*/ DFMA R6, R14, R6, R14 ; /* 0x000000060e06722b */
/* 0x001fe2000000000e */
/*0180*/ F2F.F64.F32 R8, R19 ; /* 0x0000001300087310 */
/* 0x004e300000201800 */
/*0190*/ F2F.F64.F32 R4, R18 ; /* 0x0000001200047310 */
/* 0x008e700000201800 */
/*01a0*/ F2F.F64.F32 R2, R10 ; /* 0x0000000a00027310 */
/* 0x010ea20000201800 */
/*01b0*/ DMUL R8, R8, c[0x2][0x0] ; /* 0x0080000008087a28 */
/* 0x001e4c0000000000 */
/*01c0*/ DFMA R4, R4, c[0x2][0x8], R8 ; /* 0x0080020004047a2b */
/* 0x002e8c0000000008 */
/*01d0*/ DFMA R4, R2, c[0x2][0x10], R4 ; /* 0x0080040002047a2b */
/* 0x004e0c0000000004 */
/*01e0*/ DMUL R2, R4, R6 ; /* 0x0000000604027228 */
/* 0x001e080000000000 */
/*01f0*/ FSETP.GEU.AND P1, PT, |R5|, 6.5827683646048100446e-37, PT ; /* 0x036000000500780b */
/* 0x000fe40003f2e200 */
/*0200*/ DFMA R8, R2, -350, R4 ; /* 0xc075e0000208782b */
/* 0x001e0c0000000004 */
/*0210*/ DFMA R2, R6, R8, R2 ; /* 0x000000080602722b */
/* 0x001e140000000002 */
/*0220*/ FFMA R6, RZ, 3.841796875, R3 ; /* 0x4075e000ff067823 */
/* 0x001fca0000000003 */
/*0230*/ FSETP.GT.AND P0, PT, |R6|, 1.469367938527859385e-39, PT ; /* 0x001000000600780b */
/* 0x000fda0003f04200 */
/*0240*/ @P0 BRA P1, 0x270 ; /* 0x0000002000000947 */
/* 0x000fea0000800000 */
/*0250*/ MOV R6, 0x270 ; /* 0x0000027000067802 */
/* 0x000fe40000000f00 */
/*0260*/ CALL.REL.NOINC 0x2d0 ; /* 0x0000006000007944 */
/* 0x000fea0003c00000 */
/*0270*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0280*/ F2F.F32.F64 R3, R2 ; /* 0x0000000200037310 */
/* 0x000e220000301000 */
/*0290*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fc800078e00ff */
/*02a0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x000fca00078e0205 */
/*02b0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x001fe2000c101904 */
/*02c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02d0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x3ff5e000 ; /* 0x3ff5e000ff037424 */
/* 0x000fe200078e00ff */
/*02e0*/ FSETP.GEU.AND P1, PT, |R5|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000500780b */
/* 0x040fe20003f2e200 */
/*02f0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x0 ; /* 0x00000000ff027424 */
/* 0x000fe200078e00ff */
/*0300*/ LOP3.LUT R7, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005077812 */
/* 0x000fe200078ec0ff */
/*0310*/ MUFU.RCP64H R9, R3 ; /* 0x0000000300097308 */
/* 0x000e220000001800 */
/*0320*/ IMAD.MOV.U32 R8, RZ, RZ, 0x1 ; /* 0x00000001ff087424 */
/* 0x000fe200078e00ff */
/*0330*/ BSSY B1, 0x790 ; /* 0x0000045000017945 */
/* 0x000fe20003800000 */
/*0340*/ IMAD.MOV.U32 R13, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff0d7424 */
/* 0x000fe200078e00ff */
/*0350*/ ISETP.GE.U32.AND P0, PT, R7, 0x40700000, PT ; /* 0x407000000700780c */
/* 0x000fe20003f06070 */
/*0360*/ IMAD.MOV.U32 R18, RZ, RZ, R7 ; /* 0x000000ffff127224 */
/* 0x000fc400078e0007 */
/*0370*/ IMAD.MOV.U32 R19, RZ, RZ, 0x40700000 ; /* 0x40700000ff137424 */
/* 0x000fe200078e00ff */
/*0380*/ SEL R13, R13, 0x63400000, !P0 ; /* 0x634000000d0d7807 */
/* 0x000fc80004000000 */
/*0390*/ @!P1 LOP3.LUT R12, R13, 0x80000000, R5, 0xf8, !PT ; /* 0x800000000d0c9812 */
/* 0x000fe400078ef805 */
/*03a0*/ IADD3 R20, R19, -0x1, RZ ; /* 0xffffffff13147810 */
/* 0x000fe20007ffe0ff */
/*03b0*/ DFMA R10, R8, -R2, 1 ; /* 0x3ff00000080a742b */
/* 0x001e0c0000000802 */
/*03c0*/ DFMA R10, R10, R10, R10 ; /* 0x0000000a0a0a722b */
/* 0x001e0c000000000a */
/*03d0*/ DFMA R14, R8, R10, R8 ; /* 0x0000000a080e722b */
/* 0x0010640000000008 */
/*03e0*/ LOP3.LUT R9, R13, 0x800fffff, R5, 0xf8, !PT ; /* 0x800fffff0d097812 */
/* 0x001fe200078ef805 */
/*03f0*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0004 */
/*0400*/ @!P1 LOP3.LUT R11, R12, 0x100000, RZ, 0xfc, !PT ; /* 0x001000000c0b9812 */
/* 0x000fe200078efcff */
/*0410*/ @!P1 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a9224 */
/* 0x000fe200078e00ff */
/*0420*/ DFMA R16, R14, -R2, 1 ; /* 0x3ff000000e10742b */
/* 0x002e0a0000000802 */
/*0430*/ @!P1 DFMA R8, R8, 2, -R10 ; /* 0x400000000808982b */
/* 0x000e48000000080a */
/*0440*/ DFMA R16, R14, R16, R14 ; /* 0x000000100e10722b */
/* 0x001e0c000000000e */
/*0450*/ @!P1 LOP3.LUT R18, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000009129812 */
/* 0x002fe200078ec0ff */
/*0460*/ DMUL R10, R16, R8 ; /* 0x00000008100a7228 */
/* 0x001e060000000000 */
/*0470*/ IADD3 R12, R18, -0x1, RZ ; /* 0xffffffff120c7810 */
/* 0x000fc60007ffe0ff */
/*0480*/ DFMA R14, R10, -R2, R8 ; /* 0x800000020a0e722b */
/* 0x001e220000000008 */
/*0490*/ ISETP.GT.U32.AND P0, PT, R12, 0x7feffffe, PT ; /* 0x7feffffe0c00780c */
/* 0x000fc80003f04070 */
/*04a0*/ ISETP.GT.U32.OR P0, PT, R20, 0x7feffffe, P0 ; /* 0x7feffffe1400780c */
/* 0x000fe20000704470 */
/*04b0*/ DFMA R10, R16, R14, R10 ; /* 0x0000000e100a722b */
/* 0x001058000000000a */
/*04c0*/ @P0 BRA 0x670 ; /* 0x000001a000000947 */
/* 0x000fea0003800000 */
/*04d0*/ IADD3 R7, R7, -0x40700000, RZ ; /* 0xbf90000007077810 */
/* 0x003fc80007ffe0ff */
/*04e0*/ IMNMX R7, R7, -0x46a00000, !PT ; /* 0xb960000007077817 */
/* 0x000fc80007800200 */
/*04f0*/ IMNMX R4, R7, 0x46a00000, PT ; /* 0x46a0000007047817 */
/* 0x000fca0003800200 */
/*0500*/ IMAD.IADD R7, R4, 0x1, -R13 ; /* 0x0000000104077824 */
/* 0x000fe400078e0a0d */
/*0510*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fc600078e00ff */
/*0520*/ IADD3 R5, R7, 0x7fe00000, RZ ; /* 0x7fe0000007057810 */
/* 0x000fcc0007ffe0ff */
/*0530*/ DMUL R12, R10, R4 ; /* 0x000000040a0c7228 */
/* 0x000e140000000000 */
/*0540*/ FSETP.GTU.AND P0, PT, |R13|, 1.469367938527859385e-39, PT ; /* 0x001000000d00780b */
/* 0x001fda0003f0c200 */
/*0550*/ @P0 BRA 0x780 ; /* 0x0000022000000947 */
/* 0x000fea0003800000 */
/*0560*/ DFMA R2, R10, -R2, R8 ; /* 0x800000020a02722b */
/* 0x000e220000000008 */
/*0570*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fd200078e00ff */
/*0580*/ FSETP.NEU.AND P0, PT, R3.reuse, RZ, PT ; /* 0x000000ff0300720b */
/* 0x041fe40003f0d000 */
/*0590*/ LOP3.LUT R2, R3, 0x4075e000, RZ, 0x3c, !PT ; /* 0x4075e00003027812 */
/* 0x000fc800078e3cff */
/*05a0*/ LOP3.LUT R9, R2, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000002097812 */
/* 0x000fc800078ec0ff */
/*05b0*/ LOP3.LUT R5, R9, R5, RZ, 0xfc, !PT ; /* 0x0000000509057212 */
/* 0x000fc600078efcff */
/*05c0*/ @!P0 BRA 0x780 ; /* 0x000001b000008947 */
/* 0x000fea0003800000 */
/*05d0*/ IMAD.MOV R3, RZ, RZ, -R7 ; /* 0x000000ffff037224 */
/* 0x000fe200078e0a07 */
/*05e0*/ DMUL.RP R4, R10, R4 ; /* 0x000000040a047228 */
/* 0x000e220000008000 */
/*05f0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fe200078e00ff */
/*0600*/ IADD3 R7, -R7, -0x43300000, RZ ; /* 0xbcd0000007077810 */
/* 0x000fca0007ffe1ff */
/*0610*/ DFMA R2, R12, -R2, R10 ; /* 0x800000020c02722b */
/* 0x000e46000000000a */
/*0620*/ LOP3.LUT R9, R5, R9, RZ, 0x3c, !PT ; /* 0x0000000905097212 */
/* 0x001fce00078e3cff */
/*0630*/ FSETP.NEU.AND P0, PT, |R3|, R7, PT ; /* 0x000000070300720b */
/* 0x002fc80003f0d200 */
/*0640*/ FSEL R12, R4, R12, !P0 ; /* 0x0000000c040c7208 */
/* 0x000fe40004000000 */
/*0650*/ FSEL R13, R9, R13, !P0 ; /* 0x0000000d090d7208 */
/* 0x000fe20004000000 */
/*0660*/ BRA 0x780 ; /* 0x0000011000007947 */
/* 0x000fea0003800000 */
/*0670*/ DSETP.NAN.AND P0, PT, R4, R4, PT ; /* 0x000000040400722a */
/* 0x003e1c0003f08000 */
/*0680*/ @P0 BRA 0x760 ; /* 0x000000d000000947 */
/* 0x001fea0003800000 */
/*0690*/ ISETP.NE.AND P0, PT, R18, R19, PT ; /* 0x000000131200720c */
/* 0x000fe20003f05270 */
/*06a0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*06b0*/ IMAD.MOV.U32 R13, RZ, RZ, -0x80000 ; /* 0xfff80000ff0d7424 */
/* 0x000fd400078e00ff */
/*06c0*/ @!P0 BRA 0x780 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*06d0*/ ISETP.NE.AND P0, PT, R18, 0x7ff00000, PT ; /* 0x7ff000001200780c */
/* 0x000fe40003f05270 */
/*06e0*/ LOP3.LUT R4, R5, 0x4075e000, RZ, 0x3c, !PT ; /* 0x4075e00005047812 */
/* 0x000fe400078e3cff */
/*06f0*/ ISETP.EQ.OR P0, PT, R19, RZ, !P0 ; /* 0x000000ff1300720c */
/* 0x000fe40004702670 */
/*0700*/ LOP3.LUT R13, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x80000000040d7812 */
/* 0x000fd600078ec0ff */
/*0710*/ @P0 LOP3.LUT R2, R13, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000d020812 */
/* 0x000fe200078efcff */
/*0720*/ @!P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c8224 */
/* 0x000fe400078e00ff */
/*0730*/ @P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c0224 */
/* 0x000fe400078e00ff */
/*0740*/ @P0 IMAD.MOV.U32 R13, RZ, RZ, R2 ; /* 0x000000ffff0d0224 */
/* 0x000fe200078e0002 */
/*0750*/ BRA 0x780 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*0760*/ LOP3.LUT R13, R5, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000050d7812 */
/* 0x000fe200078efcff */
/*0770*/ IMAD.MOV.U32 R12, RZ, RZ, R4 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0004 */
/*0780*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0790*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fe400078e00ff */
/*07a0*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x000fe400078e000c */
/*07b0*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */
/* 0x000fe200078e000d */
/*07c0*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff83006007950 */
/* 0x000fec0003c3ffff */
/*07d0*/ BRA 0x7d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*07e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0800*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0810*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
#include <cuda.h>
__global__
void cudaGrayScale(float *R, float *G, float *B, float* gray, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n){
int size = n * sizeof(float);
float *d_R, *d_G, *d_B, *d_gray;
cudaMalloc((void **) &d_R, size);
cudaMemcpy(d_R, R, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_G, size);
cudaMemcpy(d_G, G, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_gray, size);
cudaGrayScale<<<ceil(n/1024.0), 1024>>>(d_R, d_G, d_B, d_gray, n);
cudaMemcpy(grayscale, d_gray, size, cudaMemcpyDeviceToHost);
cudaFree(d_R);
cudaFree(d_G);
cudaFree(d_B);
cudaFree(d_gray);
} | .file "tmpxft_0016c987_00000000-6_grayscale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i
.type _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i, @function
_Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z13cudaGrayScalePfS_S_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i, .-_Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i
.globl _Z13cudaGrayScalePfS_S_S_i
.type _Z13cudaGrayScalePfS_S_S_i, @function
_Z13cudaGrayScalePfS_S_S_i:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z13cudaGrayScalePfS_S_S_i, .-_Z13cudaGrayScalePfS_S_S_i
.globl _Z9grayscalePfS_S_S_i
.type _Z9grayscalePfS_S_S_i, @function
_Z9grayscalePfS_S_S_i:
.LFB2027:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r15
movq %rsi, %r14
movq %rdx, %r13
movq %rcx, %r12
movl %r8d, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leal 0(,%r8,4), %ebx
movslq %ebx, %rbx
movq %rsp, %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1024, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
pxor %xmm0, %xmm0
cvtsi2sdl %ebp, %xmm0
mulsd .LC0(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC4(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC1(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L12
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC3(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L12:
cvttsd2siq %xmm3, %rax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl 52(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L13:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl %ebp, %r8d
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i
jmp .L13
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size _Z9grayscalePfS_S_S_i, .-_Z9grayscalePfS_S_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "_Z13cudaGrayScalePfS_S_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z13cudaGrayScalePfS_S_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1062207488
.align 8
.LC1:
.long 0
.long 1127219200
.align 8
.LC3:
.long 0
.long 1072693248
.align 8
.LC4:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
#include <cuda.h>
__global__
void cudaGrayScale(float *R, float *G, float *B, float* gray, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n){
int size = n * sizeof(float);
float *d_R, *d_G, *d_B, *d_gray;
cudaMalloc((void **) &d_R, size);
cudaMemcpy(d_R, R, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_G, size);
cudaMemcpy(d_G, G, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_gray, size);
cudaGrayScale<<<ceil(n/1024.0), 1024>>>(d_R, d_G, d_B, d_gray, n);
cudaMemcpy(grayscale, d_gray, size, cudaMemcpyDeviceToHost);
cudaFree(d_R);
cudaFree(d_G);
cudaFree(d_B);
cudaFree(d_gray);
} | #include <hip/hip_runtime.h>
__global__
void cudaGrayScale(float *R, float *G, float *B, float* gray, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n){
int size = n * sizeof(float);
float *d_R, *d_G, *d_B, *d_gray;
hipMalloc((void **) &d_R, size);
hipMemcpy(d_R, R, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_G, size);
hipMemcpy(d_G, G, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_gray, size);
cudaGrayScale<<<ceil(n/1024.0), 1024>>>(d_R, d_G, d_B, d_gray, n);
hipMemcpy(grayscale, d_gray, size, hipMemcpyDeviceToHost);
hipFree(d_R);
hipFree(d_G);
hipFree(d_B);
hipFree(d_gray);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__
void cudaGrayScale(float *R, float *G, float *B, float* gray, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n){
int size = n * sizeof(float);
float *d_R, *d_G, *d_B, *d_gray;
hipMalloc((void **) &d_R, size);
hipMemcpy(d_R, R, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_G, size);
hipMemcpy(d_G, G, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_gray, size);
cudaGrayScale<<<ceil(n/1024.0), 1024>>>(d_R, d_G, d_B, d_gray, n);
hipMemcpy(grayscale, d_gray, size, hipMemcpyDeviceToHost);
hipFree(d_R);
hipFree(d_G);
hipFree(d_B);
hipFree(d_gray);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13cudaGrayScalePfS_S_S_i
.globl _Z13cudaGrayScalePfS_S_S_i
.p2align 8
.type _Z13cudaGrayScalePfS_S_S_i,@function
_Z13cudaGrayScalePfS_S_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b256 s[0:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
global_load_b32 v4, v[2:3], off
v_add_co_u32 v2, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
s_mov_b32 s1, 0x3fe6b851
s_mov_b32 s0, 0xeb851eb8
global_load_b32 v5, v[2:3], off
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
global_load_b32 v6, v[2:3], off
s_waitcnt vmcnt(2)
v_cvt_f64_f32_e32 v[2:3], v4
s_waitcnt vmcnt(1)
v_cvt_f64_f32_e32 v[4:5], v5
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[6:7], v6
s_delay_alu instid0(VALU_DEP_3)
v_mul_f64 v[2:3], v[2:3], s[0:1]
s_mov_b32 s1, 0x3fcae147
s_mov_b32 s0, 0xae147ae1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[2:3], v[4:5], s[0:1], v[2:3]
s_mov_b32 s1, 0x3fb1eb85
s_mov_b32 s0, 0x1eb851ec
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[2:3], v[6:7], s[0:1], v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[4:5], null, 0x4075e000, 0x4075e000, v[2:3]
v_rcp_f64_e32 v[6:7], v[4:5]
s_waitcnt_depctr 0xfff
v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_div_scale_f64 v[8:9], vcc_lo, v[2:3], 0x4075e000, v[2:3]
v_mul_f64 v[10:11], v[8:9], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f64 v[2:3], v[4:5], 0x4075e000, v[2:3]
v_cvt_f32_f64_e32 v2, v[2:3]
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13cudaGrayScalePfS_S_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13cudaGrayScalePfS_S_S_i, .Lfunc_end0-_Z13cudaGrayScalePfS_S_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13cudaGrayScalePfS_S_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13cudaGrayScalePfS_S_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__
void cudaGrayScale(float *R, float *G, float *B, float* gray, int n){
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) {
gray[i] = static_cast<float>((R[i] * 0.21 + G[i] * 0.71 + B[i] * 0.07) / 350.0);
}
}
void grayscale(float* R, float* G, float* B, float* grayscale, int n){
int size = n * sizeof(float);
float *d_R, *d_G, *d_B, *d_gray;
hipMalloc((void **) &d_R, size);
hipMemcpy(d_R, R, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_G, size);
hipMemcpy(d_G, G, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_gray, size);
cudaGrayScale<<<ceil(n/1024.0), 1024>>>(d_R, d_G, d_B, d_gray, n);
hipMemcpy(grayscale, d_gray, size, hipMemcpyDeviceToHost);
hipFree(d_R);
hipFree(d_G);
hipFree(d_B);
hipFree(d_gray);
} | .text
.file "grayscale.hip"
.globl _Z28__device_stub__cudaGrayScalePfS_S_S_i # -- Begin function _Z28__device_stub__cudaGrayScalePfS_S_S_i
.p2align 4, 0x90
.type _Z28__device_stub__cudaGrayScalePfS_S_S_i,@function
_Z28__device_stub__cudaGrayScalePfS_S_S_i: # @_Z28__device_stub__cudaGrayScalePfS_S_S_i
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13cudaGrayScalePfS_S_S_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z28__device_stub__cudaGrayScalePfS_S_S_i, .Lfunc_end0-_Z28__device_stub__cudaGrayScalePfS_S_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9grayscalePfS_S_S_i
.LCPI1_0:
.quad 0x3f50000000000000 # double 9.765625E-4
.text
.globl _Z9grayscalePfS_S_S_i
.p2align 4, 0x90
.type _Z9grayscalePfS_S_S_i,@function
_Z9grayscalePfS_S_S_i: # @_Z9grayscalePfS_S_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %r15d
movq %rcx, %rbx
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %rbp
leal (,%r15,4), %eax
movslq %eax, %r14
leaq 32(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 32(%rsp), %rdi
movq %rbp, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq %r13, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
cvtsi2sd %r15d, %xmm0
mulsd .LCPI1_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rsi, 96(%rsp)
movl %r15d, 44(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 44(%rsp), %rax
movq %rax, 160(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z13cudaGrayScalePfS_S_S_i, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9grayscalePfS_S_S_i, .Lfunc_end1-_Z9grayscalePfS_S_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13cudaGrayScalePfS_S_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13cudaGrayScalePfS_S_S_i,@object # @_Z13cudaGrayScalePfS_S_S_i
.section .rodata,"a",@progbits
.globl _Z13cudaGrayScalePfS_S_S_i
.p2align 3, 0x0
_Z13cudaGrayScalePfS_S_S_i:
.quad _Z28__device_stub__cudaGrayScalePfS_S_S_i
.size _Z13cudaGrayScalePfS_S_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13cudaGrayScalePfS_S_S_i"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__cudaGrayScalePfS_S_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13cudaGrayScalePfS_S_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13cudaGrayScalePfS_S_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R6, R0, R11, c[0x0][0x168] ; /* 0x00005a0000067625 */
/* 0x000fc800078e020b */
/*0090*/ IMAD.WIDE R2, R0.reuse, R11.reuse, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x0c0fe200078e020b */
/*00a0*/ LDG.E R19, [R6.64] ; /* 0x0000000406137981 */
/* 0x0000a6000c1e1900 */
/*00b0*/ IMAD.WIDE R10, R0, R11, c[0x0][0x170] ; /* 0x00005c00000a7625 */
/* 0x000fe200078e020b */
/*00c0*/ LDG.E R18, [R2.64] ; /* 0x0000000402127981 */
/* 0x000eea000c1e1900 */
/*00d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000f22000c1e1900 */
/*00e0*/ MUFU.RCP64H R13, 350 ; /* 0x4075e000000d7908 */
/* 0x000e620000001800 */
/*00f0*/ IMAD.MOV.U32 R16, RZ, RZ, 0x0 ; /* 0x00000000ff107424 */
/* 0x000fe200078e00ff */
/*0100*/ BSSY B0, 0x280 ; /* 0x0000017000007945 */
/* 0x000fe20003800000 */
/*0110*/ IMAD.MOV.U32 R17, RZ, RZ, 0x4075e000 ; /* 0x4075e000ff117424 */
/* 0x000fc400078e00ff */
/*0120*/ IMAD.MOV.U32 R12, RZ, RZ, 0x1 ; /* 0x00000001ff0c7424 */
/* 0x000fcc00078e00ff */
/*0130*/ DFMA R14, R12, -R16, 1 ; /* 0x3ff000000c0e742b */
/* 0x002e4c0000000810 */
/*0140*/ DFMA R14, R14, R14, R14 ; /* 0x0000000e0e0e722b */
/* 0x002e4c000000000e */
/*0150*/ DFMA R14, R12, R14, R12 ; /* 0x0000000e0c0e722b */
/* 0x002e0c000000000c */
/*0160*/ DFMA R6, R14, -R16, 1 ; /* 0x3ff000000e06742b */
/* 0x001e0c0000000810 */
/*0170*/ DFMA R6, R14, R6, R14 ; /* 0x000000060e06722b */
/* 0x001fe2000000000e */
/*0180*/ F2F.F64.F32 R8, R19 ; /* 0x0000001300087310 */
/* 0x004e300000201800 */
/*0190*/ F2F.F64.F32 R4, R18 ; /* 0x0000001200047310 */
/* 0x008e700000201800 */
/*01a0*/ F2F.F64.F32 R2, R10 ; /* 0x0000000a00027310 */
/* 0x010ea20000201800 */
/*01b0*/ DMUL R8, R8, c[0x2][0x0] ; /* 0x0080000008087a28 */
/* 0x001e4c0000000000 */
/*01c0*/ DFMA R4, R4, c[0x2][0x8], R8 ; /* 0x0080020004047a2b */
/* 0x002e8c0000000008 */
/*01d0*/ DFMA R4, R2, c[0x2][0x10], R4 ; /* 0x0080040002047a2b */
/* 0x004e0c0000000004 */
/*01e0*/ DMUL R2, R4, R6 ; /* 0x0000000604027228 */
/* 0x001e080000000000 */
/*01f0*/ FSETP.GEU.AND P1, PT, |R5|, 6.5827683646048100446e-37, PT ; /* 0x036000000500780b */
/* 0x000fe40003f2e200 */
/*0200*/ DFMA R8, R2, -350, R4 ; /* 0xc075e0000208782b */
/* 0x001e0c0000000004 */
/*0210*/ DFMA R2, R6, R8, R2 ; /* 0x000000080602722b */
/* 0x001e140000000002 */
/*0220*/ FFMA R6, RZ, 3.841796875, R3 ; /* 0x4075e000ff067823 */
/* 0x001fca0000000003 */
/*0230*/ FSETP.GT.AND P0, PT, |R6|, 1.469367938527859385e-39, PT ; /* 0x001000000600780b */
/* 0x000fda0003f04200 */
/*0240*/ @P0 BRA P1, 0x270 ; /* 0x0000002000000947 */
/* 0x000fea0000800000 */
/*0250*/ MOV R6, 0x270 ; /* 0x0000027000067802 */
/* 0x000fe40000000f00 */
/*0260*/ CALL.REL.NOINC 0x2d0 ; /* 0x0000006000007944 */
/* 0x000fea0003c00000 */
/*0270*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0280*/ F2F.F32.F64 R3, R2 ; /* 0x0000000200037310 */
/* 0x000e220000301000 */
/*0290*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fc800078e00ff */
/*02a0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x000fca00078e0205 */
/*02b0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x001fe2000c101904 */
/*02c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*02d0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x3ff5e000 ; /* 0x3ff5e000ff037424 */
/* 0x000fe200078e00ff */
/*02e0*/ FSETP.GEU.AND P1, PT, |R5|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000500780b */
/* 0x040fe20003f2e200 */
/*02f0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x0 ; /* 0x00000000ff027424 */
/* 0x000fe200078e00ff */
/*0300*/ LOP3.LUT R7, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005077812 */
/* 0x000fe200078ec0ff */
/*0310*/ MUFU.RCP64H R9, R3 ; /* 0x0000000300097308 */
/* 0x000e220000001800 */
/*0320*/ IMAD.MOV.U32 R8, RZ, RZ, 0x1 ; /* 0x00000001ff087424 */
/* 0x000fe200078e00ff */
/*0330*/ BSSY B1, 0x790 ; /* 0x0000045000017945 */
/* 0x000fe20003800000 */
/*0340*/ IMAD.MOV.U32 R13, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff0d7424 */
/* 0x000fe200078e00ff */
/*0350*/ ISETP.GE.U32.AND P0, PT, R7, 0x40700000, PT ; /* 0x407000000700780c */
/* 0x000fe20003f06070 */
/*0360*/ IMAD.MOV.U32 R18, RZ, RZ, R7 ; /* 0x000000ffff127224 */
/* 0x000fc400078e0007 */
/*0370*/ IMAD.MOV.U32 R19, RZ, RZ, 0x40700000 ; /* 0x40700000ff137424 */
/* 0x000fe200078e00ff */
/*0380*/ SEL R13, R13, 0x63400000, !P0 ; /* 0x634000000d0d7807 */
/* 0x000fc80004000000 */
/*0390*/ @!P1 LOP3.LUT R12, R13, 0x80000000, R5, 0xf8, !PT ; /* 0x800000000d0c9812 */
/* 0x000fe400078ef805 */
/*03a0*/ IADD3 R20, R19, -0x1, RZ ; /* 0xffffffff13147810 */
/* 0x000fe20007ffe0ff */
/*03b0*/ DFMA R10, R8, -R2, 1 ; /* 0x3ff00000080a742b */
/* 0x001e0c0000000802 */
/*03c0*/ DFMA R10, R10, R10, R10 ; /* 0x0000000a0a0a722b */
/* 0x001e0c000000000a */
/*03d0*/ DFMA R14, R8, R10, R8 ; /* 0x0000000a080e722b */
/* 0x0010640000000008 */
/*03e0*/ LOP3.LUT R9, R13, 0x800fffff, R5, 0xf8, !PT ; /* 0x800fffff0d097812 */
/* 0x001fe200078ef805 */
/*03f0*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0004 */
/*0400*/ @!P1 LOP3.LUT R11, R12, 0x100000, RZ, 0xfc, !PT ; /* 0x001000000c0b9812 */
/* 0x000fe200078efcff */
/*0410*/ @!P1 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a9224 */
/* 0x000fe200078e00ff */
/*0420*/ DFMA R16, R14, -R2, 1 ; /* 0x3ff000000e10742b */
/* 0x002e0a0000000802 */
/*0430*/ @!P1 DFMA R8, R8, 2, -R10 ; /* 0x400000000808982b */
/* 0x000e48000000080a */
/*0440*/ DFMA R16, R14, R16, R14 ; /* 0x000000100e10722b */
/* 0x001e0c000000000e */
/*0450*/ @!P1 LOP3.LUT R18, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000009129812 */
/* 0x002fe200078ec0ff */
/*0460*/ DMUL R10, R16, R8 ; /* 0x00000008100a7228 */
/* 0x001e060000000000 */
/*0470*/ IADD3 R12, R18, -0x1, RZ ; /* 0xffffffff120c7810 */
/* 0x000fc60007ffe0ff */
/*0480*/ DFMA R14, R10, -R2, R8 ; /* 0x800000020a0e722b */
/* 0x001e220000000008 */
/*0490*/ ISETP.GT.U32.AND P0, PT, R12, 0x7feffffe, PT ; /* 0x7feffffe0c00780c */
/* 0x000fc80003f04070 */
/*04a0*/ ISETP.GT.U32.OR P0, PT, R20, 0x7feffffe, P0 ; /* 0x7feffffe1400780c */
/* 0x000fe20000704470 */
/*04b0*/ DFMA R10, R16, R14, R10 ; /* 0x0000000e100a722b */
/* 0x001058000000000a */
/*04c0*/ @P0 BRA 0x670 ; /* 0x000001a000000947 */
/* 0x000fea0003800000 */
/*04d0*/ IADD3 R7, R7, -0x40700000, RZ ; /* 0xbf90000007077810 */
/* 0x003fc80007ffe0ff */
/*04e0*/ IMNMX R7, R7, -0x46a00000, !PT ; /* 0xb960000007077817 */
/* 0x000fc80007800200 */
/*04f0*/ IMNMX R4, R7, 0x46a00000, PT ; /* 0x46a0000007047817 */
/* 0x000fca0003800200 */
/*0500*/ IMAD.IADD R7, R4, 0x1, -R13 ; /* 0x0000000104077824 */
/* 0x000fe400078e0a0d */
/*0510*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fc600078e00ff */
/*0520*/ IADD3 R5, R7, 0x7fe00000, RZ ; /* 0x7fe0000007057810 */
/* 0x000fcc0007ffe0ff */
/*0530*/ DMUL R12, R10, R4 ; /* 0x000000040a0c7228 */
/* 0x000e140000000000 */
/*0540*/ FSETP.GTU.AND P0, PT, |R13|, 1.469367938527859385e-39, PT ; /* 0x001000000d00780b */
/* 0x001fda0003f0c200 */
/*0550*/ @P0 BRA 0x780 ; /* 0x0000022000000947 */
/* 0x000fea0003800000 */
/*0560*/ DFMA R2, R10, -R2, R8 ; /* 0x800000020a02722b */
/* 0x000e220000000008 */
/*0570*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fd200078e00ff */
/*0580*/ FSETP.NEU.AND P0, PT, R3.reuse, RZ, PT ; /* 0x000000ff0300720b */
/* 0x041fe40003f0d000 */
/*0590*/ LOP3.LUT R2, R3, 0x4075e000, RZ, 0x3c, !PT ; /* 0x4075e00003027812 */
/* 0x000fc800078e3cff */
/*05a0*/ LOP3.LUT R9, R2, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000002097812 */
/* 0x000fc800078ec0ff */
/*05b0*/ LOP3.LUT R5, R9, R5, RZ, 0xfc, !PT ; /* 0x0000000509057212 */
/* 0x000fc600078efcff */
/*05c0*/ @!P0 BRA 0x780 ; /* 0x000001b000008947 */
/* 0x000fea0003800000 */
/*05d0*/ IMAD.MOV R3, RZ, RZ, -R7 ; /* 0x000000ffff037224 */
/* 0x000fe200078e0a07 */
/*05e0*/ DMUL.RP R4, R10, R4 ; /* 0x000000040a047228 */
/* 0x000e220000008000 */
/*05f0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fe200078e00ff */
/*0600*/ IADD3 R7, -R7, -0x43300000, RZ ; /* 0xbcd0000007077810 */
/* 0x000fca0007ffe1ff */
/*0610*/ DFMA R2, R12, -R2, R10 ; /* 0x800000020c02722b */
/* 0x000e46000000000a */
/*0620*/ LOP3.LUT R9, R5, R9, RZ, 0x3c, !PT ; /* 0x0000000905097212 */
/* 0x001fce00078e3cff */
/*0630*/ FSETP.NEU.AND P0, PT, |R3|, R7, PT ; /* 0x000000070300720b */
/* 0x002fc80003f0d200 */
/*0640*/ FSEL R12, R4, R12, !P0 ; /* 0x0000000c040c7208 */
/* 0x000fe40004000000 */
/*0650*/ FSEL R13, R9, R13, !P0 ; /* 0x0000000d090d7208 */
/* 0x000fe20004000000 */
/*0660*/ BRA 0x780 ; /* 0x0000011000007947 */
/* 0x000fea0003800000 */
/*0670*/ DSETP.NAN.AND P0, PT, R4, R4, PT ; /* 0x000000040400722a */
/* 0x003e1c0003f08000 */
/*0680*/ @P0 BRA 0x760 ; /* 0x000000d000000947 */
/* 0x001fea0003800000 */
/*0690*/ ISETP.NE.AND P0, PT, R18, R19, PT ; /* 0x000000131200720c */
/* 0x000fe20003f05270 */
/*06a0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*06b0*/ IMAD.MOV.U32 R13, RZ, RZ, -0x80000 ; /* 0xfff80000ff0d7424 */
/* 0x000fd400078e00ff */
/*06c0*/ @!P0 BRA 0x780 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*06d0*/ ISETP.NE.AND P0, PT, R18, 0x7ff00000, PT ; /* 0x7ff000001200780c */
/* 0x000fe40003f05270 */
/*06e0*/ LOP3.LUT R4, R5, 0x4075e000, RZ, 0x3c, !PT ; /* 0x4075e00005047812 */
/* 0x000fe400078e3cff */
/*06f0*/ ISETP.EQ.OR P0, PT, R19, RZ, !P0 ; /* 0x000000ff1300720c */
/* 0x000fe40004702670 */
/*0700*/ LOP3.LUT R13, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x80000000040d7812 */
/* 0x000fd600078ec0ff */
/*0710*/ @P0 LOP3.LUT R2, R13, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000d020812 */
/* 0x000fe200078efcff */
/*0720*/ @!P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c8224 */
/* 0x000fe400078e00ff */
/*0730*/ @P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c0224 */
/* 0x000fe400078e00ff */
/*0740*/ @P0 IMAD.MOV.U32 R13, RZ, RZ, R2 ; /* 0x000000ffff0d0224 */
/* 0x000fe200078e0002 */
/*0750*/ BRA 0x780 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*0760*/ LOP3.LUT R13, R5, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000050d7812 */
/* 0x000fe200078efcff */
/*0770*/ IMAD.MOV.U32 R12, RZ, RZ, R4 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0004 */
/*0780*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0790*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fe400078e00ff */
/*07a0*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x000fe400078e000c */
/*07b0*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */
/* 0x000fe200078e000d */
/*07c0*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff83006007950 */
/* 0x000fec0003c3ffff */
/*07d0*/ BRA 0x7d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*07e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0800*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0810*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0820*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0830*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0840*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0850*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0860*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0870*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13cudaGrayScalePfS_S_S_i
.globl _Z13cudaGrayScalePfS_S_S_i
.p2align 8
.type _Z13cudaGrayScalePfS_S_S_i,@function
_Z13cudaGrayScalePfS_S_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b256 s[0:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
global_load_b32 v4, v[2:3], off
v_add_co_u32 v2, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
s_mov_b32 s1, 0x3fe6b851
s_mov_b32 s0, 0xeb851eb8
global_load_b32 v5, v[2:3], off
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
global_load_b32 v6, v[2:3], off
s_waitcnt vmcnt(2)
v_cvt_f64_f32_e32 v[2:3], v4
s_waitcnt vmcnt(1)
v_cvt_f64_f32_e32 v[4:5], v5
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[6:7], v6
s_delay_alu instid0(VALU_DEP_3)
v_mul_f64 v[2:3], v[2:3], s[0:1]
s_mov_b32 s1, 0x3fcae147
s_mov_b32 s0, 0xae147ae1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[2:3], v[4:5], s[0:1], v[2:3]
s_mov_b32 s1, 0x3fb1eb85
s_mov_b32 s0, 0x1eb851ec
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[2:3], v[6:7], s[0:1], v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[4:5], null, 0x4075e000, 0x4075e000, v[2:3]
v_rcp_f64_e32 v[6:7], v[4:5]
s_waitcnt_depctr 0xfff
v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
v_div_scale_f64 v[8:9], vcc_lo, v[2:3], 0x4075e000, v[2:3]
v_mul_f64 v[10:11], v[8:9], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
v_add_co_u32 v0, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f64 v[2:3], v[4:5], 0x4075e000, v[2:3]
v_cvt_f32_f64_e32 v2, v[2:3]
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13cudaGrayScalePfS_S_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13cudaGrayScalePfS_S_S_i, .Lfunc_end0-_Z13cudaGrayScalePfS_S_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13cudaGrayScalePfS_S_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13cudaGrayScalePfS_S_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016c987_00000000-6_grayscale.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i
.type _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i, @function
_Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z13cudaGrayScalePfS_S_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i, .-_Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i
.globl _Z13cudaGrayScalePfS_S_S_i
.type _Z13cudaGrayScalePfS_S_S_i, @function
_Z13cudaGrayScalePfS_S_S_i:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z13cudaGrayScalePfS_S_S_i, .-_Z13cudaGrayScalePfS_S_S_i
.globl _Z9grayscalePfS_S_S_i
.type _Z9grayscalePfS_S_S_i, @function
_Z9grayscalePfS_S_S_i:
.LFB2027:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r15
movq %rsi, %r14
movq %rdx, %r13
movq %rcx, %r12
movl %r8d, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leal 0(,%r8,4), %ebx
movslq %ebx, %rbx
movq %rsp, %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1024, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
pxor %xmm0, %xmm0
cvtsi2sdl %ebp, %xmm0
mulsd .LC0(%rip), %xmm0
movapd %xmm0, %xmm3
movsd .LC4(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC1(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L12
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC3(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L12:
cvttsd2siq %xmm3, %rax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
movl 52(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L16
.L13:
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl %ebp, %r8d
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z40__device_stub__Z13cudaGrayScalePfS_S_S_iPfS_S_S_i
jmp .L13
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size _Z9grayscalePfS_S_S_i, .-_Z9grayscalePfS_S_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "_Z13cudaGrayScalePfS_S_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z13cudaGrayScalePfS_S_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1062207488
.align 8
.LC1:
.long 0
.long 1127219200
.align 8
.LC3:
.long 0
.long 1072693248
.align 8
.LC4:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "grayscale.hip"
.globl _Z28__device_stub__cudaGrayScalePfS_S_S_i # -- Begin function _Z28__device_stub__cudaGrayScalePfS_S_S_i
.p2align 4, 0x90
.type _Z28__device_stub__cudaGrayScalePfS_S_S_i,@function
_Z28__device_stub__cudaGrayScalePfS_S_S_i: # @_Z28__device_stub__cudaGrayScalePfS_S_S_i
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13cudaGrayScalePfS_S_S_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end0:
.size _Z28__device_stub__cudaGrayScalePfS_S_S_i, .Lfunc_end0-_Z28__device_stub__cudaGrayScalePfS_S_S_i
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z9grayscalePfS_S_S_i
.LCPI1_0:
.quad 0x3f50000000000000 # double 9.765625E-4
.text
.globl _Z9grayscalePfS_S_S_i
.p2align 4, 0x90
.type _Z9grayscalePfS_S_S_i,@function
_Z9grayscalePfS_S_S_i: # @_Z9grayscalePfS_S_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r8d, %r15d
movq %rcx, %rbx
movq %rdx, %r12
movq %rsi, %r13
movq %rdi, %rbp
leal (,%r15,4), %eax
movslq %eax, %r14
leaq 32(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 32(%rsp), %rdi
movq %rbp, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq %r13, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
cvtsi2sd %r15d, %xmm0
mulsd .LCPI1_0(%rip), %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %rax
movl %eax, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $1024, %rdx # imm = 0x400
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
movq %rsi, 96(%rsp)
movl %r15d, 44(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 104(%rsp), %rax
movq %rax, 144(%rsp)
leaq 96(%rsp), %rax
movq %rax, 152(%rsp)
leaq 44(%rsp), %rax
movq %rax, 160(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z13cudaGrayScalePfS_S_S_i, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9grayscalePfS_S_S_i, .Lfunc_end1-_Z9grayscalePfS_S_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13cudaGrayScalePfS_S_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13cudaGrayScalePfS_S_S_i,@object # @_Z13cudaGrayScalePfS_S_S_i
.section .rodata,"a",@progbits
.globl _Z13cudaGrayScalePfS_S_S_i
.p2align 3, 0x0
_Z13cudaGrayScalePfS_S_S_i:
.quad _Z28__device_stub__cudaGrayScalePfS_S_S_i
.size _Z13cudaGrayScalePfS_S_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13cudaGrayScalePfS_S_S_i"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__cudaGrayScalePfS_S_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13cudaGrayScalePfS_S_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <stdexcept>
#include <vector>
#include <chrono>
#include <time.h>
double gpu_time_used;
#define I(row, col, ncols) (row * ncols + col)
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
__global__ void get_dst(float *dst, float *x, float *y,
float *mu_x, float *mu_y){
int i = blockIdx.x;
int j = threadIdx.x;
dst[I(i, j, blockDim.x)] = (x[i] - mu_x[j]) * (x[i] - mu_x[j]);
dst[I(i, j, blockDim.x)] += (y[i] - mu_y[j]) * (y[i] - mu_y[j]);
}
__global__ void regroup(int *group, float *dst, int k){
int i = blockIdx.x;
int j;
float min_dst;
min_dst = dst[I(i, 0, k)];
group[i] = 1;
for(j = 1; j < k; ++j){
if(dst[I(i, j, k)] < min_dst){
min_dst = dst[I(i, j, k)];
group[i] = j + 1;
}
}
}
__global__ void clear(float *sum_x, float *sum_y, int *nx, int *ny){
int j = threadIdx.x;
sum_x[j] = 0;
sum_y[j] = 0;
nx[j] = 0;
ny[j] = 0;
}
__global__ void recenter_step1(float *sum_x, float *sum_y, int *nx, int *ny,
float *x, float *y, int *group, int n){
int i;
int j = threadIdx.x;
for(i = 0; i < n; ++i){
if(group[i] == (j + 1)){
sum_x[j] += x[i];
sum_y[j] += y[i];
nx[j]++;
ny[j]++;
}
}
}
__global__ void recenter_step2(float *mu_x, float *mu_y, float *sum_x,
float *sum_y, int *nx, int *ny){
int j = threadIdx.x;
mu_x[j] = sum_x[j]/nx[j];
mu_y[j] = sum_y[j]/ny[j];
}
void kmeans(int nreps, int n, int k,
float *x_d, float *y_d, float *mu_x_d, float *mu_y_d,
int *group_d, int *nx_d, int *ny_d,
float *sum_x_d, float *sum_y_d, float *dst_d){
int i;
for(i = 0; i < nreps; ++i){
get_dst<<<n,k>>>(dst_d, x_d, y_d, mu_x_d, mu_y_d);
regroup<<<n,1>>>(group_d, dst_d, k);
clear<<<1,k>>>(sum_x_d, sum_y_d, nx_d, ny_d);
recenter_step1<<<1,k>>>(sum_x_d, sum_y_d, nx_d, ny_d, x_d, y_d, group_d, n);
recenter_step2<<<1,k>>>(mu_x_d, mu_y_d, sum_x_d, sum_y_d, nx_d, ny_d);
}
}
void read_data(float **x, float **y, float **mu_x, float **mu_y, int *n, int *k,char* arg);
void print_results(int *group, float *mu_x, float *mu_y, int n, int k,char* argv);
int main(int argc,char* argv[]){
/* cpu variables */
int n; /* number of points */
int k; /* number of clusters */
// int *group;
float *x = NULL, *y = NULL, *mu_x = NULL, *mu_y = NULL;
/* gpu variables */
int *group_d, *nx_d, *ny_d;
float *x_d, *y_d, *mu_x_d, *mu_y_d, *sum_x_d, *sum_y_d, *dst_d;
/* read data from files on cpu */
read_data(&x, &y, &mu_x, &mu_y, &n, &k,argv[2]);
/* allocate cpu memory */
// group = (int*) malloc(n*sizeof(int));
/* allocate gpu memory */
cudaMallocManaged(&group_d,n*sizeof(int));
cudaMallocManaged(&nx_d, k*sizeof(int));
cudaMallocManaged(&ny_d, k*sizeof(int));
cudaMallocManaged(&x_d, n*sizeof(float));
cudaMallocManaged(&y_d, n*sizeof(float));
cudaMallocManaged(&mu_x_d, k*sizeof(float));
cudaMallocManaged(&mu_y_d, k*sizeof(float));
cudaMallocManaged(&sum_x_d, k*sizeof(float));
cudaMallocManaged(&sum_y_d, k*sizeof(float));
cudaMallocManaged(&dst_d, n*k*sizeof(float));
memcpy(x_d, x, n*sizeof(float));
memcpy(y_d, x, n*sizeof(float));
memcpy(mu_x_d, mu_x, k*sizeof(float));
memcpy(mu_y_d, mu_y, k*sizeof(float));
/* write data to gpu */
// CUDA_CALL(cudaMemcpy(x_d, x, n*sizeof(float), cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(y_d, y, n*sizeof(float), cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(mu_x_d, mu_x, k*sizeof(float), cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(mu_y_d, mu_y, k*sizeof(float), cudaMemcpyHostToDevice));
/* perform kmeans */
const auto start = std::chrono::high_resolution_clock::now();
kmeans(100, n, k, x_d, y_d, mu_x_d, mu_y_d, group_d, nx_d, ny_d, sum_x_d, sum_y_d, dst_d);
const auto end = std::chrono::high_resolution_clock::now();
const auto duration =
std::chrono::duration_cast<std::chrono::duration<float>>(end - start);
std::cerr << "CUDA Took: " << duration.count() << "s" << " for "<<argv[3]<<" points." << std::endl;
gpu_time_used = duration.count();
/* read back data from gpu */
// CUDA_CALL(cudaMemcpy(group, group_d, n*sizeof(int), cudaMemcpyDeviceToHost));
// CUDA_CALL(cudaMemcpy(mu_x, mu_x_d, k*sizeof(float), cudaMemcpyDeviceToHost));
// CUDA_CALL(cudaMemcpy(mu_y, mu_y_d, k*sizeof(float), cudaMemcpyDeviceToHost));
/* print results and clean up */
print_results(group_d, mu_x_d, mu_y_d, n, k,argv[3]);
free(x);
free(y);
free(mu_x);
free(mu_y);
// free(group);
CUDA_CALL(cudaFree(x_d));
CUDA_CALL(cudaFree(y_d));
CUDA_CALL(cudaFree(mu_x_d));
CUDA_CALL(cudaFree(mu_y_d));
CUDA_CALL(cudaFree(group_d));
CUDA_CALL(cudaFree(nx_d));
CUDA_CALL(cudaFree(ny_d));
CUDA_CALL(cudaFree(sum_x_d));
CUDA_CALL(cudaFree(sum_y_d));
CUDA_CALL(cudaFree(dst_d));
return 0;
}
void read_data(float **x, float **y, float **mu_x, float **mu_y, int *n, int *k,char* arg){
FILE *fp;
char buf[64];
*n = 0;
fp = fopen(arg, "r");
while(fgets(buf, 64, fp) != NULL){
*n += 1;
*x = (float*) realloc(*x, (*n)*sizeof(float));
*y = (float*) realloc(*y, (*n)*sizeof(float));
std::istringstream line_stream(buf);
float x1,y1;
line_stream >> x1 >> y1;
(*x)[*n - 1] = x1;
(*y)[*n - 1] = y1;
}
fclose(fp);
*k = 0;
fp = fopen("../../data/kmeans/initCoord.txt", "r");
while(fgets(buf, 64, fp) != NULL){
*k += 1;
*mu_x = (float*) realloc(*mu_x, (*k)*sizeof(float));
*mu_y = (float*) realloc(*mu_y, (*k)*sizeof(float));
std::istringstream line_stream(buf);
float x1,y1;
line_stream >> x1 >> y1;
(*mu_x)[*k - 1] = x1;
(*mu_y)[*k - 1] = x1;
}
fclose(fp);
}
void print_results(int *group, float *mu_x, float *mu_y, int n, int k,char* arg){
FILE *fp;
int i;
std::string str(arg),str1,str2;
str = "result/cuda/" + str;
str1 = str + "_group_members.txt";
fp = fopen(str1.c_str(), "w");
for(i = 0; i < n; ++i){
fprintf(fp, "%d\n", group[i]);
}
fclose(fp);
str2 = str + "_centroids.txt";
fp = fopen(str2.c_str(), "w");
for(i = 0; i < k; ++i){
fprintf(fp, "%0.6f %0.6f\n", mu_x[i], mu_y[i]);
}
fclose(fp);
fp = fopen("CUDAtimes.txt", "a");
fprintf(fp, "%0.6f\n", gpu_time_used);
fclose(fp);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <stdexcept>
#include <vector>
#include <chrono>
#include <time.h>
double gpu_time_used;
#define I(row, col, ncols) (row * ncols + col)
#define CUDA_CALL(x) {if((x) != hipSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE);}}
__global__ void get_dst(float *dst, float *x, float *y,
float *mu_x, float *mu_y){
int i = blockIdx.x;
int j = threadIdx.x;
dst[I(i, j, blockDim.x)] = (x[i] - mu_x[j]) * (x[i] - mu_x[j]);
dst[I(i, j, blockDim.x)] += (y[i] - mu_y[j]) * (y[i] - mu_y[j]);
}
__global__ void regroup(int *group, float *dst, int k){
int i = blockIdx.x;
int j;
float min_dst;
min_dst = dst[I(i, 0, k)];
group[i] = 1;
for(j = 1; j < k; ++j){
if(dst[I(i, j, k)] < min_dst){
min_dst = dst[I(i, j, k)];
group[i] = j + 1;
}
}
}
__global__ void clear(float *sum_x, float *sum_y, int *nx, int *ny){
int j = threadIdx.x;
sum_x[j] = 0;
sum_y[j] = 0;
nx[j] = 0;
ny[j] = 0;
}
__global__ void recenter_step1(float *sum_x, float *sum_y, int *nx, int *ny,
float *x, float *y, int *group, int n){
int i;
int j = threadIdx.x;
for(i = 0; i < n; ++i){
if(group[i] == (j + 1)){
sum_x[j] += x[i];
sum_y[j] += y[i];
nx[j]++;
ny[j]++;
}
}
}
__global__ void recenter_step2(float *mu_x, float *mu_y, float *sum_x,
float *sum_y, int *nx, int *ny){
int j = threadIdx.x;
mu_x[j] = sum_x[j]/nx[j];
mu_y[j] = sum_y[j]/ny[j];
}
void kmeans(int nreps, int n, int k,
float *x_d, float *y_d, float *mu_x_d, float *mu_y_d,
int *group_d, int *nx_d, int *ny_d,
float *sum_x_d, float *sum_y_d, float *dst_d){
int i;
for(i = 0; i < nreps; ++i){
get_dst<<<n,k>>>(dst_d, x_d, y_d, mu_x_d, mu_y_d);
regroup<<<n,1>>>(group_d, dst_d, k);
clear<<<1,k>>>(sum_x_d, sum_y_d, nx_d, ny_d);
recenter_step1<<<1,k>>>(sum_x_d, sum_y_d, nx_d, ny_d, x_d, y_d, group_d, n);
recenter_step2<<<1,k>>>(mu_x_d, mu_y_d, sum_x_d, sum_y_d, nx_d, ny_d);
}
}
void read_data(float **x, float **y, float **mu_x, float **mu_y, int *n, int *k,char* arg);
void print_results(int *group, float *mu_x, float *mu_y, int n, int k,char* argv);
int main(int argc,char* argv[]){
/* cpu variables */
int n; /* number of points */
int k; /* number of clusters */
// int *group;
float *x = NULL, *y = NULL, *mu_x = NULL, *mu_y = NULL;
/* gpu variables */
int *group_d, *nx_d, *ny_d;
float *x_d, *y_d, *mu_x_d, *mu_y_d, *sum_x_d, *sum_y_d, *dst_d;
/* read data from files on cpu */
read_data(&x, &y, &mu_x, &mu_y, &n, &k,argv[2]);
/* allocate cpu memory */
// group = (int*) malloc(n*sizeof(int));
/* allocate gpu memory */
hipMallocManaged(&group_d,n*sizeof(int));
hipMallocManaged(&nx_d, k*sizeof(int));
hipMallocManaged(&ny_d, k*sizeof(int));
hipMallocManaged(&x_d, n*sizeof(float));
hipMallocManaged(&y_d, n*sizeof(float));
hipMallocManaged(&mu_x_d, k*sizeof(float));
hipMallocManaged(&mu_y_d, k*sizeof(float));
hipMallocManaged(&sum_x_d, k*sizeof(float));
hipMallocManaged(&sum_y_d, k*sizeof(float));
hipMallocManaged(&dst_d, n*k*sizeof(float));
memcpy(x_d, x, n*sizeof(float));
memcpy(y_d, x, n*sizeof(float));
memcpy(mu_x_d, mu_x, k*sizeof(float));
memcpy(mu_y_d, mu_y, k*sizeof(float));
/* write data to gpu */
// CUDA_CALL(cudaMemcpy(x_d, x, n*sizeof(float), cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(y_d, y, n*sizeof(float), cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(mu_x_d, mu_x, k*sizeof(float), cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(mu_y_d, mu_y, k*sizeof(float), cudaMemcpyHostToDevice));
/* perform kmeans */
const auto start = std::chrono::high_resolution_clock::now();
kmeans(100, n, k, x_d, y_d, mu_x_d, mu_y_d, group_d, nx_d, ny_d, sum_x_d, sum_y_d, dst_d);
const auto end = std::chrono::high_resolution_clock::now();
const auto duration =
std::chrono::duration_cast<std::chrono::duration<float>>(end - start);
std::cerr << "CUDA Took: " << duration.count() << "s" << " for "<<argv[3]<<" points." << std::endl;
gpu_time_used = duration.count();
/* read back data from gpu */
// CUDA_CALL(cudaMemcpy(group, group_d, n*sizeof(int), cudaMemcpyDeviceToHost));
// CUDA_CALL(cudaMemcpy(mu_x, mu_x_d, k*sizeof(float), cudaMemcpyDeviceToHost));
// CUDA_CALL(cudaMemcpy(mu_y, mu_y_d, k*sizeof(float), cudaMemcpyDeviceToHost));
/* print results and clean up */
print_results(group_d, mu_x_d, mu_y_d, n, k,argv[3]);
free(x);
free(y);
free(mu_x);
free(mu_y);
// free(group);
CUDA_CALL(hipFree(x_d));
CUDA_CALL(hipFree(y_d));
CUDA_CALL(hipFree(mu_x_d));
CUDA_CALL(hipFree(mu_y_d));
CUDA_CALL(hipFree(group_d));
CUDA_CALL(hipFree(nx_d));
CUDA_CALL(hipFree(ny_d));
CUDA_CALL(hipFree(sum_x_d));
CUDA_CALL(hipFree(sum_y_d));
CUDA_CALL(hipFree(dst_d));
return 0;
}
void read_data(float **x, float **y, float **mu_x, float **mu_y, int *n, int *k,char* arg){
FILE *fp;
char buf[64];
*n = 0;
fp = fopen(arg, "r");
while(fgets(buf, 64, fp) != NULL){
*n += 1;
*x = (float*) realloc(*x, (*n)*sizeof(float));
*y = (float*) realloc(*y, (*n)*sizeof(float));
std::istringstream line_stream(buf);
float x1,y1;
line_stream >> x1 >> y1;
(*x)[*n - 1] = x1;
(*y)[*n - 1] = y1;
}
fclose(fp);
*k = 0;
fp = fopen("../../data/kmeans/initCoord.txt", "r");
while(fgets(buf, 64, fp) != NULL){
*k += 1;
*mu_x = (float*) realloc(*mu_x, (*k)*sizeof(float));
*mu_y = (float*) realloc(*mu_y, (*k)*sizeof(float));
std::istringstream line_stream(buf);
float x1,y1;
line_stream >> x1 >> y1;
(*mu_x)[*k - 1] = x1;
(*mu_y)[*k - 1] = x1;
}
fclose(fp);
}
void print_results(int *group, float *mu_x, float *mu_y, int n, int k,char* arg){
FILE *fp;
int i;
std::string str(arg),str1,str2;
str = "result/cuda/" + str;
str1 = str + "_group_members.txt";
fp = fopen(str1.c_str(), "w");
for(i = 0; i < n; ++i){
fprintf(fp, "%d\n", group[i]);
}
fclose(fp);
str2 = str + "_centroids.txt";
fp = fopen(str2.c_str(), "w");
for(i = 0; i < k; ++i){
fprintf(fp, "%0.6f %0.6f\n", mu_x[i], mu_y[i]);
}
fclose(fp);
fp = fopen("CUDAtimes.txt", "a");
fprintf(fp, "%0.6f\n", gpu_time_used);
fclose(fp);
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.